"""Evolla model configuration"""

from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging


logger = logging.get_logger(__name__)


class SaProtConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`EvollaSaProtProteinEncoder`]. It is used to instantiate a
    SaProt model according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 446):
            Vocabulary size of the protein sequence model. Defines the number of different tokens that can be represented
            by the `input_ids` passed when calling [`EvollaModel`].
        mask_token_id (`int`, *optional*, defaults to 4):
            The id of the *mask* token in the protein sequence model.
        pad_token_id (`int`, *optional*, defaults to 1):
            The id of the *padding* token in the protein sequence model.
        hidden_size (`int`, *optional*, defaults to 1280):
            Dimensionality of the protein sequence model layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 33):
            Number of hidden layers in the protein sequence model.
        num_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the protein sequence model.
        intermediate_size (`int`, *optional*, defaults to 5120):
            Dimensionality of the intermediate layers in the protein sequence model.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the hidden layers in the protein sequence model.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities in the protein sequence model.
        max_position_embeddings (`int`, *optional*, defaults to 1026):
            The maximum sequence length that the protein sequence model might ever be used with. Typically set this to
            something large just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value for the layer normalization layer in the protein sequence model.
        position_embedding_type (`str`, *optional*, defaults to `"rotary"`):
            The type of position embedding to use in the protein sequence model. Currently only `"rotary"` is supported.
        emb_layer_norm_before (`bool`, *optional*, defaults to `False`):
            Whether to apply layer normalization before the position embedding in the protein sequence model.
        token_dropout (`bool`, *optional*, defaults to `True`):
            Whether to apply dropout to the tokens in the protein sequence model.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether to only use the decoder in an encoder-decoder architecture, otherwise it has no effect on
            decoder-only or encoder-only architectures.
        add_cross_attention (`bool`, *optional*, defaults to `False`):
            Whether cross-attention layers should be added to the model.
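
    Example:

    ```python
    >>> # `SaProtConfig` is not exported at the top level of `transformers`; this
    >>> # illustrative sketch imports it from its defining module
    >>> from transformers.models.evolla.configuration_evolla import SaProtConfig

    >>> # Initializing a SaProt protein-encoder configuration with the default values
    >>> configuration = SaProtConfig()

    >>> # Accessing one of the defaults documented above
    >>> configuration.hidden_size
    1280
    ```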
    """

    def __init__(
        self,
        vocab_size=446,
        mask_token_id=4,
        pad_token_id=1,
        hidden_size=1280,
        num_hidden_layers=33,
        num_attention_heads=20,
        intermediate_size=5120,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        position_embedding_type="rotary",
        emb_layer_norm_before=False,
        token_dropout=True,
        is_decoder=False,
        add_cross_attention=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.pad_token_id = pad_token_id
        self.mask_token_id = mask_token_id
        self.is_decoder = is_decoder
        self.add_cross_attention = add_cross_attention
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout


class EvollaConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`EvollaModel`]. It is used to instantiate an
    Evolla model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Evolla-10B.

    e.g. [westlake-repl/Evolla-10B-hf](https://huggingface.co/westlake-repl/Evolla-10B-hf)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        protein_encoder_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`SaProtConfig`].
        vocab_size (`int`, *optional*, defaults to 128256):
            Vocabulary size of the Evolla llama model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`EvollaModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the llama layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 14336):
            Dimensionality of the intermediate layers in the llama model.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the llama model.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the llama model.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            Number of key-value pairs for each attention layer in the llama model.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the llama model. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"silu"` are supported.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value for the RMS-norm layer in the llama model.
        rope_parameters (`RopeParameters` or `dict[str, RopeParameters]`, *optional*):
            The RoPE (rotary position embedding) parameters for the llama model, e.g. the base frequency and the
            scaling configuration.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the attention layer.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention layer.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the MLP layer.
        aligner_ffn_mult (`int`, *optional*, defaults to 4):
            The FFN multiplier for the aligner layer.
        aligner_enable_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in the aligner layer.
        aligner_attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities in the aligner layer.
        aligner_num_add_layers (`int`, *optional*, defaults to 8):
            The number of additional layers for the aligner layer.
        resampler_depth (`int`, *optional*, defaults to 6):
            The depth of the resampler layer in the llama model.
        resampler_dim_head (`int`, *optional*, defaults to 64):
            The dimension of the heads in the resampler layer in the llama model.
        resampler_heads (`int`, *optional*, defaults to 8):
            The number of heads in the resampler layer in the llama model.
        resampler_num_latents (`int`, *optional*, defaults to 64):
            The number of latents in the resampler layer in the llama model.
        resampler_ff_mult (`int`, *optional*, defaults to 4):
            The FFN multiplier for the resampler layer.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        pad_token_id (`int`, *optional*):
            The id of the *padding* token.
        bos_token_id (`int`, *optional*, defaults to 128000):
            The id of the *beginning-of-sequence* token.
        eos_token_id (`int`, *optional*, defaults to 128009):
            The id of the *end-of-sequence* token.
        use_cache (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the input and output word embeddings.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether to only use the decoder in an encoder-decoder architecture, otherwise it has no effect on
            decoder-only or encoder-only architectures.
        add_cross_attention (`bool`, *optional*, defaults to `False`):
            Whether cross-attention layers should be added to the model.

    Example:

    ```python
    >>> from transformers import EvollaModel, EvollaConfig

    >>> # Initializing an Evolla evolla-10b style configuration
    >>> configuration = EvollaConfig()

    >>> # Initializing a model from the evolla-10b style configuration
    >>> model = EvollaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
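
    >>> # Overriding the protein encoder sub-configuration with a plain dict of
    >>> # `SaProtConfig` arguments (a sketch of the documented behavior; unspecified
    >>> # fields keep their defaults)
    >>> configuration = EvollaConfig(protein_encoder_config={"num_hidden_layers": 12})
    >>> configuration.protein_encoder_config.num_hidden_layers
    12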
    ```"""

    model_type = "EvollaModel"
    sub_configs = {"protein_encoder_config": SaProtConfig}
    default_theta = 500000.0

    def __init__(
        self,
        protein_encoder_config: dict | None = None,
        vocab_size: int | None = 128256,
        hidden_size: int | None = 4096,
        intermediate_size: int | None = 14336,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = 8,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 8192,
        rms_norm_eps: float | None = 1e-05,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        mlp_bias: bool | None = False,
        aligner_ffn_mult: int | None = 4,
        aligner_enable_bias: bool | None = True,
        aligner_attention_probs_dropout_prob: float | None = 0.1,
        aligner_num_add_layers: int | None = 8,
        resampler_depth: int | None = 6,
        resampler_dim_head: int | None = 64,
        resampler_heads: int | None = 8,
        resampler_num_latents: int | None = 64,
        resampler_ff_mult: int | None = 4,
        initializer_range: float | None = 0.02,
        pad_token_id: int | None = None,
        bos_token_id: int | None = 128000,
        eos_token_id: int | None = 128009,
        use_cache: bool | None = False,
        tie_word_embeddings: bool | None = False,
        is_decoder: bool | None = False,
        add_cross_attention: bool | None = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.rms_norm_eps = rms_norm_eps
        self.rope_parameters = rope_parameters
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        self.aligner_ffn_mult = aligner_ffn_mult
        self.aligner_enable_bias = aligner_enable_bias
        self.aligner_attention_probs_dropout_prob = aligner_attention_probs_dropout_prob
        self.aligner_num_add_layers = aligner_num_add_layers
        self.resampler_depth = resampler_depth
        self.resampler_dim_head = resampler_dim_head
        self.resampler_heads = resampler_heads
        self.resampler_num_latents = resampler_num_latents
        self.resampler_ff_mult = resampler_ff_mult
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.is_decoder = is_decoder
        self.add_cross_attention = add_cross_attention

        if protein_encoder_config is None:
            protein_encoder_config = {}
            logger.info("`protein_encoder_config` is `None`. Initializing the `SaProtConfig` with default values.")
        self.protein_encoder_config = SaProtConfig(**protein_encoder_config)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


__all__ = ["EvollaConfig"]