
from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import RopeParameters


class NanoChatConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`NanoChatModel`]. It is used to instantiate a
    NanoChat model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the [karpathy/nanochat-d32](https://huggingface.co/karpathy/nanochat-d32).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 50304):
            Vocabulary size of the NanoChat model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`NanoChatModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimension of the MLP representations. If `None`, it will be computed based on the model architecture.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 6):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key/value heads that should be used to implement Grouped Query Attention (GQA). If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group's key and value head should be
            constructed by mean-pooling all the original heads within that group (an illustrative sketch follows this
            argument list). For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it
            is not specified, it will default to `num_attention_heads`.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
            The non-linear activation function (function or string) in the decoder.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        final_logit_softcapping (`float`, *optional*, defaults to 15.0):
            Scaling factor applied when tanh-softcapping the final logits (roughly
            `final_logit_softcapping * tanh(logits / final_logit_softcapping)`).
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, and value projection layers during self-attention.
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie the input and output word embeddings.
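
    The `num_key_value_heads` argument described above enables grouped-query attention. Below is a hedged,
    illustrative sketch (not part of the reference implementation; tensor names, shapes, and values are assumptions)
    of the mean-pooling conversion of a multi-head key projection into grouped key/value heads:

    ```python
    >>> import torch

    >>> num_attention_heads, num_key_value_heads, head_dim, hidden_size = 8, 2, 64, 512
    >>> k_proj = torch.randn(num_attention_heads * head_dim, hidden_size)  # original MHA key projection weight

    >>> # Group the original heads and mean-pool the key heads within each group
    >>> heads_per_group = num_attention_heads // num_key_value_heads
    >>> k_proj_gqa = k_proj.view(num_key_value_heads, heads_per_group, head_dim, hidden_size).mean(dim=1)
    >>> k_proj_gqa = k_proj_gqa.reshape(num_key_value_heads * head_dim, hidden_size)
    >>> tuple(k_proj_gqa.shape)
    (128, 512)
    ```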

    ```python
    >>> from transformers import NanoChatModel, NanoChatConfig

    >>> # Initializing a NanoChat style configuration
    >>> configuration = NanoChatConfig()

    >>> # Initializing a model from the NanoChat style configuration
    >>> model = NanoChatModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
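
    >>> # Hedged, illustrative additions (not from the upstream docstring): overriding a few arguments,
    >>> # e.g. grouped-query attention and an explicit RoPE parameter dictionary (values are placeholders)
    >>> gqa_configuration = NanoChatConfig(num_attention_heads=8, num_key_value_heads=2)
    >>> gqa_configuration.num_key_value_heads
    2

    >>> rope_configuration = NanoChatConfig(rope_parameters={"rope_theta": 10000.0})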
    ```"""

    model_type = "nanochat"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.fc1": "colwise",
        "layers.*.mlp.fc2": "rowwise",
    }

    def __init__(
        self,
        vocab_size: int = 50304,
        hidden_size: int = 768,
        intermediate_size: int | None = 8192,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 6,
        num_key_value_heads: int | None = None,
        max_position_embeddings: int = 2048,
        hidden_act: str = "relu2",
        attention_dropout: float = 0.0,
        rms_norm_eps: float = 1e-06,
        initializer_range: float = 0.02,
        rope_parameters: RopeParameters | dict | None = None,
        use_cache: bool = True,
        final_logit_softcapping: float | None = 15.0,
        attention_bias: bool = False,
        bos_token_id: int = 0,
        eos_token_id: int = 1,
        pad_token_id: int = 1,
        tie_word_embeddings: bool = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Default to multi-head attention when no key/value head count is given
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.attention_dropout = attention_dropout
        self.rms_norm_eps = rms_norm_eps
        self.initializer_range = initializer_range
        self.rope_parameters = rope_parameters
        self.use_cache = use_cache
        self.final_logit_softcapping = final_logit_softcapping
        self.attention_bias = attention_bias

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


__all__ = ["NanoChatConfig"]