
    qi*                     l    d dl mZmZ d dlmZ d dlmZ  ej                  e      Z	 G d de      Z
dgZy)   )PreTrainedConfiglayer_type_validation)RopeParameters)loggingc            >            e Zd ZdZdZdgZdddddddddddddddd	Zd
gdgfddgdgfdgdgfdZddiZ	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 d0de	dz  de	dz  de	dz  de	dz  de	dz  de	dz  de	dz  de	dz  de	dz  de	dz  de	dz  de	dz  de	dz  de
dz  dedz  d e	dz  d!edz  d"e	dz  d#e
dz  d$e
dz  d%eeeef   z  dz  d&e
dz  d'edz  d(edz  d)e	dz  d*e	dz  d+ee   dz  d,e	dz  d-e	dz  d.e	dz  f< fd/Z xZS )1Dots1Configa=  
    This is the configuration class to store the configuration of a [`Dots1Model`]. It is used to instantiate a
    `dots.llm1` model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    [rednote-hilab/dots.llm1.base](https://huggingface.co/rednote-hilab/dots.llm1.base).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 152064):
            Vocabulary size of the model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`Dots1Model`].
        hidden_size (`int`, *optional*, defaults to 4608):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 10944):
            Dimension of the MLP representations.
        moe_intermediate_size (`int`, *optional*, defaults to 1408):
            Dimension of the MoE representations.
        num_hidden_layers (`int`, *optional*, defaults to 62):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 32):
            Number of key/value heads for Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, Multi
            Head Attention (MHA) is used. If `num_key_value_heads=1`, Multi Query Attention (MQA) is used. Otherwise,
            Grouped Query Attention (GQA) is used. If not specified, defaults to `num_attention_heads`.
        n_shared_experts (`int`, *optional*, default=None):
            Number of shared experts. None means dense model.
        n_routed_experts (`int`, *optional*, default=None):
            Number of routed experts. None means dense model.
        n_group (`int`, *optional*, defaults to 1):
            Number of groups for routed experts.
        topk_group (`int`, *optional*, defaults to 1):
            Number of selected groups for each token (selected experts only within `topk_group` groups).
        num_experts_per_tok (`int`, *optional*, default=None):
            Number of selected experts. None means dense model.
        first_k_dense_replace (`int`, *optional*, defaults to 0):
            Number of dense layers at the beginning of the model before the first MoE layer.
        norm_topk_prob (`bool`, *optional*, defaults to `False`):
            Whether to normalize the weights of the routed experts.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string).
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            Maximum sequence length the model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            Epsilon used by the RMS normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions. Only relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie the input and output word embeddings.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the self-attention projections.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout ratio for the attention probabilities.
        routed_scaling_factor (`float`, *optional*, defaults to 1.0):
            Scaling factor for routed experts.
        sliding_window (`int`, *optional*, defaults to 4096):
            Size of the sliding window for attention. If not specified, defaults to `4096`.
        max_window_layers (`int`, *optional*, defaults to 62):
            The number of layers using full attention. The first `max_window_layers` layers will use full attention, while any
            additional layer afterwards will use SWA (Sliding Window Attention).
        layer_types (`list`, *optional*):
            Attention pattern for each layer.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*):
            End of stream token id.

    Examples:
        ```python
        >>> from transformers import Dots1Model, Dots1Config

        >>> # Initializing a Dots1 style configuration
        >>> configuration = Dots1Config()

        >>> # Accessing the model configuration
        >>> configuration = model.config
        ```
    dots1past_key_valuescolwiserowwisereplicated_with_grad_allreducepacked_colwisemoe_tp_experts)zlayers.*.self_attn.q_projzlayers.*.self_attn.k_projzlayers.*.self_attn.v_projzlayers.*.self_attn.o_projzlayers.*.self_attn.q_normzlayers.*.self_attn.k_normz!layers.*.mlp.experts.gate_up_projzlayers.*.mlp.experts.down_projzlayers.*.mlp.expertsz%layers.*.mlp.shared_experts.gate_projz#layers.*.mlp.shared_experts.up_projz%layers.*.mlp.shared_experts.down_projzlayers.*.mlp.gate_projzlayers.*.mlp.up_projzlayers.*.mlp.down_proj	input_idsinputs_embedshidden_statesattention_mask)embed_tokenslayersnormnum_local_expertsn_routed_expertsN
vocab_sizehidden_sizeintermediate_sizemoe_intermediate_sizenum_hidden_layersnum_attention_headsnum_key_value_headsn_shared_expertsn_group
topk_groupnum_experts_per_tokfirst_k_dense_replacenorm_topk_prob
hidden_actmax_position_embeddingsinitializer_rangerms_norm_eps	use_cachetie_word_embeddingsrope_parametersattention_biasattention_dropoutrouted_scaling_factorsliding_windowmax_window_layerslayer_typespad_token_idbos_token_ideos_token_idc                    || _         || _        || _        || _        || _        || _        || _        || _        |	| _        || _	        || _
        || _        ||}|
| _        || _        || _        || _        || _        || _        || _        || _        || _        || _        || _        || _        || _        | j0                  Et3        | j
                        D  cg c]!  } | j,                  | | j.                  k\  rdnd# c} | _        t5        | j0                  | j
                         || _        || _        || _        || _        || _        tA        !|   di | y c c} w )Nsliding_attentionfull_attention )"r   r'   r   r   r   r   r   r    r   r#   r$   r%   r!   r"   r   r&   r(   r)   r*   r-   r.   r/   r0   r1   r2   ranger   r+   r3   r4   r5   r,   super__init__)"selfr   r   r   r   r   r   r   r    r   r!   r"   r#   r$   r%   r&   r'   r(   r)   r*   r+   r,   r-   r.   r/   r0   r1   r2   r3   r4   r5   kwargsi	__class__s"                                    _/opt/pipecat/venv/lib/python3.12/site-packages/transformers/models/dots1/configuration_dots1.pyr<   zDots1Config.__init__   s   D %'>$&!2%:"!2#6  0 0#6 %:",&"5$#6 $!2(",!2%:",!2&#
 t556	   &&2qD<R<R7R $%& D 	d..0F0FG#6 (((."6" s   &E)i R i   i*  i  >       rC   NN   rD   N    Fsilui   g{Gz?gư>TFNFg        g      ?i   rB   NNNN)__name__
__module____qualname____doc__
model_typekeys_to_ignore_at_inferencebase_model_tp_planbase_model_pp_planattribute_mapintboolstrfloatr   dictlistr<   __classcell__)r@   s   @rA   r   r      s   Wr J#4"5 &/%.%.%.%E%E-=*3 01:/81:"+ )"+& &(9:#%568IJ!"_$56 	/M "("&(-,0(**,*,'+'+!"*.,-&+!'.2*.#'!%+0MQ&+*-.1%)(*(,#'#'#'?M#$JM# 4ZM# :	M#
  #TzM# :M# !4ZM# !4ZM# *M# *M# tM# $JM# !4ZM#  #TzM# tM#  $J!M#" "%t#M#$ !4<%M#& Dj'M#( $;)M#* "D[+M#, ($sN/B*CCdJ-M#. t/M#0 !4<1M#2  %t|3M#4 d
5M#6 :7M#8 #Y%9M#: Dj;M#< Dj=M#> Dj?M# M#    r   N)configuration_utilsr   r   modeling_rope_utilsr   utilsr   
get_loggerrG   loggerr   __all__r9   rW   rA   <module>r^      s>    K 1  
		H	%E#" E#P /rW   