
"""CLIP model configuration"""

from ...configuration_utils import PreTrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class CLIPTextConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CLIPTextModel`]. It is used to instantiate a CLIP
    text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the text encoder of the CLIP
    [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 49408):
            Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`CLIPModel`].
        hidden_size (`int`, *optional*, defaults to 512):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 77):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 49406):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 49407):
            End of stream token id.

    Example:

    ```python
    >>> from transformers import CLIPTextConfig, CLIPTextModel

    >>> # Initializing a CLIPTextConfig with openai/clip-vit-base-patch32 style configuration
    >>> configuration = CLIPTextConfig()

    >>> # Initializing a CLIPTextModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
    >>> model = CLIPTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "clip_text_model"
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=77,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=1,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
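

# A minimal usage sketch (not part of the original module): every argument above
# maps one-to-one onto an attribute, so overrides can be spot-checked directly.
# `to_dict()` is inherited from the `PreTrainedConfig` base class.
#
#   >>> cfg = CLIPTextConfig(hidden_size=768, num_attention_heads=12)
#   >>> cfg.max_position_embeddings  # CLIP's fixed 77-token context window
#   77
#   >>> cfg.to_dict()["hidden_size"]
#   768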


class CLIPVisionConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CLIPVisionModel`]. It is used to instantiate a
    CLIP vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the vision encoder of the CLIP
    [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).

    Example:

    ```python
    >>> from transformers import CLIPVisionConfig, CLIPVisionModel

    >>> # Initializing a CLIPVisionConfig with openai/clip-vit-base-patch32 style configuration
    >>> configuration = CLIPVisionConfig()

    >>> # Initializing a CLIPVisionModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
    >>> model = CLIPVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "clip_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
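

# A quick arithmetic sketch (not in the original file): with the defaults above,
# a 224x224 image split into 32x32 patches yields (224 // 32) ** 2 = 49 patch
# tokens, to which CLIP's vision transformer prepends one class embedding.
#
#   >>> cfg = CLIPVisionConfig()
#   >>> (cfg.image_size // cfg.patch_size) ** 2
#   49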


class CLIPConfig(PreTrainedConfig):
    r"""
    [`CLIPConfig`] is the configuration class to store the configuration of a [`CLIPModel`]. It is used to instantiate
    a CLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating
    a configuration with the defaults will yield a similar configuration to that of the CLIP
    [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`CLIPTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`CLIPVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original CLIP implementation.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import CLIPConfig, CLIPModel

    >>> # Initializing a CLIPConfig with openai/clip-vit-base-patch32 style configuration
    >>> configuration = CLIPConfig()

    >>> # Initializing a CLIPModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
    >>> model = CLIPModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a CLIPConfig from a CLIPTextConfig and a CLIPVisionConfig
    >>> from transformers import CLIPTextConfig, CLIPVisionConfig

    >>> # Initializing a CLIPText and CLIPVision configuration
    >>> config_text = CLIPTextConfig()
    >>> config_vision = CLIPVisionConfig()

    >>> config = CLIPConfig(text_config=config_text, vision_config=config_vision)
    ```"""

    model_type = "clip"
    sub_configs = {"text_config": CLIPTextConfig, "vision_config": CLIPVisionConfig}

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs):
        # The legacy `text_config_dict`/`vision_config_dict` arguments are popped
        # first so they are never saved as stray attributes on the config.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = CLIPTextConfig(**text_config_dict).to_dict()

            # Log when a key exists in both `text_config_dict` and `text_config` with different values.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key != "transformers_version":
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `CLIPTextConfig`. "
                            f'The value `text_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = CLIPVisionConfig(**vision_config_dict).to_dict()
            # Normalize the keys of `id2label` to strings for the comparison below.
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Log when a key exists in both `vision_config_dict` and `vision_config` with different values.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key != "transformers_version":
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `CLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = CLIPTextConfig()
            logger.info("`text_config` is `None`. Initializing the `CLIPTextConfig` with default values.")
        elif isinstance(text_config, dict):
            text_config = CLIPTextConfig(**text_config)

        if vision_config is None:
            vision_config = CLIPVisionConfig()
            logger.info("`vision_config` is `None`. Initializing the `CLIPVisionConfig` with default values.")
        elif isinstance(vision_config, dict):
            vision_config = CLIPVisionConfig(**vision_config)

        self.text_config = text_config
        self.vision_config = vision_config

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        super().__init__(**kwargs)


__all__ = ["CLIPConfig", "CLIPTextConfig", "CLIPVisionConfig"]
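

# A quick smoke test of the merging logic above; illustrative only, not part of
# the library. It assumes an installed `transformers` package so the absolute
# import works when this file is executed directly.
if __name__ == "__main__":
    from transformers import CLIPConfig

    # When the same key appears in both the legacy `text_config_dict` argument
    # and `text_config`, the value from `text_config_dict` wins.
    config = CLIPConfig(
        text_config={"hidden_size": 512},
        text_config_dict={"hidden_size": 1024},
    )
    assert config.text_config.hidden_size == 1024
    print(config.text_config.hidden_size)  # 1024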