
    qiL                        d dl Z d dl mZ ddlmZmZ ddlmZ ddlmZ ddl	m
Z
 ddlmZ dd	lmZmZmZmZmZ d
dlmZmZ d
dlmZ d
dlmZ d
dlmZmZmZmZmZ  ej@                  e!      Z" G d de      Z# G d de      Z$ G d de      Z% G d de      Z& G d de      Z' G d de      Z( G d de      Z) G d de      Z* G d d e      Z+g d!Z,y)"    N)nn   )CacheDynamicCache)GenerationConfig)FlashAttentionKwargs)BaseModelOutputWithPooling)Unpack)TransformersKwargsauto_docstringcan_return_tupleloggingtorch_compilable_check   )Idefics3ConfigIdefics3VisionConfig)Idefics3ImageProcessor)Idefics3ImageProcessorFast)Idefics3BaseModelOutputWithPast Idefics3ForConditionalGenerationIdefics3ModelIdefics3PreTrainedModelIdefics3VisionTransformerc                       e Zd ZdZdZy)SmolVLMVisionConfiga  
    This is the configuration class to store the configuration of a [`SmolVLMVisionModel`]. It is used to instantiate a
    SmolVLM vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
    [google/siglip-so400m-patch14-384](https://huggingface.co/google/siglip-so400m-patch14-384) used in SmolVLM
    [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1152):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers.models.smolvlm.modeling_smolvlm import SmolVLMVisionTransformer
    >>> from transformers.models.smolvlm.configuration_smolvlm import SmolVLMVisionConfig

    >>> # Initializing a SmolVLMVisionConfig with google/siglip-so400m-patch14-384 style configuration
    >>> configuration = SmolVLMVisionConfig()

    >>> # Initializing a SmolVLMVisionTransformer (with random weights) from the google/siglip-so400m-patch14-384 style configuration
    >>> model = SmolVLMVisionTransformer(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
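    >>> # Sanity note (a sketch): the encoder's patch grid is image_size // patch_size
    >>> # per side, e.g. 224 // 32 = 7 patches with the defaults documented above.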
    ```"""

    model_type = "smolvlm_vision"


class SmolVLMPreTrainedModel(Idefics3PreTrainedModel):
    pass


class SmolVLMVisionTransformer(Idefics3VisionTransformer):
    pass


class SmolVLMConfig(Idefics3Config):
    r"""
    This is the configuration class to store the configuration of a [`SmolVLMModel`]. It is used to instantiate a
    SmolVLM model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the model of the SmolVLM
    [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should cache the key/value pairs of the attention mechanism. Only
            relevant if `config.is_decoder=True`.
        image_token_id (`int`, *optional*, defaults to 128257):
            The id of the "image" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the word embeddings with the token embeddings.
        vision_config (`SmolVLMVisionConfig` or `dict`, *optional*, defaults to `SmolVLMVisionConfig`):
            Custom vision config or dict for the vision tower
        text_config (`PreTrainedConfig` or `dict`, *optional*, defaults to `LlamaConfig`):
            Custom text config or dict for the text model
        scale_factor (`int`, *optional*, defaults to 2):
            The scale factor for the image encoder.
        pad_token_id (`int`, *optional*, defaults to 128002):
            The id of the padding token.

    Example:
    ```python
    >>> from transformers import SmolVLMModel, SmolVLMConfig
    >>> # Initializing configuration
    >>> configuration = SmolVLMConfig()
    >>> # Initializing a model from the configuration
    >>> model = SmolVLMModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
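    >>> # The nested sub-configs can also be passed explicitly (a sketch; any of the
    >>> # arguments documented above may be used):
    >>> from transformers import SmolVLMVisionConfig
    >>> configuration = SmolVLMConfig(vision_config=SmolVLMVisionConfig(), scale_factor=2)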
    ```"""

    model_type = "smolvlm"


class SmolVLMImageProcessor(Idefics3ImageProcessor):
    pass


class SmolVLMImageProcessorFast(Idefics3ImageProcessorFast):
    pass


class SmolVLMBaseModelOutputWithPast(Idefics3BaseModelOutputWithPast):
    pass


class SmolVLMModel(Idefics3Model):
    """
    A subclass of Idefics3Model. We do *not* remove or block the call to inputs_merger
    in forward. Instead, we override inputs_merger here with custom logic.
    """

    def inputs_merger(
        self, input_ids: torch.LongTensor, inputs_embeds: torch.Tensor, image_hidden_states: torch.Tensor
    ) -> torch.Tensor:
        # `image_hidden_states` is flattened to (num_image_blocks, patch_size, hidden_size)
        _, patch_size, _ = image_hidden_states.shape

        if input_ids is None:
            # Locate <image> positions by comparing embeddings against the embedded image token
            image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            image_mask = image_mask[..., 0]
        else:
            image_mask = input_ids == self.config.image_token_id

        num_image_tokens = image_mask.sum(dim=1)
        torch_compilable_check(
            torch.all(num_image_tokens % patch_size == 0),
            "At least one sample has <image> tokens not divisible by patch_size.",
        )

        blocks_per_sample = num_image_tokens // patch_size

        # Exclusive cumulative sum: index of the first image block belonging to each sample
        offsets = torch.nn.functional.pad(blocks_per_sample.cumsum(dim=0), (1, 0), value=0)
        block_offset = offsets[:-1]
        row_cum = image_mask.cumsum(dim=-1)
        chunk_idx = (row_cum - 1) // patch_size
        local_idx = (row_cum - 1) % patch_size
        block_idx = block_offset.unsqueeze(1) + chunk_idx

        image_embeds = torch.zeros_like(inputs_embeds)
        image_embeds[image_mask] = image_hidden_states[block_idx[image_mask], local_idx[image_mask], :]

        merged_embeds = torch.where(image_mask.unsqueeze(-1), image_embeds, inputs_embeds)
        return merged_embeds

    @can_return_tuple
    @auto_docstring(
        custom_intro="Encodes images into continuous embeddings that can be forwarded to the language model."
    )
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        pixel_attention_mask: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input images.
        pixel_attention_mask (`torch.LongTensor`, *optional*):
            The attention mask indicating padded regions in the image.
        """
        batch_size, num_images, num_channels, height, width = pixel_values.shape
        pixel_values = pixel_values.to(dtype=self.dtype)  # fp16 compatibility
        pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])

        # Remove padding images - padding images are full 0.
        nb_values_per_image = pixel_values.shape[1:].numel()
        real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image
        # If no image is real, keep the first slot so the vision tower still gets an input
        real_images_inds[0] |= ~torch.any(real_images_inds)
        pixel_values = pixel_values[real_images_inds].contiguous()

        if pixel_attention_mask is None:
            pixel_attention_mask = torch.ones(
                size=[pixel_values.shape[i] for i in (0, 2, 3)],
                dtype=torch.bool,
                device=pixel_values.device,
            )
        else:
            # Remove padding images from the mask as well
            pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:])
            pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()

        # A patch is attended to if any of its pixels is non-padded
        patch_size = self.config.vision_config.patch_size
        patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
        patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
        patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()

        image_outputs = self.vision_model(
            pixel_values=pixel_values,
            patch_attention_mask=patch_attention_mask,
            return_dict=True,
            **kwargs,
        )
        image_hidden_states = image_outputs.last_hidden_state
        # Modality projection onto the text embedding space
        image_features = self.connector(image_hidden_states)
        image_outputs.pooler_output = image_features
        return image_outputs
    @can_return_tuple
    @auto_docstring(
        custom_intro="""
        Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to
        the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where
        max_num_images is the maximum number of images among the batch_size samples in the batch.
        Padding images are not needed beyond padding the pixel_values at the entrance of the model.
        For efficiency, we only pass through the vision_model's forward the real images by
        discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where
        image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
        """
    )
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        pixel_attention_mask: torch.BoolTensor | None = None,
        image_hidden_states: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple | SmolVLMBaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.training and self.text_model.gradient_checkpointing and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False

        if input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.text_model.get_input_embeddings()(input_ids).to(input_ids.device)

        # START VISUAL INPUTS INTEGRATION
        if pixel_values is not None and image_hidden_states is not None:
            raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time")
        elif pixel_values is not None:
            # Keep the connector (modality projection) output as the image embeddings
            image_hidden_states = self.get_image_features(
                pixel_values, pixel_attention_mask, return_dict=True
            ).pooler_output
            image_hidden_states = image_hidden_states.to(inputs_embeds.device)
        elif image_hidden_states is not None:
            image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=inputs_embeds.device)

        if image_hidden_states is not None:
            # Merge the image embeddings into the text embeddings at the <image> positions
            inputs_embeds = self.inputs_merger(
                input_ids=input_ids,
                inputs_embeds=inputs_embeds,
                image_hidden_states=image_hidden_states,
            )

        outputs = self.text_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )

        return SmolVLMBaseModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_hidden_states,
        )


class SmolVLMForConditionalGeneration(Idefics3ForConditionalGeneration):
    _tied_weights_keys = {"lm_head.weight": "model.text_model.embed_tokens.weight"}

    def __init__(self, config):
        super().__init__(config)
        self.model = SmolVLMModel(config)
        # Initialize the inner model's generation config from the composite model config
        self.model.generation_config = GenerationConfig.from_model_config(config)
        self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
        self.post_init()

    def forward(self, **super_kwargs):
        r"""
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The hidden states of the image encoder after modality projection.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or `model.image_token_id`. Tokens with indices set to `model.image_token_id` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> import torch

        >>> from transformers import AutoProcessor, AutoModelForImageTextToText
        >>> from transformers.image_utils import load_image

        >>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
        >>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
        >>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
        >>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")

        >>> processor = AutoProcessor.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct")
        >>> model = AutoModelForImageTextToText.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct", dtype=torch.bfloat16, device_map="auto")

        >>> # Create inputs
        >>> messages = [
        ...     {
        ...         "role": "user",
        ...         "content": [
        ...             {"type": "video", "path": "path/to/video"},
        ...             {"type": "text", "text": "What is happening in this video?"},
        ...         ]
        ...     }
        ... ]

        >>> inputs = processor.apply_chat_template(
        ...     [messages], add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        ... ).to(model.device)

        >>> # Generate
        >>> generated_ids = model.generate(**inputs, max_new_tokens=256)
        >>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)

        >>> print(generated_texts)
        ```"""
        super().forward(**super_kwargs)


__all__ = [
    "SmolVLMVisionConfig",
    "SmolVLMConfig",
    "SmolVLMImageProcessor",
    "SmolVLMImageProcessorFast",
    "SmolVLMForConditionalGeneration",
    "SmolVLMPreTrainedModel",
    "SmolVLMModel",
    "SmolVLMVisionTransformer",
]