"""PyTorch ViViT model."""

from collections.abc import Callable

import torch
from torch import nn

from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, logging, torch_int
from ...utils.generic import can_return_tuple, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_vivit import VivitConfig


logger = logging.get_logger(__name__)


class VivitTubeletEmbeddings(nn.Module):
    """
    Construct Vivit Tubelet embeddings.

    This module turns a batch of videos of shape (batch_size, num_frames, num_channels, height, width) into a tensor of
    shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.

    The seq_len (the number of patches) equals (number of frames // tubelet_size[0]) * (height // tubelet_size[1]) *
    (width // tubelet_size[2]).
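
    Example (a worked calculation, assuming the default `VivitConfig` values of 32 frames, 224x224 resolution and
    tubelet_size=[2, 16, 16]):

        seq_len = (32 // 2) * (224 // 16) * (224 // 16) = 16 * 14 * 14 = 3136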
    """

    def __init__(self, config: VivitConfig):
        super().__init__()
        self.num_frames = config.num_frames
        self.image_size = config.image_size
        self.patch_size = config.tubelet_size
        self.num_patches = (
            (self.image_size // self.patch_size[2])
            * (self.image_size // self.patch_size[1])
            * (self.num_frames // self.patch_size[0])
        )
        self.embed_dim = config.hidden_size

        self.projection = nn.Conv3d(
            config.num_channels, config.hidden_size, kernel_size=config.tubelet_size, stride=config.tubelet_size
        )

    def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
            )

        # permute to (batch_size, num_channels, num_frames, height, width)
        pixel_values = pixel_values.permute(0, 2, 1, 3, 4)

        x = self.projection(pixel_values)
        # flatten the spatio-temporal grid of tubelets into a sequence:
        # (batch_size, hidden_size, t, h, w) -> (batch_size, seq_len, hidden_size)
        x = x.flatten(2).transpose(1, 2)
        return x


class VivitEmbeddings(nn.Module):
    """
    Vivit Embeddings.

    Creates embeddings from a video using VivitTubeletEmbeddings, adds CLS token and positional embeddings.
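
    With the default `VivitConfig`, the resulting sequence has shape (batch_size, 3137, 768): the 3136 tubelet patches
    computed above plus one learned [CLS] token prepended at position 0.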
    r   c                    t         |           t        j                  t	        j
                  dd|j                              | _        t        |      | _	        t        j                  t	        j
                  d| j                  j                  dz   |j                              | _        t        j                  |j                        | _        |j                  dd  | _        || _        y )Nr   )r   r    r   	ParameterrE   zerosr&   	cls_tokenr   patch_embeddingsr%   position_embeddingsDropouthidden_dropout_probdropoutr#   r$   r   r+   s     r.   r    zVivitEmbeddings.__init__W   s    ekk!Q8J8J&KL 6v >#%<<KK400<<q@&BTBTU$
  zz&"<"<= --ab1r/   
embeddingsr<   r=   r2   c                    |j                   d   dz
  }| j                  j                   d   dz
  }t        j                  j	                         s||k(  r||k(  r| j                  S | j                  ddddf   }| j                  ddddf   }|j                   d   }|| j
                  d   z  }	|| j
                  d   z  }
t        |dz        }|j                  d|||      }|j                  dddd      }t        j                  j                  ||	|
fdd	
      }|j                  dddd      j                  dd|      }t        j                  ||fd      S )a   
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
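
        For example, under the default configuration the 3136 pre-trained patch positions are viewed as a 56x56 grid
        (torch_int(3136**0.5) == 56) and bicubically resized to a (height // tubelet_size[1], width // tubelet_size[2])
        grid before the [CLS] position embedding is re-attached.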
        """
        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings

        class_pos_embed = self.position_embeddings[:, :1]
        patch_pos_embed = self.position_embeddings[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size[0]
        new_width = width // self.patch_size[1]

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        cls_tokens = self.cls_token.tile([batch_size, 1, 1])
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float | None = None,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    if scaling is None:
        scaling = query.size(-1) ** -0.5

    # scaled dot-product between queries and keys gives the raw attention scores
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class VivitSelfAttention(nn.Module):
    def __init__(self, config: VivitConfig):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention heads "
                f"{config.num_attention_heads}."
            )

        self.config = config
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.dropout_prob = config.attention_probs_dropout_prob
        self.scaling = self.attention_head_size**-0.5
        self.is_causal = False

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

    def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size = hidden_states.shape[0]
        new_shape = batch_size, -1, self.num_attention_heads, self.attention_head_size

        key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
        value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
        query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        context_layer, attention_probs = attention_interface(
            self,
            query_layer,
            key_layer,
            value_layer,
            None,
            is_causal=self.is_causal,
            scaling=self.scaling,
            dropout=0.0 if not self.training else self.dropout_prob,
        )

        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.reshape(new_context_layer_shape)

        return context_layer, attention_probs


class VivitSelfOutput(nn.Module):
    """
    The residual connection is defined in VivitLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    r   c                     t         |           t        j                  |j                  |j                        | _        t        j                  |j                        | _        y N)	r   r    r   r   r&   denserR   rS   rT   r+   s     r.   r    zVivitSelfOutput.__init__   sB    YYv1163E3EF
zz&"<"<=r/   r   input_tensorr2   c                 J    | j                  |      }| j                  |      }|S r   r   rT   r,   r   r   s      r.   r?   zVivitSelfOutput.forward   s$    

=1]3r/   
rA   rB   rC   rD   r   r    rE   rF   r?   rH   rI   s   @r.   r   r      s=    
>{ >
U\\  RWR^R^ r/   r   c                   \     e Zd Zdef fdZdej                  dej                  fdZ xZS )VivitAttentionr   c                 b    t         |           t        |      | _        t	        |      | _        y r   )r   r    r   	attentionr   outputr+   s     r.   r    zVivitAttention.__init__  s&    +F3%f-r/   r   r2   c                 R    | j                  |      \  }}| j                  ||      }|S r   )r   r   )r,   r   self_attn_output_r   s        r.   r?   zVivitAttention.forward  s,    "nn];!-}=r/   	rA   rB   rC   r   r    rE   rF   r?   rH   rI   s   @r.   r   r     s*    .{ .
U\\ ell r/   r   c                   \     e Zd Zdef fdZdej                  dej                  fdZ xZS )VivitIntermediater   c                 P   t         |           t        j                  |j                  |j
                        | _        t        j                  |j                        | _	        t        |j                  t              rt        |j                     | _        y |j                  | _        y r   )r   r    r   r   r&   intermediate_sizer   rR   rS   rT   
isinstance
hidden_actstrr   intermediate_act_fnr+   s     r.   r    zVivitIntermediate.__init__  ss    YYv1163K3KL
zz&"<"<=f''-'-f.?.?'@D$'-'8'8D$r/   r   r2   c                 l    | j                  |      }| j                  |      }| j                  |      }|S r   )r   r   rT   )r,   r   s     r.   r?   zVivitIntermediate.forward  s4    

=100?]3r/   r   rI   s   @r.   r   r     s*    9{ 9U\\ ell r/   r   c                   t     e Zd Zdef fdZdej                  dej                  dej                  fdZ xZS )VivitOutputr   c                     t         |           t        j                  |j                  |j
                        | _        t        j                  |j                        | _	        y r   )
r   r    r   r   r   r&   r   rR   rS   rT   r+   s     r.   r    zVivitOutput.__init__!  sB    YYv779K9KL
zz&"<"<=r/   r   r   r2   c                 T    | j                  |      }| j                  |      }||z   }|S r   r   r   s      r.   r?   zVivitOutput.forward&  s.    

=1]3%4r/   r   rI   s   @r.   r   r      s8    >{ >
U\\  RWR^R^ r/   r   c                   `     e Zd ZdZdef fdZdej                  dej                  fdZ xZ	S )
VivitLayerzNThis corresponds to the EncoderBlock class in the scenic/vivit implementation.r   c                 r   t         |           |j                  | _        d| _        t	        |      | _        t        |      | _        t        |      | _	        t        j                  |j                  |j                        | _        t        j                  |j                  |j                        | _        y )Nr   eps)r   r    chunk_size_feed_forwardseq_len_dimr   r   r   intermediater   r   r   	LayerNormr&   layer_norm_epslayernorm_beforelayernorm_afterr+   s     r.   r    zVivitLayer.__init__0  s    '-'E'E$'/-f5!&) "V-?-?VEZEZ [!||F,>,>FDYDYZr/   r   r2   c                     | j                  |      }| j                  |      }||z   }| j                  |      }| j                  |      }| j	                  ||      }|S r   )r   r   r   r   r   )r,   r   hidden_states_normattention_outputlayer_outputs        r.   r?   zVivitLayer.forward:  si    !22=A>>*<= )=8 ++M:((6 {{<?r/   r   rI   s   @r.   r   r   -  s/    X[{ [U\\ ell r/   r   c                   H     e Zd Zdef fdZdej                  defdZ xZ	S )VivitEncoderr   c                     t         |           || _        t        j                  t        |j                        D cg c]  }t        |       c}      | _        d| _	        y c c}w NF)
r   r    r   r   
ModuleListrangenum_hidden_layersr   layergradient_checkpointing)r,   r   r   r-   s      r.   r    zVivitEncoder.__init__L  sN    ]]fF^F^@_#`1Jv$6#`a
&+# $as   A#r   r2   c                 d    t        | j                        D ]  \  }} ||      } t        |      S )N)last_hidden_state)	enumerater   r	   )r,   r   ilayer_modules       r.   r?   zVivitEncoder.forwardR  s5    (4 	8OA|(7M	8 ??r/   )
rA   rB   rC   r   r    rE   rF   r	   r?   rH   rI   s   @r.   r   r   K  s)    ,{ ,@U\\ @o @r/   r   c                   \     e Zd Zdef fdZdej                  dej                  fdZ xZS )VivitPoolerr   c                     t         |           t        j                  |j                  |j                        | _        t        j                         | _        y r   )r   r    r   r   r&   r   Tanh
activationr+   s     r.   r    zVivitPooler.__init__Z  s9    YYv1163E3EF
'')r/   r   r2   c                 \    |d d df   }| j                  |      }| j                  |      }|S )Nr   )r   r   )r,   r   first_token_tensorpooled_outputs       r.   r?   zVivitPooler.forward_  s6     +1a40

#566r/   r   rI   s   @r.   r   r   Y  s*    ${ $
U\\ ell r/   r   c                        e Zd ZU eed<   dZdZdZdZdgZ	dZ
dZdZdZeedZ ej$                          fd       Z xZS )	VivitPreTrainedModelr   vivitr0   videoTr   )r   
attentionsc                     t         |   |       t        |t              r?t	        j
                  |j                         t	        j
                  |j                         yy)zInitialize the weightsN)r   _init_weightsr   rK   initzeros_rO   rQ   )r,   rp   r-   s     r.   r   z"VivitPreTrainedModel._init_weightsy  sE     	f%fo.KK(()KK223 /r/   )rA   rB   rC   r   __annotations__base_model_prefixmain_input_nameinput_modalitiessupports_gradient_checkpointing_no_split_modules_supports_sdpa_supports_flash_attn_supports_flex_attn_supports_attention_backendr   r   _can_record_outputsrE   no_gradr   rH   rI   s   @r.   r   r   h  si    $O&*#%N"&#(
 U]]_4 4r/   r   c                        e Zd Zddedef fdZd Ze ed      e		 	 dde
j                  dz  d	ed
ee   defd                     Z xZS )
VivitModelr   add_pooling_layerc                    t         |   |       || _        t        |      | _        t        |      | _        t        j                  |j                  |j                        | _        |rt        |      nd| _        | j                          y)zv
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        r   N)r   r    r   rK   rU   r   encoderr   r   r&   r   	layernormr   pooler	post_init)r,   r   r   r-   s      r.   r    zVivitModel.__init__  sk    
 	 )&1#F+f&8&8f>S>ST->k&)D 	r/   c                 .    | j                   j                  S r   )rU   rP   )r,   s    r.   get_input_embeddingszVivitModel.get_input_embeddings  s    ///r/   F)tie_last_hidden_statesNr0   r1   rv   r2   c                     |t        d      | j                  ||      }| j                  |      }|j                  }| j	                  |      }| j
                  | j                  |      nd}t        ||      S )a  
        Examples:

        ```python
        >>> import av
        >>> import numpy as np

        >>> from transformers import VivitImageProcessor, VivitModel
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`list[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> # prepare video for the model
        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 3137, 768]
        ```"""
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        encoder_outputs = self.encoder(embedding_output)
        sequence_output = encoder_outputs.last_hidden_state
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output)


@auto_docstring(
    custom_intro="""
    ViViT Transformer model with a video classification head on top (a linear layer on top of the final hidden state
    of the [CLS] token) e.g. for Kinetics-400.

    <Tip>

    Note that it's possible to fine-tune ViViT on higher resolution images than the ones it has been trained on, by
    setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
    position embeddings to the higher resolution.

    </Tip>
    """
)
class VivitForVideoClassification(VivitPreTrainedModel):
    def __init__(self, config: VivitConfig):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.vivit = VivitModel(config, add_pooling_layer=False)

        # Classifier head
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> ImageClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Examples:

        ```python
        >>> import av
        >>> import numpy as np
        >>> import torch

        >>> from transformers import VivitImageProcessor, VivitForVideoClassification
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`list[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=4, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        ...     logits = outputs.logits

        >>> # model predicts one of the 400 Kinetics-400 classes
        >>> predicted_label = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_label])
        LABEL_116
        ```"""
        outputs = self.vivit(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs)
        sequence_output = outputs.last_hidden_state

        # classify on the final hidden state of the [CLS] token
        logits = self.classifier(sequence_output[:, 0, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(labels, logits, self.config, **kwargs)

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["VivitForVideoClassification", "VivitModel", "VivitPreTrainedModel"]