
    qi                        d dl Z d dlZd dlmZ d dlZd dlZd dlmZ d dlm	Z	 ddl
mZ ddlmZ ddlmZ dd	lmZ dd
lmZ ddlmZ ddlmZ ddlmZmZmZmZmZmZ ddl m!Z!m"Z" ddl#m$Z$ ddl%m&Z&m'Z'm(Z( ddl)m*Z*  G d de      Z+ G d dejX                        Z- G d dejX                        Z. G d dejX                        Z/ G d dejX                        Z0 G d dejX                        Z1	 	 dPdejX                  d ejd                  d!ejd                  d"ejd                  d#ejd                  dz  d$e3dz  d%e3d&e$e&   fd'Z4 G d( d)ejX                        Z5 G d* d+ejX                        Z6 G d, d-e      Z7 G d. d/ejX                        Z8 G d0 d1ejX                        Z9 G d2 d3ejX                        Z:e' G d4 d5e"             Z;	 	 dQd6e<e=e=f   d7e3d8e=d#ej|                  dz  d9e=d:ej~                  fd;Z@eZAe' G d< d=e;             ZBd>ZC e'd?@       G dA dBe;             ZD e'dC@       G dD dEe;             ZEe' G dF dGe;             ZF G dH dIejX                        ZG G dJ dKejX                        ZH e'dL@       G dM dNe;             ZIg dOZJy)R    N)Callable)nn)CrossEntropyLoss   )initialization)ACT2FN)is_deepspeed_zero3_enabled)is_fsdp_managed_module)create_bidirectional_mask)FlashAttentionKwargs)GradientCheckpointingLayer)BaseModelOutputCausalLMOutputSequenceClassifierOutputTokenClassifierOutputWav2Vec2BaseModelOutputXVectorOutput)ALL_ATTENTION_FUNCTIONSPreTrainedModel)Unpack)TransformersKwargsauto_docstringis_peft_available   )Data2VecAudioConfigc                   &     e Zd Zd fd	Zd Z xZS )Data2VecAudioConvLayerc                    t         |           |dkD  r|j                  |dz
     nd| _        |j                  |   | _        t        j                  | j                  | j                  |j                  |   |j                  |   |j                        | _
        t        j                  | j                  d      | _        t        |j                     | _        y )Nr   r   )kernel_sizestridebiasTelementwise_affine)super__init__conv_dimin_conv_dimout_conv_dimr   Conv1dconv_kernelconv_stride	conv_biasconv	LayerNorm
layer_normr   feat_extract_activation
activationselfconfiglayer_id	__class__s      f/opt/pipecat/venv/lib/python3.12/site-packages/transformers/models/data2vec/modeling_data2vec_audio.pyr%   zData2VecAudioConvLayer.__init__4   s    <DqL6??8a<8a"OOH5II**84%%h/!!
	 ,,t'8'8TR !?!?@    c                     | j                  |      }|j                  dd      }| j                  |      }|j                  dd      }| j                  |      }|S )N)r-   	transposer/   r1   r3   hidden_statess     r7   forwardzData2VecAudioConvLayer.forwardC   sV    		-0%//B76%//B76r8   r   __name__
__module____qualname__r%   r?   __classcell__r6   s   @r7   r   r   3   s    Ar8   r   c                   $     e Zd Z fdZd Z xZS )Data2VecAudioPadLayerc                 P    t         |           |dz  dk(  rd| _        y d| _        y )N   r   r   )r$   r%   num_pad_remove)r3   num_conv_pos_embeddingsr6   s     r7   r%   zData2VecAudioPadLayer.__init__O   s)    #:Q#>!#Car8   c                 V    | j                   dkD  r|d d d d d | j                    f   }|S Nr   )rK   r=   s     r7   r?   zData2VecAudioPadLayer.forwardS   s6    ")!Q0F43F3F2F0F*FGMr8   rA   rF   s   @r7   rH   rH   N   s    Kr8   rH   c                   $     e Zd Z fdZd Z xZS ) Data2VecAudioPositionalConvLayerc                 z   t         |           t        j                  |j                  |j                  |j
class Data2VecAudioPositionalConvLayer(nn.Module):
    """One grouped-convolution block of the relative positional embedding stack."""

    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=config.conv_pos_kernel_size,
            padding=config.conv_pos_kernel_size // 2,
            groups=config.num_conv_pos_embedding_groups,
        )
        self.padding = Data2VecAudioPadLayer(config.conv_pos_kernel_size)
        self.activation = ACT2FN[config.feat_extract_activation]
        # Normalization without learned affine parameters.
        self.layer_norm = nn.LayerNorm(config.hidden_size, elementwise_affine=False)

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        # LayerNorm expects channels last; the conv works channels-first.
        hidden_states = hidden_states.transpose(1, 2)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(1, 2)
        return self.activation(hidden_states)
                        D cg c]  }t        |       c}      | _        y c c}w N)r$   r%   r   
ModuleListrangerL   rP   layersr3   r4   _r6   s      r7   r%   z-Data2VecAudioPositionalConvEmbedding.__init__u   s@    mm?DVEcEc?de!-f5e
es   Ac                     |j                  dd      }| j                  D ]
  } ||      } |j                  dd      }|S rY   )r<   r`   )r3   r>   layers      r7   r?   z,Data2VecAudioPositionalConvEmbedding.forward{   sI    %//15[[ 	1E!-0M	1%//15r8   rA   rF   s   @r7   r[   r[   t   s    
class Data2VecAudioFeatureEncoder(nn.Module):
    """Construct the features from raw audio waveform"""

    def __init__(self, config):
        super().__init__()
        self.conv_layers = nn.ModuleList(
            [Data2VecAudioConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
        )
        self.gradient_checkpointing = False
        self._requires_grad = True

    def _freeze_parameters(self):
        """Permanently disable gradients for every parameter of the encoder."""
        for param in self.parameters():
            param.requires_grad = False
        self._requires_grad = False

    def forward(self, input_values):
        # Add a channel dimension: (batch, time) -> (batch, 1, time).
        hidden_states = input_values[:, None]

        # Make sure hidden_states require grad for gradient checkpointing.
        if self._requires_grad and self.training:
            hidden_states.requires_grad = True

        for conv_layer in self.conv_layers:
            hidden_states = conv_layer(hidden_states)

        return hidden_states
class Data2VecAudioFeatureProjection(nn.Module):
    """Projects extracted conv features into the transformer hidden size."""

    def __init__(self, config):
        super().__init__()
        self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
        self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        # The normalized-but-unprojected states are also returned (used for quantization).
        norm_hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.projection(norm_hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states, norm_hidden_states
                  j                  |d      }t        j
                  j                  ||| j                        }t        j                  ||      }	|	j                  dd      j                         }	|	|fS )Nr;         rJ   r   dim)pru   r   )
sizetorchmatmulr<   r   
functionalsoftmaxr   ru   
contiguous)
r   r   r   r   r   r   r   r   attn_weightsattn_outputs
             r7   eager_attention_forwardr      s     **R.D( <<s}}Q':;gEL!#n4==((2(>L==((6??([L,,|U3K''1-88:K$$r8   c                   (    e Zd ZdZ	 	 	 	 	 ddedededededed	edz  f fd
class Data2VecAudioAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config=None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config

        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool | None = False,
        **kwargs,
    ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
        """Input shape: Batch x Time x Channel"""
        # When `key_value_states` is given we attend over those states (cross-attention).
        is_cross_attention = key_value_states is not None

        bsz, tgt_len = hidden_states.shape[:2]
        src_len = key_value_states.shape[1] if is_cross_attention else tgt_len

        q_input_shape = (bsz, tgt_len, -1, self.head_dim)
        kv_input_shape = (bsz, src_len, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)

        current_states = key_value_states if is_cross_attention else hidden_states
        key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2)
        value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2)

        # Dispatch to the configured attention backend (sdpa/flash/...), defaulting to eager.
        attention_interface = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.dropout,
            scaling=self.scaling,
            output_attentions=output_attentions,
            **kwargs,
        )

        attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights, None
class Data2VecAudioFeedForward(nn.Module):
    """Position-wise feed-forward block: Linear -> activation -> Linear, each with dropout."""

    def __init__(self, config):
        super().__init__()
        self.intermediate_dropout = nn.Dropout(config.activation_dropout)

        self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be either a registry key or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

        self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.output_dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states):
        hidden_states = self.intermediate_dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.intermediate_dropout(hidden_states)

        hidden_states = self.output_dense(hidden_states)
        hidden_states = self.output_dropout(hidden_states)
        return hidden_states
                  d|      | _        t        j                  |j                        | _
        t        j                  |j                  |j                        | _        t        |      | _        t        j                  |j                  |j                        | _        y )NF)r   r   r   r   r4   r|   )r$   r%   r   rT   num_attention_headsattention_dropout	attentionr   r   r   r   r.   r~   r/   r   feed_forwardfinal_layer_normrW   s     r7   r%   z"Data2VecAudioEncoderLayer.__init__:  s    /((00,,
 zz&"7"78,,v'9'9v?T?TU4V< "V-?-?VEZEZ [r8   c                     |}| j                  |||      \  }}}| j                  |      }||z   }| j                  |      }|| j                  |      z   }| j	                  |      }|f}|r||fz  }|S )Nr   r   )r   r   r/   r   r   )r3   r>   r   r   attn_residualr   rb   outputss           r7   r?   z!Data2VecAudioEncoderLayer.forwardI  s    %)-.L] *8 *
&|Q ]3%56%(9(9-(HH--m< "&Gr8   rn   rA   rF   s   @r7   r   r   9  s    \r8   r   c                   r     e Zd Z fdZ	 	 	 	 d	dej
                  dej                  dz  dededef
dZ xZ	S )
Data2VecAudioEncoderc                    t         |           || _        t        |      | _        t        j                  |j                  |j                        | _	        t        j                  |j                        | _        t        j                  t        |j                        D cg c]  }t!        |       c}      | _        d| _        y c c}w )Nr|   F)r$   r%   r4   r[   pos_conv_embedr   r.   rT   r~   r/   r   r   r   r^   r_   num_hidden_layersr   r`   rj   ra   s      r7   r%   zData2VecAudioEncoder.__init__^  s    B6J,,v'9'9v?T?TUzz&"7"78mmPUV\VnVnPo$p1%>v%F$pq&+# %qs   !CNr>   r   r   output_hidden_statesreturn_dictc                    |rdnd }|rdnd }|5|j                  d      j                  dd|j                  d         }d|| <   t        | j                  ||      }| j                  |      }	||	j                  |j                        z   }| j                  |      }| j                  |      }t               xs t        |       }
| j                  D ]j  }|r||fz   }t        j                  g       }| j                  xr || j                  j                   k  }|r|
r ||||      }|d   }|rd}|sb|d   fz   }l |r||fz   }|st#        d	 |||fD              S t%        |||
      S )N r;   r   rJ   r   )r4   inputs_embedsr   r   NNc              3   &   K   | ]	  }||  y wr]   r   ).0vs     r7   	<genexpr>z/Data2VecAudioEncoder.forward.<locals>.<genexpr>  s     mq_`_lms   )last_hidden_stater>   
attentions)	unsqueezerepeatr   r   r4   r   todevicer/   r   r	   r
   r`   r   randru   	layerdropr   r   )r3   r>   r   r   r   r   all_hidden_statesall_self_attentionsexpand_attention_maskposition_embeddingssynced_gpusrd   dropout_probabilityskip_the_layerlayer_outputss                  r7   r?   zData2VecAudioEncoder.forwardg  s    #7BD$5b4%$2$<$<R$@$G$G1mNaNabcNd$e!45M0012;;')
 #11-@%(;(>(>}?S?S(TT6]302R6LT6R[[ 	PE#$58H$H! #(**R.!]]Z/BT[[EZEZ/ZN![ %!.Te! !.a 0 , &9]1=M<O&O#'	P*   1]4D Dm]4EGZ$[mmm++*
 	
r8   )NFFT)
rB   rC   rD   r%   r   tensorr   r   r?   rE   rF   s   @r7   r   r   ]  sX    , /3"'%* ;
||;
 t+;
  	;

 #;
 ;
r8   r   c                   $     e Zd Z fdZd Z xZS )Data2VecAudioAdapterLayerc                     t         |           t        j                  |j                  d|j                  z  |j
class Data2VecAudioAdapterLayer(nn.Module):
    """Strided conv adapter block; GLU halves the doubled channel dimension back."""

    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.output_hidden_size,
            2 * config.output_hidden_size,
            config.adapter_kernel_size,
            stride=config.adapter_stride,
            padding=1,
        )

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = nn.functional.glu(hidden_states, dim=1)
        return hidden_states
class Data2VecAudioAdapter(nn.Module):
    """Optional down-projection followed by strided adapter conv layers with LayerDrop."""

    def __init__(self, config):
        super().__init__()
        # Feature dim might need to be down-projected first.
        if config.output_hidden_size != config.hidden_size:
            self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
            self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)
        else:
            self.proj = self.proj_layer_norm = None

        self.layers = nn.ModuleList(
            Data2VecAudioAdapterLayer(config) for _ in range(config.num_adapter_layers)
        )
        self.layerdrop = config.layerdrop

    def forward(self, hidden_states):
        # Down-project hidden_states if necessary.
        if self.proj is not None and self.proj_layer_norm is not None:
            hidden_states = self.proj(hidden_states)
            hidden_states = self.proj_layer_norm(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)

        for layer in self.layers:
            layerdrop_prob = np.random.random()
            # At train time, randomly drop whole adapter layers (LayerDrop).
            if not self.training or (layerdrop_prob > self.layerdrop):
                hidden_states = layer(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states
dZ ej                         d        Zddej                  ez  d	edz  fd
Z	 ddedej                  fdZy)Data2VecAudioPreTrainedModelr4   data2vec_audiorv   audioTc                    t        |t              rt        j                  d|j                  j
                  z        }t        j                  |j                  j                  | |       t        j                  |j                  j                  | |       yt        |t              r+t        j                  |j                  j                  d       yt        |t        j                        rct        j                  |j                  d| j                   j"                         |j                   t        j$                  |j                         yyt        |t        j&                  t        j(                  f      rX|j                  t        j$                  |j                         |j                   t        j*                  |j                         yyt        |t        j,                        rt        j.                  |j                         |j                  `t        j                  |j0                  |j2                  |j4                  d   z  z        }t        j                  |j                  | |       yyy)zInitialize the weightsr   )abr   r   )meanstdN)r   rz   mathsqrtr   in_featuresinituniform_weightr!   rP   	constant_r-   r   r   normal_r4   initializer_rangezeros_r.   	GroupNormones_r)   kaiming_normal_rS   in_channelsr   )r3   r   ks      r7   _init_weightsz*Data2VecAudioPreTrainedModel._init_weights  s    f<=		!f//;;;<AMM&++22qbA>MM&++00QB!< @ANN6;;++Q/		*LLSdkk6S6ST{{&FKK( 'r|| <={{&FKK(}}(

6==) )		*  /{{&IIfmmv/A/AFDVDVWXDY/YZ[fkkaR15 ' +r8   Ninput_lengthsadd_adapterc                 T   || j                   j                  n|}d }t        | j                   j                  | j                   j                        D ]  \  }} ||||      } |rBt        | j                   j                        D ]   } ||d| j                   j                        }" |S )zH
        Computes the output length of the convolutional layers
        c                 >    t        j                  | |z
  |d      dz   S )Nfloor)rounding_moder   )r   divinput_lengthr   r    s      r7   _conv_out_lengthzWData2VecAudioPreTrainedModel._get_feat_extract_output_lengths.<locals>._conv_out_length  s"     99\K7wWZ[[[r8   r   )r4   r*  zipr*   r+   r_   r  r  )r3   r)  r*  r2  r   r    rb   s          r7    _get_feat_extract_output_lengthsz=Data2VecAudioPreTrainedModel._get_feat_extract_output_lengths  s    
 2=1Ddkk--+	\
 $'t{{'>'>@W@W#X 	QK,]KPM	Q 4;;99: _ 04;;C]C] ^_ r8   feature_vector_lengthr   c                     |j                  d      d d df   }| j                  ||      }|j                  t        j                        }|j
                  d   }t        j                  ||f|j                  |j                        }d|t        j                  |j
                  d   |j                        |dz
  f<   |j                  dg      j                  d      j                  dg      j                         }|S )Nr;   r   r*  r   )dtyper   r   )r   )cumsumr4  r   r   longr   zerosr8  r   arangeflipr   )r3   r5  r   r*  non_padded_lengthsoutput_lengths
batch_sizes          r7   "_get_feature_vector_attention_maskz?Data2VecAudioPreTrainedModel._get_feature_vector_attention_mask  s    
 ,22r2:1b5A>>?Q_j>k'**5::6#))!,
./~7K7KTbTiTi
 uv^%9%9!%<^EZEZ[]kno]opq',,bT299"=BBB4HMMOr8   r]   )rB   rC   rD   r   __annotations__base_model_prefixmain_input_nameinput_modalitiessupports_gradient_checkpointing_supports_flash_attn_supports_sdpa_supports_flex_attnr   no_gradr(  
LongTensorr   r   r4  rA  r   r8   r7   r  r    s    ($O&*#NU]]_6 62e>N>NQT>T cgjncn , Y]%(:?:J:Jr8   r  r   	mask_probmask_length	min_masksr   c                    | \  }dk  rt        d      kD  rt        d d d      t        j                  j                  d      j	                         fd}|-|j                         j                  d      j                         nt        |      D cg c]  } c}}t        j                  |ft        	      }	g }
 |      }|d
k(  r|	S |D ]  } ||      }t        j                  j                  t        j                  |dz
  z
        |d      }t        |      d
k(  rdz
  }n|d
   }t        j                  |t        j                  ||z
  t        j                   	      |z  g      }|
j#                  |        t        j$                  |
      }
t        j&                  |
dddddf   ||f      }
|
j)                  ||z        }
t        j                        ddddf   }t        j&                  |||f      j)                  ||z        }|
|z   }
|
j+                         dz
  kD  rdz
  |
|
dz
  kD  <   t        j,                  |	|
dd       |	S c c}w )an  
    Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
    ASR](https://huggingface.co/papers/1904.08779). Note that this method is not optimized to run on TPU and should be run on
    CPU as part of the preprocessing during training.

    Args:
        shape: The shape for which to compute masks. This should be of a tuple of size 2 where
               the first element is the batch size and the second element is the length of the axis to span.
        mask_prob:  The percentage of the whole axis (between 0 and 1) which will be masked. The number of
                    independently generated mask spans of length `mask_length` is computed by
                    `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
                    actual percentage will be smaller.
        mask_length: size of the mask
        min_masks: minimum number of masked spans
        attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
                        each batch dimension.
    r   z&`mask_length` has to be bigger than 0.zO`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: z and `sequence_length`: `c                     t        | z  z  z         }t        |      }|z  kD  rz  }| dz
  z
  |k  rt        | dz
  z
  d      }|S )z;Given input length, compute how many spans should be maskedr   r   )r   max)r1  num_masked_spanepsilonrM  rL  rN  sequence_lengths     r7   compute_num_masked_spanz6_compute_mask_indices.<locals>.compute_num_masked_spanK  so    i,6DwNOoy9 [(?:-<O ;?+o=!,+/"BAFOr8   Nr;   r8  r   F)replace)r   r  r  r   itemdetachsumtolistr_   r;  r   choicer<  lenconcatenateonesint32appendarraybroadcast_tor   rR  put_along_axis)r   rL  rM  r   rN  r@  rV  rb   r)  spec_aug_maskspec_aug_mask_idxsmax_num_masked_spanr1  rS  spec_aug_mask_idxdummy_mask_idxoffsetsrT  rU  s    `` `            @@r7   _compute_mask_indicesrl  %  s   0 #(JQABB_$]^i]j&&7q:
 	
 iinnQ$$&G $ % 	##B'..0',Z'89!o9  HHj/:$GM1/Ba% 51,? II,,IIlkAo67RW - 
  !Q& -q0N.q1NNN(;o(MUWU]U] ^ao op
 	!!"34/52 "45 1a:&5H+(V ,33J@SVa@ab ii$T4]3Goog
4G'UV^^'+5G ,g5 /A"55GVYZGZ-!0CCD m%7B?w :s   $	I+c                   ,    e Zd Zdef fdZd Z	 	 ddej                  dej                  dz  dej                  dz  fdZ	e
	 	 	 	 	 dd	ej                  dz  dej                  dz  dej                  dz  d
edz  dedz  dedz  deez  fd       Z xZS )Data2VecAudioModelr4   c                    t         |   |       || _        t        |      | _        t        |      | _        |j                  dkD  s|j                  dkD  rEt        j                  t        j                  |j                        j                               | _        t!        |      | _        |j$                  rt'        |      nd | _        | j+                          y Nr   )r$   r%   r4   rf   feature_extractorrz   feature_projectionmask_time_probmask_feature_probr   	Parameterr   r   rT   r  masked_spec_embedr   encoderr*  r  adapter	post_initrW   s     r7   r%   zData2VecAudioModel.__init__  s     !<V!D"@"H   3&&*B*BS*H%'\\%,,v?Q?Q2R2[2[2]%^D"+F37=7I7I+F3t 	r8   c                 8    | j                   j                          yz
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        N)rq  rs   r3   s    r7   freeze_feature_encoderz)Data2VecAudioModel.freeze_feature_encoder  s    
 	113r8   Nr>   mask_time_indicesr   c                    t        | j                  dd      s|S |j                         \  }}}|)| j                  j	                  |j
                        ||<   n| j                  j                  dkD  r| j                  rt        ||f| j                  j                  | j                  j                  || j                  j                        }t        j                  ||j                  t        j                        }| j                  j	                  |j
                        ||<   | j                  j                  dkD  r| j                  rt        ||f| j                  j                  | j                  j                   | j                  j"                        }t        j                  ||j                  t        j                        }|dddf   j%                  d|d      }d||<   |S )	z
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://huggingface.co/papers/1904.08779).
        apply_spec_augmentTNr   )rL  rM  r   rN  )r   r8  )rL  rM  rN  r;   )getattrr4   r   rv  r   r8  rs  ru   rl  mask_time_lengthmask_time_min_masksr   r   r   r   rt  mask_feature_lengthmask_feature_min_masksexpand)r3   r>   r~  r   r@  rU  rT   mask_feature_indicess           r7   _mask_hidden_statesz&Data2VecAudioModel._mask_hidden_states  s    t{{$8$?   4A3E3E3G0
O[(/3/E/E/H/HI\I\/]M+,[[''!+ 5_-++44 KK88-++99! !&->}G[G[chcmcm n/3/E/E/H/HI\I\/]M+,;;((1,#8[)++77 KK;;++<<	$  $)<<0D]MaMainisis#t #74#@#G#GO]_#` 23M./r8   rv   r   r   r   r   c                 H   ||n| j                   j                  }||n| j                   j                  }||n| j                   j                  }| j	                  |      }|j                  dd      }|!| j                  |j                  d   |d      }| j                  |      \  }	}| j                  |	||      }	| j                  |	||||      }
|
d   }	| j                  | j                  |	      }	|s
|	|f|
dd z   S t        |	||
j                  |
j                  	      S )
a/  
        mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
            masked extracted features in *config.proj_codevector_dim* space.
        Nr   rJ   Fr7  )r~  r   r   r   r   r   r   )r   extract_featuresr>   r   )r4   r   r   use_return_dictrq  r<   rA  r   rr  r  rw  rx  Data2VecAudioBaseModelOutputr>   r   )r3   rv   r   r~  r   r   r   r   r  r>   encoder_outputss              r7   r?   zData2VecAudioModel.forward  sb     2C1N-TXT_T_TqTq$8$D $++JjJj 	 &1%<k$++B]B]11,?+55a;%!DD &&q)>u E N +/*A*ABR*S''00->~ 1 
 ,,)/!5# ' 
 (*<<# LL7M!#34qr7JJJ++-)77&11	
 	
r8   r   NNNNN)rB   rC   rD   r   r%   r}  r   FloatTensorrK  r  r   r   r   r   r  r?   rE   rF   s   @r7   rn  rn    s    2 "4 7;26	,((, !,,t3, ((4/	,\  /36:)-,0#'8
llT)8
 t+8
 !,,t3	8

  $;8
 #Tk8
 D[8
 
-	-8
 8
r8   rn  rJ   zu
    Data2VecAudio Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
    )custom_introc                        e Zd Z fdZd Ze	 	 	 	 	 ddej                  dz  dej                  dz  dedz  dedz  dedz  d	ej                  dz  d
e	e
z  fd       Z xZS )Data2VecAudioForCTCc                    t         |   |       t        |      | _        t	        j
                  |j                        | _        |j                  t        d| j                   d      t        |d      r|j                  r|j                  n|j                  }t	        j                  ||j                        | _        | j#                          y)aZ  
        config ([`Data2VecAudioForCTC`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`]  method to load the model weights.
        NzYou are trying to instantiate z with a configuration that does not define the vocabulary size of the language model head. Please instantiate the model as follows: `Data2VecAudioForCTC.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of your model's configuration.r*  )r$   r%   rn  r  r   r   final_dropoutr   
vocab_sizer   r6   hasattrr*  r   rT   r   lm_headry  )r3   r4   r   r6   s      r7   r%   zData2VecAudioForCTC.__init__,  s     	 08zz&"6"67$00@ AH H  *1)GFL^L^F%%djdvdv 	 yy!3V5F5FG 	r8   c                 L    | j                   j                  j                          yr{  r  rq  rs   r|  s    r7   r}  z*Data2VecAudioForCTC.freeze_feature_encoderG      
 	--@@Br8   Nrv   r   r   r   r   labelsr   c           
         ||n| j                   j                  }|I|j                         | j                   j                  k\  r"t	        d| j                   j                         | j                  |||||      }|d   }	| j                  |	      }	| j                  |	      }
d}|b||n$t        j                  |t        j                        }| j                  |j                  d            j                  t        j                        }|dk\  }|j                  d      }|j                  |      }t        j                   j#                  |
dt        j$                        j'                  dd      }t        j(                  j*                  j-                  d	
      5  t        j                   j/                  ||||| j                   j0                  | j                   j2                  | j                   j4                        }ddd       |s|
f|t6        d z   }||f|z   S |S t9        ||
|j:                  |j<                        S # 1 sw Y   ExY w)a  
        labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
            Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
            the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
            All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
            config.vocab_size - 1]`.
        Nz$Label values must be <= vocab_size: r  r   rW  r;   )r   r8  r   F)enabled)blank	reductionzero_infinitylosslogitsr>   r   )r4   r  rR  r  r   r  r   r  r   	ones_liker:  r4  r[  r   masked_selectr   r   log_softmaxfloat32r<   backendscudnnflagsctc_losspad_token_idctc_loss_reductionctc_zero_infinity_HIDDEN_STATES_START_POSITIONr   r>   r   )r3   rv   r   r   r   r   r  r   r   r>   r  r  r)  labels_masktarget_lengthsflattened_targets	log_probsoutputs                     r7   r?   zData2VecAudioForCTC.forwardN  s)   $ &1%<k$++B]B]&**,$++2H2H"HCDKKDZDZC[\]]%%)/!5# & 
  
]3m, #1"<%//R^fkfpfpBq  !AA.BTBTUWBXY\\]b]g]ghM !A+K(__R0N & 4 4[ A 11&b1V``abdefI%%++E+: 	}}--%!"++22"kk<<"&++"?"? . 	 Y)F)G!HHF)-)9TGf$EvEfG4I4IV]VhVh
 	
	 	s   A#IIr  )rB   rC   rD   r%   r}  r   r   r   r   r   r   r?   rE   rF   s   @r7   r  r  &  s    6C  /3)-,0#'&*E
llT)E
 t+E
  $;	E

 #TkE
 D[E
 t#E
 
	E
 E
r8   r  z
    Data2VecAudio Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
    SUPERB Keyword Spotting.
    c                        e Zd Z fdZd Zd Ze	 	 	 	 	 ddej                  dz  dej                  dz  de	dz  de	dz  d	e	dz  d
ej                  dz  de
ez  fd       Z xZS )&Data2VecAudioForSequenceClassificationc                    t         |   |       t        |d      r|j                  rt	        d      t        |      | _        |j                  dz   }|j                  r0t        j                  t        j                  |      |z        | _        t        j                  |j                  |j                         | _        t        j                  |j                   |j$                        | _        | j)                          y )Nr*  zdSequence classification does not support the use of Data2VecAudio adapters (config.add_adapter=True)r   )r$   r%   r  r*  r   rn  r  r   use_weighted_layer_sumr   ru  r   r`  layer_weightsr   rT   classifier_proj_size	projector
num_labels
classifierry  r3   r4   
num_layersr6   s      r7   r%   z/Data2VecAudioForSequenceClassification.__init__  s     6=)f.@.@v  18--1
((!#ejj.Dz.Q!RD6#5#5v7R7RS))F$?$?ARARS 	r8   c                 L    | j                   j                  j                          yr{  r  r|  s    r7   r}  z=Data2VecAudioForSequenceClassification.freeze_feature_encoder  r  r8   c                 P    | j                   j                         D ]	  }d|_         yz
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        FNr  ro   rp   rq   s     r7   freeze_base_modelz8Data2VecAudioForSequenceClassification.freeze_base_model  *    
 ((335 	(E"'E	(r8   Nrv   r   r   r   r   r  r   c                 <   ||n| j                   j                  }| j                   j                  rdn|}| j                  |||||      }| j                   j                  rr|t           }	t        j                  |	d      }	t        j                  j                  | j                  d      }
|	|
j                  ddd      z  j                  d      }	n|d   }	| j                  |	      }	||	j                  d      }n| j                  |	j                   d   |      }|j#                  d      j%                  dd|	j                   d         }d	|	| <   |	j                  d      |j                  d      j                  dd      z  }| j'                  |      }d}|Ft)               } ||j                  d| j                   j*                        |j                  d            }|s|f|t        d z   }||f|z   S |S t-        |||j.                  |j0                  
      S )  
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
            into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
            (`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
            To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
            into a tensor of type `torch.FloatTensor`. See [`Data2VecAudioProcessor.__call__`] for details.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        NTr  r   r   r;   r   rJ   r   r  )r4   r  r  r  r  r   stackr   r   r   r  r   r[  r  r  rA  r   r   r   r  r   r  r   r>   r   )r3   rv   r   r   r   r   r  r   r   r>   norm_weightspooled_outputpadding_maskexpand_padding_maskr  r  loss_fctr  s                     r7   r?   z.Data2VecAudioForSequenceClassification.forward  s!   0 &1%<k$++B]B]'+{{'I'ItOc%%)/!5# & 
 ;;--#$ABM!KK1=M==001C1C0LL*\->->r1a-HHMMRSMTM#AJM}5!)..1.5MBB=CVCVWXCY[ijL"."8"8"<"C"CAq-J]J]^_J`"a25M../)--!-4|7G7GA7G7N7S7STVXY7ZZM/')HFKKDKK,B,BCV[[QS_UDY)F)G!HHF)-)9TGf$EvE'!//))	
 	
r8   r  )rB   rC   rD   r%   r}  r  r   r   r   r   r   r   r?   rE   rF   s   @r7   r  r    s    "C(  /3)-,0#'&*C
llT)C
 t+C
  $;	C

 #TkC
 D[C
 t#C
 
)	)C
 C
r8   r  c                        e Zd Z fdZd Zd Ze	 	 	 	 	 ddej                  dz  dej                  dz  dej                  dz  de	dz  d	e	dz  d
e	dz  de
ez  fd       Z xZS )(Data2VecAudioForAudioFrameClassificationc                    t         |   |       t        |d      r|j                  rt	        d      t        |      | _        |j                  dz   }|j                  r0t        j                  t        j                  |      |z        | _        t        j                  |j                  |j                         | _        |j                   | _        | j%                          y )Nr*  zgAudio frame classification does not support the use of Data2VecAudio adapters (config.add_adapter=True)r   )r$   r%   r  r*  r   rn  r  r   r  r   ru  r   r`  r  r   rT   r  r  ry  r  s      r7   r%   z1Data2VecAudioForAudioFrameClassification.__init__  s     6=)f.@.@y  18--1
((!#ejj.Dz.Q!RD))F$6$68I8IJ ++r8   c                 L    | j                   j                  j                          yr{  r  r|  s    r7   r}  z?Data2VecAudioForAudioFrameClassification.freeze_feature_encoder  r  r8   c                 P    | j                   j                         D ]	  }d|_         yr  r  rq   s     r7   r  z:Data2VecAudioForAudioFrameClassification.freeze_base_model  r  r8   Nrv   r   r  r   r   r   r   c           	         ||n| j                   j                  }| j                   j                  rdn|}| j                  |||||      }| j                   j                  rr|t           }	t        j                  |	d      }	t        j                  j                  | j                  d      }
|	|
j                  ddd      z  j                  d      }	n|d   }	| j                  |	      }d}|\t               } ||j                  d| j                        t        j                   |j                  d| j                        d            }|s|f|t        d z   }|S t#        |||j$                  |j&                  	      S )
r  NTr  r   r   r;   r   )axisr  )r4   r  r  r  r  r   r  r   r   r   r  r   r[  r  r   r  argmaxr   r>   r   )r3   rv   r   r  r   r   r   r   r   r>   r  r  r  r  r  s                  r7   r?   z0Data2VecAudioForAudioFrameClassification.forward&  sj   0 &1%<k$++B]B]'+{{'I'ItOc%%)/!5# & 
 ;;--#$ABM!KK1=M==001C1C0LL*\->->r1a-HHMMRSMTM#AJM/')HFKKDOO<ell6;;WY[_[j[jKkrs>tuDY)F)G!HHFM$!//))	
 	
r8   r  )rB   rC   rD   r%   r}  r  r   r   r   r   r   r   r?   rE   rF   s   @r7   r  r    s     C(  /3&*)-,0#':
llT):
 t+:
 t#	:

  $;:
 #Tk:
 D[:
 
&	&:
 :
r8   r  c                   &     e Zd Zd fd	Zd Z xZS )AMSoftmaxLossc                     t         |           || _        || _        || _        t        j                  t        j                  ||      d      | _	        t        j                         | _        y )NT)rp   )r$   r%   scalemarginr  r   ru  r   randnr  r   r  )r3   	input_dimr  r  r  r6   s        r7   r%   zAMSoftmaxLoss.__init__e  sQ    
$ll5;;y*#EUYZ'')	r8   c                    |j                         }t        j                  j                  | j                  d      }t        j                  j                  |d      }t        j                  ||      }|| j                  z
  }t        j                  j                  || j                        }| j                  t        j                  |j                         ||      z  }| j                  ||      }|S )Nr   r   r   )flattenr   r   	normalizer  r   mmr  one_hotr  r  wherer   r  )	r3   r>   r  r  	cos_thetapsionehotr  r  s	            r7   r?   zAMSoftmaxLoss.forwardm  s    !((!(<//1/EHH]F3	$++%&&vt?ekk&++-iHHyy(r8   )g      >@g?rA   rF   s   @r7   r  r  d  s    *r8   r  c                   X     e Zd Zd fd	Zdej
                  dej
                  fdZ xZS )	TDNNLayerc                    t         |           |dkD  r|j                  |dz
     n|j                  |   | _        |j                  |   | _        |j
                  |   | _        |j                  |   | _        t        j                  | j                  | j                  z  | j                        | _        t        j                         | _        y )Nr   r   )r$   r%   tdnn_dimr'   r(   tdnn_kernelr   tdnn_dilationdilationr   r   kernelReLUr1   r2   s      r7   r%   zTDNNLayer.__init__|  s    <DqL6??8a<8foo^fNg"OOH5!--h7,,X6ii 0 043C3C CTEVEVW'')r8   r>   r   c                 &   t               rddlm} t               r+t        | j                        rt        j                  d       |j                  dd      }| j                  j                  j                  | j                  | j                  | j                        j                  dd      }t        j                  j                  ||| j                  j                   | j"                        }|j                  dd      }| j%                  |      }|S )Nr   )	LoraLayerzDetected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. You should exclude TDNNLayer from LoRA's target modules.r   rJ   )r  )r   peft.tuners.lorar  r   r  warningswarnr<   r  r   r(   r   r'   r   r   conv1dr!   r  r1   )r3   r>   r  r  s       r7   r?   zTDNNLayer.forward  s    2$++y1O &//15##(():):D<L<LdN^N^_iijkmno,,]FDKKDTDT_c_l_l,m%//156r8   r@   )rB   rC   rD   r%   r   r   r?   rE   rF   s   @r7   r  r  {  s#    $U\\ ell r8   r  zq
    Data2VecAudio Model with an XVector feature extraction head on top for tasks like Speaker Verification.
    c                        e Zd Z fdZd Zd Zdej                  ez  fdZ	e
	 	 	 	 	 ddej                  dz  dej                  dz  d	edz  d
edz  dedz  dej                  dz  deez  fd       Z xZS )Data2VecAudioForXVectorc                    t         |   |       t        |      | _        |j                  dz   }|j
                  r0t        j                  t        j                  |      |z        | _
        t        j                  |j                  |j                  d         | _        t        t!        |j                              D cg c]  }t#        ||       }}t        j$                  |      | _        t        j                  |j                  d   dz  |j(                        | _        t        j                  |j(                  |j(                        | _        t/        |j(                  |j0                        | _        | j5                          y c c}w )Nr   r   r;   rJ   )r$   r%   rn  r  r   r  r   ru  r   r`  r  r   rT   r  r  r_   r^  r  r^   tdnnxvector_output_dimrq  r  r  r  	objectivery  )r3   r4   r  rl   tdnn_layersr6   s        r7   r%   z Data2VecAudioForXVector.__init__  s    08--1
((!#ejj.Dz.Q!RD6#5#5vq7IJ5:3v;O5PQy+QQMM+.	!#6??2+>+BFD]D]!^))F$=$=v?X?XY&v'@'@&BSBST Rs   >Fc                 L    | j                   j                  j                          yr{  r  r|  s    r7   r}  z.Data2VecAudioForXVector.freeze_feature_encoder  r  r8   c                 P    | j                   j                         D ]	  }d|_         yr  r  rq   s     r7   r  z)Data2VecAudioForXVector.freeze_base_model  r  r8   r)  c                 V    d }| j                   j                  D ]  } |||d      } |S )z?
        Computes the output length of the TDNN layers
        c                     | |z
  |z  dz   S )Nr   r   r0  s      r7   r2  zJData2VecAudioForXVector._get_tdnn_output_lengths.<locals>._conv_out_length  s     !;.69A==r8   r   )r4   r  )r3   r)  r2  r   s       r7   _get_tdnn_output_lengthsz0Data2VecAudioForXVector._get_tdnn_output_lengths  s:    
	>
  ;;22 	LK,]KKM	L r8   Nrv   r   r   r   r   r  r   c                    ||n| j                   j                  }| j                   j                  rdn|}| j                  |||||      }| j                   j                  rr|t           }	t        j                  |	d      }	t        j                  j                  | j                  d      }
|	|
j                  ddd      z  j                  d      }	n|d   }	| j                  |	      }	| j                  D ]
  } ||	      }	 |%|	j                  d      }|	j!                  d      }n| j#                  |j                  d            }| j%                  |      }g }g }t'        |      D ]U  \  }}|j)                  |	|d|f   j                  d             |j)                  |	|d|f   j!                  d             W t        j                  |      }t        j                  |      }t        j*                  ||gd      }| j-                  |      }| j/                  |      }d}|| j1                  ||      }|s||f|t        d z   }||f|z   S |S t3        ||||j4                  |j6                        S )	r  NTr  r   r   r;   r   )r  r  
embeddingsr>   r   )r4   r  r  r  r  r   r  r   r   r   r  r   r[  r  r  r  r  r4  r  	enumeraterb  catrq  r  r  r   r>   r   )r3   rv   r   r   r   r   r  r   r   r>   r  
tdnn_layermean_featuresstd_featuresfeat_extract_output_lengthstdnn_output_lengthsrl   lengthstatistic_poolingoutput_embeddingsr  r  r  s                          r7   r?   zData2VecAudioForXVector.forward  s   0 &1%<k$++B]B]'+{{'I'ItOc%%)/!5# & 
 ;;--#$ABM!KK1=M==001C1C0LL*\->->r1a-HHMMRSMTM#AJM}5)) 	6J&}5M	6 !)..1.5M(,,,3L*.*O*OP^PbPbghPbPi*j'"&"?"?@["\ML&':; J	6$$]1gvg:%>%C%C%C%JK##M!WfW*$=$A$Aa$A$HIJ "KK6M ;;|4L!II}l&CL 223DE!23>>&&1D/07;X;Y3ZZF)-)9TGf$EvE(!//))
 	
r8   r  )rB   rC   rD   r%   r}  r  r   rK  r   r  r   r   r   r   r   r?   rE   rF   s   @r7   r  r    s    &C(e6F6F6L   /3)-,0#'&*P
llT)P
 t+P
  $;	P

 #TkP
 D[P
 t#P
 
	P
 P
r8   r  )r  r  r  r  rn  r  rp  rN   )Kr  r  collections.abcr   numpyr  r   r   torch.nnr    r   r  activationsr   integrations.deepspeedr	   integrations.fsdpr
   masking_utilsr   modeling_flash_attention_utilsr   modeling_layersr   modeling_outputsr   r   r   r   r   r   modeling_utilsr   r   processing_utilsr   utilsr   r   r   configuration_data2vec_audior   r   ModulerH   rP   r[   rf   rz   r   r   r   r   r   r   r   r   r  r  r   r   rK  ndarrayrl  r  rn  r  r  r  r  r  r  r  __all__r   r8   r7   <module>r      s  *   $    % & ! @ 7 6 B 9  G & J J =7 6BII ryy 6299 ")) :1RYY 1* !%II%<<% 
% <<	%
 LL4'% T\% % '(%8S/RYY S/lryy 0! : !HE
299 E
P		 $299 > K? K Kd /3tc?tt t $$t+	t
 t ZZtn  7  @
5 @
 @
F !"  
i
6 i

i
X e
-I e
e
P [
/K [
 [
|BII .		 @ 
C
: C

C
Lr8   