from dataclasses import dataclass

import torch
import torch.nn as nn

from ... import initialization as init
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging
from ...utils.generic import merge_with_config_defaults
from ...utils.import_utils import is_torchdynamo_compiling
from ...utils.output_capturing import capture_outputs
from ..auto import AutoModel
from ..llama.modeling_llama import (
    LlamaAttention,
    LlamaDecoderLayer,
    LlamaForCausalLM,
    LlamaMLP,
    LlamaModel,
    LlamaRMSNorm,
    LlamaRotaryEmbedding,
    TransformersKwargs,
)
from .configuration_csm import CsmConfig, CsmDepthDecoderConfig
from .generation_csm import CsmGenerationMixin


logger = logging.get_logger(__name__)


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for the model autoregressive outputs.
    """
)
class CsmOutputWithPast(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    depth_decoder_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction) of the depth decoder model.
    depth_decoder_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the depth decoder (scores for each vocabulary token before SoftMax).
    depth_decoder_past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
    depth_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
        one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

        Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    depth_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`.
    backbone_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction) of the backbone model.
    """

    loss: torch.FloatTensor | None = None
    logits: torch.FloatTensor | None = None
    past_key_values: Cache | None = None
    hidden_states: tuple[torch.FloatTensor, ...] | None = None
    attentions: tuple[torch.FloatTensor, ...] | None = None
    depth_decoder_loss: torch.FloatTensor | None = None
    depth_decoder_logits: torch.FloatTensor | None = None
    depth_decoder_past_key_values: Cache | None = None
    depth_decoder_hidden_states: tuple[torch.FloatTensor, ...] | None = None
    depth_decoder_attentions: tuple[torch.FloatTensor, ...] | None = None
    backbone_loss: torch.FloatTensor | None = None


class CsmRMSNorm(LlamaRMSNorm):
    pass


class CsmRotaryEmbedding(LlamaRotaryEmbedding):
    pass


class CsmMLP(LlamaMLP):
    pass


class CsmAttention(LlamaAttention):
    pass


class CsmDecoderLayer(LlamaDecoderLayer):
    pass
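

# Usage note: when `labels` are provided to `CsmForConditionalGeneration.forward`, `loss`
# is the sum of `backbone_loss` and `depth_decoder_loss`, so both terms can be monitored
# separately. A minimal, hypothetical training-step sketch (`model` and `inputs` assumed):
#
#     output = model(**inputs)   # CsmOutputWithPast
#     output.loss.backward()     # loss == backbone_loss + depth_decoder_loss
#     print(output.backbone_loss.item(), output.depth_decoder_loss.item())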


@auto_docstring(
    custom_intro="""
    The bare Csm Model outputting raw hidden-states without any specific head on top.
    """
)
class CsmPreTrainedModel(PreTrainedModel):
    config: CsmConfig
    base_model_prefix = "model"
    input_modalities = ("audio", "text")
    supports_gradient_checkpointing = True
    _no_split_modules = ["CsmDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": CsmDecoderLayer,
        "attentions": CsmAttention,
    }

    @torch.no_grad()
    def _init_weights(self, module):
        super()._init_weights(module)
        if isinstance(module, CsmCodebooksHead):
            num_codebooks = module.num_codebooks
            for i in range(num_codebooks - 1):
                init.normal_(module.weight[i], mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, CsmBackboneModelEmbeddings):
            module.audio_tokens_offsets.copy_(torch.arange(self.config.num_codebooks) * self.config.vocab_size)


@auto_docstring
class CsmDepthDecoderModel(LlamaModel):
    config: CsmDepthDecoderConfig

    def __init__(self, config):
        super().__init__(config)
        self.embed_tokens = nn.Embedding((config.num_codebooks * config.vocab_size), config.backbone_hidden_size)
        self.inputs_embeds_projector = nn.Linear(config.backbone_hidden_size, config.hidden_size, bias=False)

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        backbone_last_hidden_state: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPast:
        r"""
        backbone_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, backbone_hidden_size)`, *optional*):
            The last hidden state of the backbone model. Such input is required when the first codebook token (the one generated by the backbone model)
            is provided in the `input_ids` argument.
        """
        if position_ids is not None and not is_torchdynamo_compiling():
            logger.warning_once(
                "Custom `position_ids` were provided but will be ignored. CSM depth decoder automatically determines "
                "position_ids from `cache_position` and as it requires them to be identical across the batch, the "
                "provided position_ids will be ignored."
            )
            position_ids = None

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds.")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            inputs_seq_length = inputs_embeds.shape[1] if inputs_embeds is not None else input_ids.shape[1]
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_seq_length, device=device)

        if inputs_embeds is None:
            # each sequence position corresponds to a codebook, offset the ids into the flattened embedding table
            codebook_idxs = torch.clamp(cache_position - 1, min=0)
            offset = codebook_idxs * self.vocab_size
            inputs_embeds = self.embed_tokens(input_ids + offset)

            input_ids_are_first_codebook = cache_position[0] == 0
            if backbone_last_hidden_state is not None:
                inputs_embeds[:, 0] = backbone_last_hidden_state
            elif not is_torchdynamo_compiling() and input_ids_are_first_codebook:
                logger.warning(
                    "When the first codebook token is provided, `backbone_last_hidden_state` should also be provided "
                    "for correct inference."
                )

        inputs_embeds = self.inputs_embeds_projector(inputs_embeds)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # position_ids are inferred from cache_position, identical across the batch
        position_ids = cache_position.unsqueeze(0)
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )


class CsmCodebooksHead(nn.Module):
    def __init__(self, hidden_size, num_codebooks, vocab_size):
        super().__init__()
        self.num_codebooks = num_codebooks
        self.weight = nn.Parameter(torch.empty(self.num_codebooks - 1, hidden_size, vocab_size))

    def forward(self, hidden_states, cache_position=None):
        if cache_position is None:
            seq_length = hidden_states.shape[1]
            codebook_weight = self.weight[torch.arange(seq_length)]
        else:
            codebook_idxs = cache_position - 1
            codebook_weight = self.weight[codebook_idxs]

        # apply a distinct linear projection to each codebook position
        hidden_states = [
            nn.functional.linear(hidden_states[:, codebook_idx, :], codebook_weight[codebook_idx].T)
            for codebook_idx in range(codebook_weight.shape[0])
        ]
        hidden_states = torch.stack(hidden_states, dim=1)

        return hidden_states
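

# Shape sketch for `CsmCodebooksHead` (toy dimensions, illustrative only): the head stores
# `num_codebooks - 1` independent projection matrices and applies the i-th one to the
# hidden state at sequence position i, so every codebook gets its own output distribution.
#
#     head = CsmCodebooksHead(hidden_size=8, num_codebooks=4, vocab_size=16)
#     hidden = torch.randn(2, 3, 8)   # (batch, num_codebooks - 1, hidden_size)
#     logits = head(hidden)           # (2, 3, 16): one vocab distribution per codebook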


@auto_docstring(
    custom_intro="""
    The CsmDepthDecoder Model transformer, with a [`CsmCodebooksHead`] on top,
    which can be seen a position-specific language modeling head, allowing to use a different linear layer for each codebook
    (e.g. position 0 is the first codebook and uses the first codebook head, etc.)
    """
)
class CsmDepthDecoderForCausalLM(LlamaForCausalLM, GenerationMixin):
    _tied_weights_keys = None
    _tp_plan = None
    _pp_plan = None

    def __init__(self, config):
        super().__init__(config)
        del self.lm_head
        self.codebooks_head = CsmCodebooksHead(config.hidden_size, config.num_codebooks, config.vocab_size)
        self.model = CsmDepthDecoderModel(config)

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Cache | None = None,
        attention_mask: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ):
        model_inputs = super().prepare_inputs_for_generation(
            input_ids, past_key_values, attention_mask, inputs_embeds, cache_position, **kwargs
        )

        # the backbone's last hidden state is only needed on the first generation step
        is_first_generation_step = model_inputs["cache_position"][0] == 0
        if not is_first_generation_step:
            model_inputs.pop("backbone_last_hidden_state")

        # position_ids are inferred from cache_position
        model_inputs.pop("position_ids")

        return model_inputs

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        backbone_last_hidden_state: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | CausalLMOutputWithPast:
        r"""
        backbone_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, backbone_hidden_size)`, *optional*):
            The last hidden state of the backbone model. Such input is required when the first codebook token (the one generated by the backbone model)
            is provided in the `input_ids` argument.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        outputs = self.model(
            input_ids=input_ids,
            backbone_last_hidden_state=backbone_last_hidden_state,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]

        if isinstance(logits_to_keep, int):
            if logits_to_keep == 0:
                # skip idx 0 logits since it's for the concatenated backbone last hidden state
                slice_indices = slice(1, None)
            else:
                slice_indices = slice(-logits_to_keep, None)
        else:
            slice_indices = logits_to_keep

        logits = self.codebooks_head(
            hidden_states[:, slice_indices, :], cache_position[slice_indices] if cache_position is not None else None
        )
        logits = logits.contiguous()

        loss = None
        if labels is not None:
            shift_labels = labels[..., 1:].contiguous()
            loss = self.loss_function(
                logits=logits, labels=None, vocab_size=self.config.vocab_size, shift_labels=shift_labels, **kwargs
            )

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class CsmBackboneModelEmbeddings(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.embed_audio_tokens = nn.Embedding((config.num_codebooks * config.vocab_size), config.hidden_size)
        self.register_buffer(
            "audio_tokens_offsets", torch.arange(config.num_codebooks) * config.vocab_size, persistent=False
        )

    def forward(self, input_ids):
        input_embeds = self.embed_audio_tokens(input_ids + self.audio_tokens_offsets)
        input_embeds = input_embeds.sum(dim=2)
        return input_embeds


@auto_docstring
class CsmBackboneModel(LlamaModel):
    def __init__(self, config):
        super().__init__(config)
        self.embed_tokens = CsmBackboneModelEmbeddings(config)

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(self, **super_kwargs):
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks) or (batch_size, sequence_length)`):
            1. (batch_size, sequence_length): corresponds to the input sequence prepared with the processor from the text prompt. Such input
            requires `input_values` to be provided so that audio can be encoded in codebook tokens and then merged with the text tokens.

            2. (batch_size, sequence_length, num_codebooks): codebook tokens generated during the autoregressive decoding. Such input is not meant to be used by end users.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        """
        return super().forward(**super_kwargs)
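

# Implementation note (illustrative, toy numbers): `CsmBackboneModelEmbeddings` gives each
# codebook its own slice of one flattened table by adding `k * vocab_size` to the ids of
# codebook k, then sums the per-codebook embeddings into a single frame embedding. With
# vocab_size=5 and num_codebooks=3, `audio_tokens_offsets == tensor([0, 5, 10])`, so a
# frame `[2, 2, 2]` looks up rows 2, 7 and 12. The flat (batch_size, sequence_length)
# text layout documented above never reaches these embeddings directly: it is merged with
# `input_values` into `inputs_embeds` by `CsmForConditionalGeneration` beforehand.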


@auto_docstring(
    custom_intro="""
    The Csm model consists of two llama-like auto-regressive transformer models: a backbone model that predicts the first codebook token and a depth decoder that predicts the other codebook tokens.
    """
)
class CsmForConditionalGeneration(CsmPreTrainedModel, CsmGenerationMixin):
    _tied_weights_keys = {
        "backbone_model.embed_tokens.embed_audio_tokens.weight": "depth_decoder.model.embed_tokens.weight",
    }

    def __init__(self, config):
        super().__init__(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.embed_text_tokens = nn.Embedding(config.text_vocab_size, config.hidden_size)
        self.backbone_model = CsmBackboneModel._from_config(config)
        self.depth_decoder = CsmDepthDecoderForCausalLM._from_config(config.depth_decoder_config)
        self.codec_model = AutoModel.from_config(config.codec_config)
        self.post_init()

    def get_input_embeddings(self):
        return self.backbone_model.embed_tokens

    def set_input_embeddings(self, value):
        self.backbone_model.embed_tokens = value

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        if kwargs.get("output_loading_info", False):
            model, loading_info = super().from_pretrained(*args, **kwargs)
        else:
            model = super().from_pretrained(*args, **kwargs)

        # copy the depth decoder generation config attributes (stored with a `depth_decoder_`
        # prefix in the model generation config) to the depth decoder generation config
        prefix = "depth_decoder_"
        prefix_len = len(prefix)
        depth_decoder_attrs = {
            attr[prefix_len:]: value
            for attr, value in vars(model.generation_config).items()
            if attr.startswith(prefix)
        }
        vars(model.depth_decoder.generation_config).update({"_from_model_config": False, **depth_decoder_attrs})

        # remove the prefixed attributes from the model generation config
        for attr in depth_decoder_attrs:
            delattr(model.generation_config, prefix + attr)

        if "output_loading_info" in kwargs:
            return model, loading_info

        return model

    def save_pretrained(self, *args, **kwargs):
        # copy the depth decoder generation config attributes to the model generation config,
        # prefixed with `depth_decoder_`
        prefix = "depth_decoder_"
        depth_decoder_attrs = self.depth_decoder.generation_config.to_diff_dict()
        depth_decoder_attrs.pop("transformers_version", None)
        for attr, value in depth_decoder_attrs.items():
            setattr(self.generation_config, prefix + attr, value)

        super().save_pretrained(*args, **kwargs)

    def _merge_input_ids_with_input_values(
        self,
        input_ids: torch.Tensor | None = None,
        input_values: torch.Tensor | None = None,
        input_values_cutoffs: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
    ):
        r"""
        Merges the input_ids and input_values to produce a single inputs_embeds tensor:
        1 - Infers the codec model on the input_values to retrieve codebook token.
        2 - Embeds codebook tokens and places them at the correct positions in the inputs_embeds tensor.
        3 - If labels are provided, expands them to match codebook dimensions and position the target codebook tokens in the inputs_embeds tensor.

        Args:
            input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`):
                The input ids to embed.
            input_values (`torch.Tensor` of shape `(batch_size, channels, audio_sequence_length)`):
                The audio input values to embed.
            input_values_cutoffs (`torch.Tensor` of shape `(batch_size, max_num_audio)`):
                The cutoffs of the audio input values relative to its batch index, padded with -1 when no audio.
        """
        inputs_embeds = self.embed_text_tokens(input_ids)

        if input_values is not None:
            # infer input_values_mask
            input_values_cutoffs = nn.functional.pad(input_values_cutoffs, (1, 0))
            audio_lengths = input_values_cutoffs[input_values_cutoffs >= 0].diff()
            audio_lengths = audio_lengths[audio_lengths > 0]
            input_values_mask = torch.arange(input_values_cutoffs.max(), device=input_values.device).expand(
                len(audio_lengths), -1
            )
            input_values_mask = input_values_mask < audio_lengths.unsqueeze(1)

            # =======================================
            # TODO: @eustlb, this should be batched !!!
            # but requires making sure batched inference of the codec model works as intended
            with torch.no_grad():
                audio_tokens_list = []
                for batch_input_values, batch_input_values_cutoffs in zip(input_values, input_values_cutoffs):
                    batch_input_values_cutoffs = batch_input_values_cutoffs[batch_input_values_cutoffs >= 0]
                    for i in range(batch_input_values_cutoffs.shape[0] - 1):
                        start_idx = batch_input_values_cutoffs[i]
                        end_idx = batch_input_values_cutoffs[i + 1]
                        audio_batch = batch_input_values[..., start_idx:end_idx]
                        codec_outputs = self.codec_model.encode(audio_batch.unsqueeze(0))
                        codebook_ids = codec_outputs.audio_codes.transpose(1, -1)
                        audio_tokens_list.append(codebook_ids[0])

                max_audio_frames = max(el.shape[0] for el in audio_tokens_list)
                batched_audio_token_ids = torch.stack(
                    [nn.functional.pad(el, (0, 0, 0, max_audio_frames - el.shape[0])) for el in audio_tokens_list]
                )
                audio_codes_mask = self.codec_model.get_audio_codes_mask(input_values_mask)
            # =======================================

            audio_token_id = self.config.audio_token_id
            audio_token_mask = input_ids == audio_token_id

            audio_embeds = self.backbone_model.embed_tokens(batched_audio_token_ids)
            inputs_embeds[audio_token_mask] = audio_embeds[audio_codes_mask]

            # same for the audio eos token
            audio_eos_frame_ids = (
                torch.ones((1, 1, self.config.num_codebooks), device=input_ids.device, dtype=torch.long)
                * self.config.codebook_eos_token_id
            )
            audio_eos_embeds = self.backbone_model.embed_tokens(audio_eos_frame_ids).squeeze(1)

            audio_eos_token_mask = input_ids == self.config.audio_eos_token_id
            inputs_embeds[audio_eos_token_mask] = audio_eos_embeds.repeat(audio_eos_token_mask.sum(), 1)

            # if labels are provided, expand them to the (batch_size, seq_length, num_codebooks) shape
            if labels is not None:
                labels_expanded = labels.unsqueeze(-1).repeat(1, 1, self.config.num_codebooks)
                labels_expanded[audio_token_mask] = batched_audio_token_ids[audio_codes_mask]
                labels_expanded[audio_eos_token_mask] = audio_eos_frame_ids
                # frames labeled -101 are used only by the backbone: mask them out of the depth decoder loss
                depth_decoder_ignore_frames_idxs = (labels == -101).nonzero(as_tuple=True)
                labels_expanded[depth_decoder_ignore_frames_idxs[0], depth_decoder_ignore_frames_idxs[1], 1:] = -100
                labels = labels_expanded

        return {"inputs_embeds": inputs_embeds, "labels": labels}

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        attention_mask: torch.Tensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ):
        model_inputs = super().prepare_inputs_for_generation(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            **kwargs,
        )

        # when the input is the (batch_size, sequence_length) text prompt, merge the audio
        # `input_values` into `inputs_embeds` before running the backbone
        if input_ids is not None and input_ids.ndim == 2 and kwargs.get("input_values") is not None:
            merged_inputs = self._merge_input_ids_with_input_values(
                input_ids=input_ids,
                input_values=kwargs.get("input_values"),
                input_values_cutoffs=kwargs.get("input_values_cutoffs"),
                labels=kwargs.get("labels"),
            )
            model_inputs.update(
                {"inputs_embeds": merged_inputs["inputs_embeds"], "labels": merged_inputs["labels"], "input_ids": None}
            )

        return model_inputs

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        input_values: torch.FloatTensor | None = None,
        input_values_cutoffs: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | CsmOutputWithPast:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks) or (batch_size, sequence_length)`):
            1. (batch_size, sequence_length): corresponds to the input sequence prepared with the processor from the text prompt. Such input
            requires `input_values` to be provided so that audio can be encoded in codebook tokens and then merged with the text tokens.

            2. (batch_size, sequence_length, num_codebooks): codebook tokens generated during the autoregressive decoding. Such input is not meant to be used by end users.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        input_values_cutoffs (`torch.Tensor` of shape `(batch_size, max_num_audio)`, *optional*):
            Specify the end positions of audio segments within each batch entry, relative to the concatenated audio input.
            If a batch entry has fewer segments than the maximum, it is padded with -1. For example, in a batch of 2 sequences
            where the first contains 2 audio segments of length l1, and the second contains 1 audio segment of length l2,
            the input_values_cutoffs would be: [[l1, 2 * l1], [l2, -1]].
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[config.audio_token_id, -100, -101]`.
            Requires targeted `input_values` to be provided as audio tokens will be inferred from it using the `codec_model`.
            - `config.audio_token_id` indicates an audio frames (considering sequence length elements as frames)
            - `-100` will be ignored in the loss computation
            - `-101` indicates the audio frame will be used only for the backbone model (using the first codebook token as labels)

            Such labels can be prepared using `output_labels=True` when calling [`CsmProcessor`].
        logits_to_keep (`int` or `torch.Tensor`, *optional*):
            Kept for compatibility. Does not support another value than:
            1. `0`, which is equivalent to keeping all logits, used in the training regime
            2. `1`, which is equivalent to keeping only the last logit, used in the generation regime

        Example:

        ```python
        >>> import torch
        >>> from transformers import CsmForConditionalGeneration, AutoProcessor
        >>> from datasets import load_dataset, Audio

        >>> model_id = "sesame/csm-1b"
        >>> torch_device = "cuda" if torch.cuda.is_available() else "cpu"

        >>> processor = AutoProcessor.from_pretrained(model_id)

        >>> ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
        >>> # ensure the audio is 24kHz
        >>> ds = ds.cast_column("audio", Audio(sampling_rate=24000))

        >>> conversation = []
        >>> # prepare a conversation with text and corresponding audio
        >>> for text, audio, speaker_id in zip(ds[:4]["text"], ds[:4]["audio"], ds[:4]["speaker_id"]):
        ...     conversation.append(
        ...         {
        ...             "role": f"{speaker_id}",
        ...             "content": [{"type": "text", "text": text}, {"type": "audio", "path": audio["array"]}],
        ...         }
        ...     )

        >>> inputs = processor.apply_chat_template(
        ...     conversation,
        ...     tokenize=True,
        ...     return_dict=True,
        ...     output_labels=True,
        ... ).to(torch_device)

        >>> model = CsmForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
        >>> output = model(**inputs)
        >>> output.loss.backward()
        ```"""
        if input_ids is not None and input_ids.ndim == 2:
            merged_inputs = self._merge_input_ids_with_input_values(
                input_ids, input_values, input_values_cutoffs, labels
            )
            inputs_embeds = merged_inputs["inputs_embeds"]
            labels = merged_inputs["labels"]
            input_ids = None

        backbone_outputs = self.backbone_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        backbone_hidden_states = backbone_outputs[0]
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        backbone_logits = self.lm_head(backbone_hidden_states[:, slice_indices, :])

        loss = None
        backbone_loss = None
        depth_decoder_loss = None
        depth_decoder_outputs = None
        if labels is not None:
            # select the first codebook as labels for the backbone model
            backbone_labels = labels[:, :, 0]
            backbone_loss = self.loss_function(
                logits=backbone_logits, labels=backbone_labels, vocab_size=self.config.vocab_size, **kwargs
            )

            # for the depth decoder, select the frames to train on: those where the label
            # is not uniformly `-100` across the codebook dimension
            train_mask = ~(labels[:, :, 1:] == -100).all(dim=-1)
            depth_decoder_input_ids = labels[train_mask][..., : self.config.num_codebooks - 1]
            # add a placeholder in position 0 that will be replaced by the backbone_last_hidden_state
            depth_decoder_input_ids = nn.functional.pad(depth_decoder_input_ids, (1, 0), value=0)

            train_idxs = train_mask.nonzero(as_tuple=True)
            backbone_last_hidden_states = backbone_hidden_states[train_idxs[0], train_idxs[1] - 1, :]
            depth_decoder_labels = labels[train_mask]

            depth_decoder_outputs = self.depth_decoder(
                input_ids=depth_decoder_input_ids,
                backbone_last_hidden_state=backbone_last_hidden_states,
                use_cache=use_cache,
                labels=depth_decoder_labels,
                return_dict=True,
            )

            depth_decoder_loss = depth_decoder_outputs.loss
            loss = backbone_loss + depth_decoder_loss

        return CsmOutputWithPast(
            loss=loss,
            backbone_loss=backbone_loss,
            depth_decoder_loss=depth_decoder_loss,
            logits=backbone_logits,
            past_key_values=backbone_outputs.past_key_values,
            hidden_states=backbone_outputs.hidden_states,
            attentions=backbone_outputs.attentions,
            depth_decoder_logits=depth_decoder_outputs.logits if depth_decoder_outputs is not None else None,
            depth_decoder_past_key_values=depth_decoder_outputs.past_key_values
            if depth_decoder_outputs is not None
            else None,
            depth_decoder_hidden_states=depth_decoder_outputs.hidden_states
            if depth_decoder_outputs is not None
            else None,
            depth_decoder_attentions=depth_decoder_outputs.attentions if depth_decoder_outputs is not None else None,
        )


__all__ = [
    "CsmOutputWithPast",
    "CsmPreTrainedModel",
    "CsmBackboneModel",
    "CsmDepthDecoderModel",
    "CsmDepthDecoderForCausalLM",
    "CsmForConditionalGeneration",
]
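

# End-to-end usage sketch (illustrative; the prompt format and the `output_audio` flag
# mirror the docstring example above, but treat the exact processor call as an assumption):
#
#     processor = AutoProcessor.from_pretrained("sesame/csm-1b")
#     model = CsmForConditionalGeneration.from_pretrained("sesame/csm-1b")
#     inputs = processor("[0]Hello from Csm!", return_tensors="pt")
#     audio = model.generate(**inputs, output_audio=True)
#     processor.save_audio(audio, "output.wav")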