
"""Tokenization classes for ALBERT model."""

from tokenizers import Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import Unigram

from ...tokenization_utils_tokenizers import TokenizersBackend
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}


class AlbertTokenizer(TokenizersBackend):
    r"""
    Construct a "fast" ALBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on
    [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This
    tokenizer inherits from [`PreTrainedTokenizerFast`], which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods; a short usage example follows the argument list
    below.

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        keep_accents (`bool`, *optional*, defaults to `False`):
            Whether or not to keep accents when tokenizing.
        bos_token (`str`, *optional*, defaults to `"[CLS]"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"[SEP]"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of
            sequence. The token used is the `sep_token`.

            </Tip>

        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        add_prefix_space (`bool`, *optional*, defaults to `True`):
            Whether or not to add an initial space to the input. This allows the leading word to be treated just
            like any other word.
        trim_offsets (`bool`, *optional*, defaults to `True`):
            Whether the post processing step should trim offsets to avoid including whitespaces.
        vocab (`str` or `list[tuple[str, float]]`, *optional*):
            Custom vocabulary with `(token, score)` tuples. If not provided, vocabulary is loaded from `vocab_file`.
        vocab_file (`str`, *optional*):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
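
    Example (a minimal usage sketch; the `albert/albert-base-v2` checkpoint name is an assumption, any ALBERT
    checkpoint works):

    ```python
    >>> from transformers import AlbertTokenizer

    >>> tokenizer = AlbertTokenizer.from_pretrained("albert/albert-base-v2")
    >>> encoding = tokenizer("Hello world!")  # input_ids are wrapped as [CLS] ... [SEP] by the post-processor
    >>> tokens = tokenizer.convert_ids_to_tokens(encoding["input_ids"])
    ```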
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    model = Unigram

    def __init__(
        self,
        vocab: str | list[tuple[str, float]] | None = None,
        do_lower_case: bool = True,
        keep_accents: bool = False,
        bos_token: str = "[CLS]",
        eos_token: str = "[SEP]",
        unk_token: str = "<unk>",
        sep_token: str = "[SEP]",
        pad_token: str = "<pad>",
        cls_token: str = "[CLS]",
        mask_token: str = "[MASK]",
        add_prefix_space: bool = True,
        trim_offsets: bool = True,
        **kwargs,
    ):
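        # The construction below mirrors what a serialized `tokenizer.json` for ALBERT
        # encodes: a Unigram model plus SentencePiece-style normalization, Metaspace
        # pre-tokenization, and a "[CLS] ... [SEP]" template post-processor.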
        self.do_lower_case = do_lower_case
        self.keep_accents = keep_accents
        self.add_prefix_space = add_prefix_space
        self.trim_offsets = trim_offsets

        if vocab is not None:
            self._vocab_scores = vocab
        else:
            # Fallback vocabulary holding only the special tokens, each with score 0.0.
            self._vocab_scores = [
                (str(pad_token), 0.0),
                (str(unk_token), 0.0),
                (str(cls_token), 0.0),
                (str(sep_token), 0.0),
                (str(mask_token), 0.0),
            ]
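        # Note (an assumption about standard ALBERT SentencePiece vocabularies, stated for
        # illustration): ids conventionally run 0=<pad>, 1=<unk>, 2=[CLS], 3=[SEP], 4=[MASK],
        # which is the ordering the fallback list above follows and why `unk_id=1` below.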
      dfg| _        t        t        | j                  dd            | _        t        j                  dd      t        j                  dd      t        j                         t        j                         t        j                         t        j                  t        d      d	      g}| j                  sF|j                  t        j                                |j                  t        j                                | j                  r#|j                  t        j                                |j                  t        j                  t        d      d	             t        j                   |      | j                  _        |rd
nd}t%        j                   t%        j&                         t%        j(                  d|      g      | j                  _        t-        j(                  d|      | j                  _        t1        j2                  ddd| j                  j5                  t        |	            fd| j                  j5                  t        |            fg      | j                  _        t9        | t  d| j                  | j                  ||||	|||
||d| y )Ng           F)unk_idbyte_fallbackz``"z''z {2,} alwaysneveru   ▁)replacementprepend_schemez[CLS]:0 $A:0 [SEP]:0z![CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1[CLS][SEP])singlepairspecial_tokens)r   r   r   r   r   r   r   r   r   r   r    )r   r   r   r   _vocab_scoresstrr   r	   
_tokenizerr   ReplaceNFKDStripAccents	Lowercaser   appendSequence
normalizerr   WhitespaceSplit	Metaspacepre_tokenizerr   decoderr   TemplateProcessingtoken_to_idpost_processorsuper__init__)selfr   r   r   r   r   r   r   r   r   r   r   r   kwargslist_normalizersr(   	__class__s                   `/opt/pipecat/venv/lib/python3.12/site-packages/transformers/models/albert/tokenization_albert.pyrA   zAlbertTokenizer.__init__T   s     !1(*(!&D Y%Y%Y%Y%Z#&"D $""#
 c*c*$$&!!#g4
   ##K$4$4$67##K$<$<$>?##K$9$9$;< 3 3E'NC HI%0%9%9:J%K"%57(6(?(?..0((U>Z)
% #+"4"4We"f)3)F)F)4$//55c)nEF$//55c)nEF*
& 	 	
,,**!-%	
 	
    )NTFr)   r*   z<unk>r*   z<pad>r)   z[MASK]TT)__name__
__module____qualname____doc__VOCAB_FILES_NAMESvocab_files_namesmodel_input_namesr	   modelr0   listtuplefloatboolrA   __classcell__)rE   s   @rF   r   r      s    1f *$&67E 7;""      "!%!Y
T%U
+,,t3Y
 Y
 	Y

 Y
 Y
 Y
 Y
 Y
 Y
 Y
 Y
 Y
 Y
rG   r   N)rK   
tokenizersr   r   r   r   r   r   tokenizers.modelsr	   tokenization_utils_tokenizersr   utilsr   
get_loggerrH   loggerrL   r   __all__r.   rG   rF   <module>r\      sS    - Z Z % >  
		H	%#1EUV Q
' Q
h 
rG   