
"""Tokenization classes for Bert."""

import collections

from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import WordPiece

from ...tokenization_utils_tokenizers import TokenizersBackend
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class BertTokenizer(TokenizersBackend):
    r"""
    Construct a BERT tokenizer (backed by HuggingFace's tokenizers library). Based on WordPiece.

    This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab (`str` or `dict[str, int]`, *optional*):
            Custom vocabulary dictionary. If not provided, vocabulary is loaded from `vocab_file`.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
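
    Example (a minimal usage sketch; the ids shown assume the standard `bert-base-uncased`
    vocabulary, where `[CLS]` is 101 and `[SEP]` is 102):

    ```python
    >>> from transformers import BertTokenizer

    >>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    >>> tokenizer("hello world")["input_ids"]
    [101, 7592, 2088, 102]
    ```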
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "token_type_ids", "attention_mask"]
    model = WordPiece

    def __init__(
        self,
        vocab: str | dict[str, int] | None = None,
        do_lower_case: bool = True,
        unk_token: str = "[UNK]",
        sep_token: str = "[SEP]",
        pad_token: str = "[PAD]",
        cls_token: str = "[CLS]",
        mask_token: str = "[MASK]",
        tokenize_chinese_chars: bool = True,
        strip_accents: bool | None = None,
        **kwargs,
    ):
        self.do_lower_case = do_lower_case
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        # Fall back to a minimal vocabulary containing only the special tokens.
        if vocab is None:
            vocab = {
                str(pad_token): 0,
                str(unk_token): 1,
                str(cls_token): 2,
                str(sep_token): 3,
                str(mask_token): 4,
            }
        self._vocab = vocab
        self._tokenizer = Tokenizer(WordPiece(self._vocab, unk_token=str(unk_token)))

        self._tokenizer.normalizer = normalizers.BertNormalizer(
            clean_text=True,
            handle_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            lowercase=do_lower_case,
        )
        self._tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
        self._tokenizer.decoder = decoders.WordPiece(prefix="##")

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        cls_token_id = self.cls_token_id if self.cls_token_id is not None else 0
        sep_token_id = self.sep_token_id if self.sep_token_id is not None else 1
        # [CLS] $A [SEP] for single sequences, [CLS] $A [SEP] $B [SEP] for pairs.
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=f"{str(self.cls_token)}:0 $A:0 {str(self.sep_token)}:0",
            pair=f"{str(self.cls_token)}:0 $A:0 {str(self.sep_token)}:0 $B:1 {str(self.sep_token)}:1",
            special_tokens=[
                (str(self.cls_token), cls_token_id),
                (str(self.sep_token), sep_token_id),
            ],
        )


__all__ = ["BertTokenizer"]

# Backwards-compatibility alias for code that still imports the legacy "fast" name.
BertTokenizerFast = BertTokenizer