o
    hn                  
   @   s>  d dl Z d dlZd dlZd dlZd dlmZ d dlmZ d dlm	Z	m
Z
mZmZmZmZ d dlmZ d dlZd dlmZ d dlZd dlmZmZ d dlmZmZ d dlmZm Z  d dl!m"Z" d	d
l#m$Z$ d	dl%m&Z& d	dl'm(Z(m)Z) d	dl*m+Z+ d	dl,m-Z-m.Z.m/Z/m0Z0m1Z1m2Z2m3Z3m4Z4m5Z5m6Z6m7Z7m8Z8m9Z9m:Z:m;Z;m<Z<m=Z= d	dl>m?Z?m@Z@ d	dlAmBZB eB rd dlCmDZD d dlEmFZG d dlEmHZI e<JeKZLdd ZMeejNddejOejPejPeejNddeMeejNddejQdZRd)ddZSG dd de3e(ZTe7eTjUeT_UeTjUjVdureTjUjVjWdd d!d"eTjU_Vd#d$ ZX	d*d%d&ZYd'd( ZZdS )+    N)partial)UnpicklingError)AnyDictOptionalSetTupleUnion)
FrozenDictunfreeze)
from_bytesto_bytes)flatten_dictunflatten_dict)PRNGKey   )PretrainedConfig)custom_object_save)FlaxGenerationMixinGenerationConfig)*load_pytorch_checkpoint_in_flax_state_dict)FLAX_WEIGHTS_INDEX_NAMEFLAX_WEIGHTS_NAMESAFE_WEIGHTS_INDEX_NAMESAFE_WEIGHTS_NAMEWEIGHTS_INDEX_NAMEWEIGHTS_NAMEPushToHubMixinadd_code_sample_docstrings%add_start_docstrings_to_model_forwardcached_file	copy_funcdownload_urlhas_fileis_offline_modeis_remote_urlloggingreplace_return_docstrings)convert_file_size_to_intget_checkpoint_shard_files)is_safetensors_available)	safe_open)	load_file)	save_filec                 C   s   | t jd|   S )NgZd;?)jaxnnsigmoidx r3   t/var/www/html/construction_image-detection-poc/venv/lib/python3.10/site-packages/transformers/modeling_flax_utils.py
quick_geluE   s   r5   F)approximateT)gelurelusiluswishgelu_newr5   gelu_pytorch_tanhtanh10GBc              	   C   s  t |}g }i }d}d}t| dd}|D ]*}|| j|| jj }|| |kr0|| i }d}|| ||< ||7 }||7 }q|| t|dkrRt|d idfS i }	i }
t|D ]&\}}t	dd|d dd	t|dd}||
|< |
 D ]}||	|< qyqZd
|i}||	d}|
|fS )a  
    Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
    given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so
    there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For
    example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as
    [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].

    <Tip warning={true}>

    If one of the model's weight is bigger that `max_shard_size`, it will end up in its own sub-checkpoint which will
    have a size greater than `max_shard_size`.

    </Tip>

    Args:
        params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters.
        max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
            The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
            (like `"5MB"`).
    r   /sepr   Nz.msgpack-05dz-of-
total_size)metadata
weight_map)r(   r   sizedtypeitemsizeappendlenr   	enumeratereplacekeys)paramsmax_shard_sizesharded_state_dictscurrent_blockcurrent_block_sizerD   weightsitemweight_sizerF   shardsidxshard
shard_fileweight_namerE   indexr3   r3   r4   flax_shard_checkpointU   s:   


&

r]   c                   @   s  e Zd ZdZdZdZdZdZe Z	dde
jdfded	ejd
edede
jdefddZdIdejjd
ededefddZdd Zedd ZedefddZedefddZ edejfddZ!ede"eef fdd Z#ede$fd!d"Z%edefd#d$Z&e#j'de"eef fd%d Z#dIde"eef de
jd&e(de(fd'd(Z)dIde"eef d&e(fd)d*Z*dIde"eef d&e(fd+d,Z+dIde"eef d&e(fd-d.Z,ed/d0 Z-ed1d2 Z.edefd3d4Z/ee
jfddd5d5d5dd6d7d8e"ee0j1f de
jde2e"eee0j1f  d9e2e"ee0j1f  d:ed;ed<ed=e2e"eef  d>efd?d@Z3		5	A		5dJdBe"ee0j1f d=e2e"eef  dCefdDdEZ4edKdGdHZ5dS )LFlaxPreTrainedModela$  
    Base class for all models.

    [`FlaxPreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading,
    downloading and saving models.

    Class attributes (overridden by derived classes):

        - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class
          for this model architecture.
        - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived
          classes of the same architecture adding modules on top of the base model.
        - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP
          models, `pixel_values` for vision models and `input_values` for speech models).
    N 	input_ids)r   r   r   Tconfigmoduleinput_shapeseedrH   _do_initc           
      C   s   |d u rt d|d u rt d|| _|| _t|| _|| _|| _|  r*t	|nd | _
|| _|rB| | j|}tdd |}nt| j|d}	t|	| j}td| jj d || _ttt| | _|rp|| _d S d S )Nzconfig cannot be Nonezmodule cannot be Nonec                 S   s   | S Nr3   )rO   r3   r3   r4   <lambda>   s    z.FlaxPreTrainedModel.__init__.<locals>.<lambda>)rc   zVModel weights are not initialized as `_do_init` is set to `False`. Make sure to call `z2.init_weights` manually to initialize the weights.)
ValueError_config_moduler   keyrH   rc   can_generater   from_model_configgeneration_config_is_initializedinit_weightsr.   
eval_shaper   loggerinfo	__class____name___params_shape_treesetr   r   rN   _required_paramsrO   )
selfra   rb   rc   rd   rH   re   random_paramsparams_shape_treeinit_fnr3   r3   r4   __init__   s4   	

zFlaxPreTrainedModel.__init__rngrO   returnc                 C      t d|  )Nz&init method has to be implemented for NotImplementedError)ry   r~   rc   rO   r3   r3   r4   rp         z FlaxPreTrainedModel.init_weightsc                 C   r   )Nz8gradient checkpointing method has to be implemented for r   ry   r3   r3   r4   enable_gradient_checkpointing   r   z1FlaxPreTrainedModel.enable_gradient_checkpointingc                 K   s   | |fi |S )zZ
        All context managers that the model should be initialized under go here.
        r3   )clsra   kwargsr3   r3   r4   _from_config   s   z FlaxPreTrainedModel._from_configc                 C   s   dS )z=
        :str: Identifies that this is a Flax model.
        flaxr3   r   r3   r3   r4   	framework   s   zFlaxPreTrainedModel.frameworkc                 C      | j S rf   )ri   r   r3   r3   r4   ra         zFlaxPreTrainedModel.configc                 C   r   rf   )rj   r   r3   r3   r4   rb      r   zFlaxPreTrainedModel.modulec                 C   s   | j std| jS )Nz`params` cannot be accessed from model when the model is created with `_do_init=False`. You must call `init_weights` manually and store the params outside of the model and pass it explicitly where needed.)ro   rh   _paramsr   r3   r3   r4   rO      s
   zFlaxPreTrainedModel.paramsc                 C   r   rf   )rx   r   r3   r3   r4   required_params  r   z#FlaxPreTrainedModel.required_paramsc                 C   r   rf   )rv   r   r3   r3   r4   r{   
  r   z%FlaxPreTrainedModel.params_shape_treec                 C   s`   | j stdt|trt|}tt| }t| j	| dkr+td| j	|  || _
d S )Nz}`params` cannot be set from model when the model is created with `_do_init=False`. You store the params outside of the model.r   zVSome parameters are missing. Make sure that `params` include the following parameters )ro   rh   
isinstancer
   r   rw   r   rN   rK   r   r   )ry   rO   
param_keysr3   r3   r4   rO     s   

maskc           
         sr    fdd}|du rt j||S t|}t j|\}}t|t| D ]\}}	|r4|||	 ||	< q&t|S )zk
        Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`.
        c                    s*   t | tjrt| jtjr|  } | S rf   )r   jnpndarray
issubdtyperH   floatingastype)paramrH   r3   r4   conditional_cast'  s   
z?FlaxPreTrainedModel._cast_floating_to.<locals>.conditional_castN)	r.   	tree_utiltree_mapr   tree_flattenzipsortedrN   r   )
ry   rO   rH   r   r   flat_params	flat_mask_maskedrk   r3   r   r4   _cast_floating_to!  s   z%FlaxPreTrainedModel._cast_floating_toc                 C      |  |tj|S )a  
        Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast
        the `params` in place.

        This method can be used on TPU to explicitly convert the model parameters to bfloat16 precision to do full
        half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.

        Arguments:
            params (`Union[Dict, FrozenDict]`):
                A `PyTree` of model parameters.
            mask (`Union[Dict, FrozenDict]`):
                A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params
                you want to cast, and should be `False` for those you want to skip.

        Examples:

        ```python
        >>> from transformers import FlaxBertModel

        >>> # load model
        >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
        >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision
        >>> model.params = model.to_bf16(model.params)
        >>> # If you want don't want to cast certain parameters (for example layer norm bias and scale)
        >>> # then pass the mask as follows
        >>> from flax import traverse_util

        >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
        >>> flat_params = traverse_util.flatten_dict(model.params)
        >>> mask = {
        ...     path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
        ...     for path in flat_params
        ... }
        >>> mask = traverse_util.unflatten_dict(mask)
        >>> model.params = model.to_bf16(model.params, mask)
        ```)r   r   bfloat16ry   rO   r   r3   r3   r4   to_bf168     %zFlaxPreTrainedModel.to_bf16c                 C   r   )ay  
        Cast the floating-point `params` to `jax.numpy.float32`. This method can be used to explicitly convert the
        model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place.

        Arguments:
            params (`Union[Dict, FrozenDict]`):
                A `PyTree` of model parameters.
            mask (`Union[Dict, FrozenDict]`):
                A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params
                you want to cast, and should be `False` for those you want to skip

        Examples:

        ```python
        >>> from transformers import FlaxBertModel

        >>> # Download model and configuration from huggingface.co
        >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
        >>> # By default, the model params will be in fp32, to illustrate the use of this method,
        >>> # we'll first cast to fp16 and back to fp32
        >>> model.params = model.to_f16(model.params)
        >>> # now cast back to fp32
        >>> model.params = model.to_fp32(model.params)
        ```)r   r   float32r   r3   r3   r4   to_fp32_  s   zFlaxPreTrainedModel.to_fp32c                 C   r   )a  
        Cast the floating-point `params` to `jax.numpy.float16`. This returns a new `params` tree and does not cast the
        `params` in place.

        This method can be used on GPU to explicitly convert the model parameters to float16 precision to do full
        half-precision training or to save weights in float16 for inference in order to save memory and improve speed.

        Arguments:
            params (`Union[Dict, FrozenDict]`):
                A `PyTree` of model parameters.
            mask (`Union[Dict, FrozenDict]`):
                A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params
                you want to cast, and should be `False` for those you want to skip

        Examples:

        ```python
        >>> from transformers import FlaxBertModel

        >>> # load model
        >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
        >>> # By default, the model params will be in fp32, to cast these to float16
        >>> model.params = model.to_fp16(model.params)
        >>> # If you want don't want to cast certain parameters (for example layer norm bias and scale)
        >>> # then pass the mask as follows
        >>> from flax import traverse_util

        >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
        >>> flat_params = traverse_util.flatten_dict(model.params)
        >>> mask = {
        ...     path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
        ...     for path in flat_params
        ... }
        >>> mask = traverse_util.unflatten_dict(mask)
        >>> model.params = model.to_fp16(model.params, mask)
        ```)r   r   float16r   r3   r3   r4   to_fp16z  r   zFlaxPreTrainedModel.to_fp16c                 C   s   z3| drt|}t|dd}W |S t|d}t| | }W d    W |S 1 s,w   Y  W |S  ttjj	fyy } z6zt|}| 
drPtdt|1 sVw   Y  W n ttfym   td| dw W Y d }~|S d }~ww )	N.safetensors.r@   rbversionYou seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.Unable to convert   to Flax deserializable object. )endswithsafe_load_filer   openr   readr   msgpack
exceptions	ExtraData
startswithOSErrorrh   UnicodeDecodeErrorEnvironmentError)r   resolved_archive_filestatestate_fefr3   r3   r4   load_flax_weights  s6   

	
z%FlaxPreTrainedModel.load_flax_weightsc                 C   s   i }|D ]r}zt |d}t| | }W d   n1 sw   Y  W nA ttjjfyU } z!t |}| dr@tdt	|1 sFw   Y  W Y d}~nd}~w t
t	fye   td| dw t|dd}|| ~t  qt|ddS )	ab  
        This is the same as [`flax.serialization.from_bytes`]
        (https:lax.readthedocs.io/en/latest/_modules/flax/serialization.html#from_bytes) but for a sharded checkpoint.

        This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being
        loaded in the model.

        Args:
            shard_files (`List[str]`:
                The list of shard files to load.

        Returns:
            `Dict`: A nested dictionary of the model parameters, in the expected format for flax models : `{'model':
            {'params': {'...'}}}`.
        r   Nr   r   r   r   r?   r@   )r   r   r   r   r   r   r   r   r   rh   r   r   r   updategccollectr   )r   shard_filesstate_sharded_dictrZ   r   r   r   r   r3   r3   r4   load_flax_sharded_weights  s0   
	

z-FlaxPreTrainedModel.load_flax_sharded_weightsc                 C   s$   dt | jv rdt | jv rdS dS )z
        Returns whether this model can generate sequences with `.generate()`. Returns:
            `bool`: Whether this model can generate sequences with `.generate()`.
        GenerationMixinFT)strprepare_inputs_for_generationgenerate)r   r3   r3   r4   rl     s   z FlaxPreTrainedModel.can_generateFmain)ra   	cache_dirignore_mismatched_sizesforce_downloadlocal_files_onlytokenrevisionpretrained_model_name_or_pathr   r   r   r   r   r   c          0         s
  | dd}| dd}| dd}| dd}| dd}| dd}| d	d}| d
d}| dd}| dd}| dd}|durVtdt |durTtd|}|du r_td dd|d}|durm||d< t ry|sytd d}t	|t
s|dur|n|}| jj|f|d||||||	||||d|\}}n| }|du rt|dd}||d< d}|durt|}tj|}tj|rtjtj||trtj||t}ntjtj||trtj||t}d}nt rtjtj|trtj|t}nn|r+tjtj||tr+tj||t}nT|rGtjtj||trGtj||t}d}n8t rdtjtj|trdtj|t}d}tdtjtj||tr|tdt d| dtdt dt d| dtjtj||r|}d}nt|r|}t|}n|rt}nt}z||||||||	|dd|d }t ||fi |}|du r|tkrt |tfi |}|durd}|du r|rt |tfi |}|durd}|du r
t}t |tfi |}|du r\|	||||d!} t!|tfi | r'd}tdt!|tfi | r;t| d"t dt!|tfi | rOt| d"t d#t| d"t dt dW n! tyf     t"y~   td$| d%| d&t dt d	w |rtd'|  |}|#tjj$d( }ntd'| d)|  nd}|rt%||||||||||	||d*\}}d}!|tkrt&|dd+}"|"' }#W d   n	1 sw   Y  |#du s|#(d,d-vrt)d.| d/|#(d,d0k}!| |g|
R d
|i|}$|s
|!rt*|$||}%n#|r| +|}%n| ,|}%|r+t-j./t0j1|%}%n	t-j./d1d2 |%}%d3|%v r| j2t3|$j4d4 vr^| j2|%d4 v r^|%d4 | j2 |%d4< |%d3 | j2 |%d3< | j2t3|$j4d4 v r| j2|%d4 vr| j2|%d4 i| j2|%d3 id5}%n(| j2t3|$j4vr| j2|%v r|%| j2 }%| j2t3|$j4v r| j2|%vr| j2|%i}%t5|%}%t5t6|r|$j7n|$j4}&|$j8t9|%:  }'t9|%: |$j8 }(|( D ]})d6|)d( v r|(;|) q|'r|std7| d8|' d9 |'| _<g }*|%: D ]@}+|+|&v r:|%|+ j=|&|+ j=kr:|r&|*>|+|%|+ j=|&|+ j=f |&|+ |%|+< qtd:|+ d;|%|+ j= d<|&|+ j= d=q|'rN|rN|'D ]	},|&|, |%|,< qD|(D ]})|%|)= qPt?|(d>krztd?| d@|$j@jA dA|( dB|$j@jA dC|$j@jA dD ntdE|$j@jA dF t?|'d>krtdG|$j@jA dH| dI|' dJ nt?|*d>krtdK|$j@jA dL| dM|$j@jA dN t?|*d>krdOdPdQ |*D }-tdG|$j@jA dH| dR|- dJ t-j./dSd2 |%  fdTdQ D }. fdUdQ D }/t?|.d>krtdV|$j@jA dW| dX|. dY t?|/d>kr%tdV|$j@jA dZ| dX|/ dY |$B rQztCj|f|||||||	|||d[
||$_DW n t)yP   td\ Y nw |r[tE|%|$_7|$S |$tE|%fS )]aw  
        Instantiate a pretrained flax model from a pre-trained model configuration.

        The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.

        The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
        weights are discarded.

        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *pt index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In this case,
                      `from_pt` should be set to `True`.
            dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
                The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
                `jax.numpy.bfloat16` (on TPUs).

                This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
                specified all the computation will be performed with the given `dtype`.

                **Note that this only specifies the dtype of the computation and does not influence the dtype of model
                parameters.**

                If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
                [`~FlaxPreTrainedModel.to_bf16`].
            model_args (sequence of positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.
            config (`Union[PretrainedConfig, str, os.PathLike]`, *optional*):
                Can be either:

                    - an instance of a class derived from [`PretrainedConfig`],
                    - a string or path valid as input to [`~PretrainedConfig.from_pretrained`].

                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the *model id* string of a pretrained
                      model).
                    - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
                      save directory.
                    - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
                      configuration JSON file named *config.json* is found in the directory.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            from_pt (`bool`, *optional*, defaults to `False`):
                Load the model weights from a PyTorch checkpoint save file (see docstring of
                `pretrained_model_name_or_path` argument).
            ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
                Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
                as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
                checkpoint with 3 labels).
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible.
                Will be removed in v5 of Transformers.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only(`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (i.e., do not try to download the model).
            token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
                the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.


                <Tip>

                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

                </Tip>

            subfolder (`str`, *optional*, defaults to `""`):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
                specify the folder name here.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
                `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
                automatically loaded:

                    - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
                      underlying model's `__init__` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, `kwargs` will be first passed to the configuration class
                      initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
                      corresponds to a configuration attribute will be used to override said attribute with the
                      supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
                      will be passed to the underlying model's `__init__` function.

        Examples:

        ```python
        >>> from transformers import BertConfig, FlaxBertModel

        >>> # Download model and configuration from huggingface.co and cache.
        >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
        >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
        >>> model = FlaxBertModel.from_pretrained("./test/saved_model/")
        >>> # Loading from a PyTorch checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
        >>> config = BertConfig.from_json_file("./pt_model/config.json")
        >>> model = FlaxBertModel.from_pretrained("./pt_model/pytorch_model.bin", from_pt=True, config=config)
        ```from_ptFresume_downloadNproxiesuse_auth_tokentrust_remote_code_from_pipeline
_from_autore   T	subfolderr_   _commit_hashadapter_kwargsrThe `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.V`token` and `use_auth_token` are both specified. Please set only the argument `token`.zgThe argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is ignored.modelr   )	file_typer   from_auto_classusing_pipelinez+Offline mode: forcing local_files_only=True)r   return_unused_kwargsr   r   r   r   r   r   r   r   r   r   rH   zASupport for sharded checkpoints using safetensors is coming soon!zError no file named z found in directory zc but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those weights.z or r   )r   r   r   r   r   r   
user_agentr   r    _raise_exceptions_for_gated_repo%_raise_exceptions_for_missing_entriesr   )r   r   r   r   r   z& does not appear to have a file named zk but there is a sharded file for PyTorch weights. Use `from_pt=True` to load this model from those weights.zCan't load the model for 'z'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'z=' is the correct path to a directory containing a file named zloading weights file z from cache at )
r   r   r   r   r   r   r   r   r   r   )r   format)pttfr   z"The safetensors archive passed at zf does not contain the valid metadata. Make sure you save your model with the `save_pretrained` method.r   c                 S   s   t | t jddd S )Ncpu)backendr   )r.   
device_putlocal_devicesr1   r3   r3   r4   rg     s    z5FlaxPreTrainedModel.from_pretrained.<locals>.<lambda>batch_statsrO   )rO   r   num_batches_trackedzThe checkpoint z is missing required keys: zI. Make sure to call model.init_weights to initialize the missing weights.z)Trying to load the pretrained weight for z failed: checkpoint has shape z, which is incompatible with the model shape zd. Using `ignore_mismatched_sizes=True` if you really want to load this checkpoint inside this model.r   z(Some weights of the model checkpoint at z! were not used when initializing z: z,
- This IS expected if you are initializing z from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing z from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).z9All model checkpoint weights were used when initializing z.
zSome weights of z3 were not initialized from the model checkpoint at z and are newly initialized: zo
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.zAll the weights of z/ were initialized from the model checkpoint at zf.
If your task is similar to the task the model of the checkpoint was trained on, you can already use z* for predictions without further training.
c              	   S   s*   g | ]\}}}d | d| d| dqS )z- z: found shape z in the checkpoint and z in the model instantiatedr3   ).0rk   shape1shape2r3   r3   r4   
<listcomp>  s    z7FlaxPreTrainedModel.from_pretrained.<locals>.<listcomp>z= and are newly initialized because the shapes did not match:
c                 S   r   rf   r   r1   r3   r3   r4   rg     s    c                       g | ]} | t jkr|qS r3   )r   r   r   kparam_dtypesr3   r4   r         c                    r   r3   )r   r   r   r   r3   r4   r     r   zSome of the weights of zD were initialized in float16 precision from the model checkpoint at z:
z
You should probably UPCAST the model weights to float32 if this was not intended. See [`~FlaxPreTrainedModel.to_fp32`] for further information on how to do this.zE were initialized in bfloat16 precision from the model checkpoint at )
r   r   r   r   r   r   r   r   r   r   zZGeneration config file not found, using a generation config created from the model config.)FpopwarningswarnFutureWarningrh   rr   warningr$   rs   r   r   config_classfrom_pretrainedcopygetattrr   ospathisdirisfilejoinr   r   r*   r   r   r   r   r   r   r%   r"   r    r#   	ExceptionsplitrA   r)   r+   rE   getr   r   r   r   r.   r   r   r   arraybase_model_prefixdictr{   r   r   rO   r   rw   rN   remove_missing_keysshaperJ   rK   rt   ru   rl   r   rn   r   )0r   r   rH   ra   r   r   r   r   r   r   
model_argsr   r   r   r   r   r   from_pipeliner   re   r   commit_hashr   r   config_pathmodel_kwargs
is_shardedis_localarchive_filefilenamer   cached_file_kwargshas_file_kwargssafetensors_from_ptr   safetensors_metadatar   r   random_statemissing_keysunexpected_keysunexpected_keymismatched_keysrk   missing_keymismatched_warningfp16_paramsbf16_paramsr3   r   r4   r    s   



 





 	








 



z#FlaxPreTrainedModel.from_pretrainedr>   save_directorysafe_serializationc              	   K   sX  | dd}|durtdt |durtd|}|dur"||d< tj|r3t	d| d dS tj
|dd	 |r]| d
d}	| d|tjjd }
| j|
fi |}
| |}tj|}| jjdd g| j_| jdur{t| || jd | j| |  r| j| |rtnt}tj||}t|dur|n| j|\}}t|D ])}tj||}|dddd}| |rtj|r||! vrt"| q|du r|r|dur|n| j}t#|dd}t$||ddid nt%|d}|dur|n| j}t&|}|'| W d   n	1 sw   Y  nttj|t(}t%|ddd}t)j*|dddd }|'| W d   n	1 sHw   Y  t+d| dt,| d | d |- D ]0\}}t%tj||dd!}t.|d"d}t&|}|'| W d   n	1 sw   Y  qbt+d#|  |r| j/||
||	|d$ dS dS )%a  
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        `[`~FlaxPreTrainedModel.from_pretrained`]` class method

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to which to save. Will be created if it doesn't exist.
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
                The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size
                lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`).

                <Tip warning={true}>

                If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
                which will be bigger than `max_shard_size`.

                </Tip>

            token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
                the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
            kwargs (`Dict[str, Any]`, *optional*):
                Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
            safe_serialization (`bool`, *optional*, defaults to `False`):
                Whether to save the model using `safetensors` or through msgpack.
        r   Nr   r   r   zProvided path (z#) should be a directory, not a fileT)exist_okcommit_messagerepo_idr      )ra   z.binr_   r   r   r@   r   r   )rE   wbwzutf-8)encoding   )indent	sort_keysr   z:The model is bigger than the maximum size per checkpoint (z) and is going to be split in z^ checkpoint shards. You can find where each parameters has been saved in the index located at )moder?   zModel weights saved in )r0  r   )0r   r  r  r  rh   r	  r
  r  rr   errormakedirsr  rA   _create_repo_get_files_timestampsabspathrt   ru   ra   architectures_auto_classr   save_pretrainedrl   rn   r   r   r  r]   rO   listdirrM   r   rN   r  r   safe_save_filer   r   writer   jsondumpsrs   rK   itemsr   _upload_modified_files)ry   r-  rO   push_to_hubrP   r   r.  r   r   r0  r1  files_timestampsweights_nameoutput_model_filerW   r\   r  full_filenameweights_no_suffix	flat_dictr   model_bytessave_index_filecontentrZ   rY   shard_bytesr3   r3   r4   rA  0  s   (





z#FlaxPreTrainedModel.save_pretrainedFlaxAutoModelc                 C   sD   t |ts|j}ddlm  m} t||st| d|| _dS )a  
        Register this class with a given auto class. This should only be used for custom models as the ones in the
        library are already mapped with an auto class.

        <Tip warning={true}>

        This API is experimental and may have some slight breaking changes in the next releases.

        </Tip>

        Args:
            auto_class (`str` or `type`, *optional*, defaults to `"FlaxAutoModel"`):
                The auto class to register this new model with.
        r   Nz is not a valid auto class.)	r   r   ru   transformers.models.automodelsautohasattrrh   r@  )r   
auto_classauto_moduler3   r3   r4   register_for_auto_class  s   


z+FlaxPreTrainedModel.register_for_auto_classrf   )NFr>   NF)rT  )6ru   
__module____qualname____doc__r  r  main_input_namer@  rw   r  r   r   r   r/   Moduler   intrH   boolr}   r.   randomr   r
   r   rp   r   classmethodr   propertyr   r   ra   rb   r	   rO   r   r   r{   setterr   r   r   r   r   r   r   rl   r	  PathLiker   r  rA  r[  r3   r3   r3   r4   r^      s    
 3
	&''

.	
    A
 r^   r   rT  zmodel checkpoint)objectobject_classobject_filesc                 C   s(   t | j| _d | j_t|| j| _d S rf   )r!   __call__r^  r   )model_class	docstringr3   r3   r4   overwrite_call_docstring  s   rn  c                 C   s.   t | j| _t|||| j||d| j| _d S )N)
checkpointoutput_typer  	model_clsr   real_checkpoint)r!   rk  r   ru   )rl  ro  rp  r  r   r   rr  r3   r3   r4   append_call_sample_docstring  s   
rs  c                 C   s$   t | j| _t||d| j| _d S )N)rp  r  )r!   rk  r'   )rl  rp  r  r3   r3   r4    append_replace_return_docstrings  s   
rt  )r>   )NNN)[r   rE  r	  r  	functoolsr   pickler   typingr   r   r   r   r   r	   
flax.linenlinenr/   r.   	jax.numpynumpyr   msgpack.exceptionsr   flax.core.frozen_dictr
   r   flax.serializationr   r   flax.traverse_utilr   r   
jax.randomr   configuration_utilsr   dynamic_module_utilsr   
generationr   r   modeling_flax_pytorch_utilsr   utilsr   r   r   r   r   r   r   r   r   r    r!   r"   r#   r$   r%   r&   r'   	utils.hubr(   r)   utils.import_utilsr*   safetensorsr+   safetensors.flaxr,   r   r-   rC  
get_loggerru   rr   r5   r7   r8   r:   r=   ACT2FNr]   r^   rI  r^  r   rn  rs  rt  r3   r3   r3   r4   <module>   sp    L

A        H


