o
    hQ                    @   s  d Z ddlZddlZddlZddlZddlZddlZddlZddl	Z	ddl
Z
ddlZddlZddlZddlZddlZddlZddlZddlmZ ddlmZ ddlmZmZmZmZmZ ddlmZ ddlmZ  ddl!Z"ddl#Z#ddl$m%Z& ddl'm(Z(m)Z)m*Z* dd	l+m,Z, dd
l#m-Z- ddl.m/Z/m0Z0m1Z1m2Z2m3Z3 ddl4m5Z5 ddl6m7Z7 ddl8m9Z9m:Z:m;Z; ddl<m=Z=m>Z> ddl?m@Z@ ddlAmBZB ddlCmDZDmEZE ddlFmGZG ddlHmIZImJZJmKZK ddlLmMZM ddlNmOZO ddlPmQZQmRZRmSZS ddlTmUZUmVZV ddlWmXZXmYZY ddlZm[Z[ ddl\m]Z]m^Z^ ddl_m`Z` ddlambZbmcZcmdZdmeZemfZfmgZgmhZhmiZi ddljmkZkmlZlmmZmmnZnmoZompZpmqZqmrZrmsZsmtZtmuZumvZvmwZwmxZxmyZymzZzm{Z{m|Z|m}Z}m~Z~ ddlmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZ dd lmZmZmZ dd!lmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZ dd"lmZ dd#lmZ ecgZefZe rdd$lmZ eZe rdd%lmZ e rddlZe rPddlm  mZ ddlm  mZ ddlm5Z e,eءe,ekZeڐrOddlm%  mZ ddlmZ nd&Ze r|ddlm  m#Z ddlm5Z e,ee,d'kZdd(ljmZmZmZmZ nd&Ze rddlZe rdd)lmZ e rdd*lmZmZ ddlm5Z dd+lmZ dd,lmZmZmZmZmZmZmZ e2gZe,ee,d-krdd.lmZ e,ee,d/krdd0lm Z  ee g7 ZeK rdd1lmZ ed2r dd3lmZ d4d5 Zd6d7 Zd8d9 Zer ddlZe r ddlZeeZ	d:Z
d;Zd<Zd=Zd>Zd?Zd@ZG dAdB dBZdS )Cuc   
The Trainer class, to easily train a 🤗 Transformers model from scratch or fine-tune it on a new task.
    N)Mapping)Path)TYPE_CHECKINGAnyCallableOptionalUnion   )#get_reporting_integration_callbacks)	ModelCardcreate_repoupload_folder)version)nn)
DataLoaderDatasetIterableDatasetRandomSamplerSequentialSampler)__version__)PretrainedConfig)DataCollatorDataCollatorWithPaddingdefault_data_collator)DebugOptionDebugUnderflowOverflow)SequenceFeatureExtractor)FeatureExtractionMixin)"ALL_HYPERPARAMETER_SEARCH_BACKENDSdefault_hp_search_backend)BaseImageProcessor)deepspeed_initdeepspeed_load_checkpointis_deepspeed_available)tpu_spmd_dataloader)TrainingSummary)PreTrainedModelload_sharded_checkpointunwrap_model)!MODEL_FOR_CAUSAL_LM_MAPPING_NAMESMODEL_MAPPING_NAMES)	Adafactorget_scheduler)ProcessorMixin)ALL_LAYERNORM_LAYERS"is_torch_greater_or_equal_than_2_3)PreTrainedTokenizerBase)CallbackHandlerDefaultFlowCallbackExportableStatePrinterCallbackProgressCallbackTrainerCallbackTrainerControlTrainerState)DistributedTensorGathererEvalLoopContainerIterableDatasetShardLabelSmootherLayerWiseDummyOptimizerLengthGroupedSamplerSequentialDistributedSamplerdistributed_broadcast_scalarsdistributed_concatfind_batch_sizeget_model_param_countget_module_class_from_nameget_parameter_namesnested_concatnested_detachnested_numpifynested_xla_mesh_reducereissue_pt_warningsremove_dummy_checkpointset_rng_state_for_device)PREFIX_CHECKPOINT_DIRBestRunEvalLoopOutputEvalPredictionHPSearchBackendHubStrategyPredictionOutputRemoveColumnsCollatorSaveStrategyTrainerMemoryTrackerTrainOutputcheck_target_module_existsdefault_compute_objectivedenumpify_detensorizeenable_full_determinismfind_executable_batch_sizeget_last_checkpoint
has_lengthneftune_post_forward_hooknumber_of_argumentsseed_workerset_seedspeed_metrics)OptimizerNamesParallelModeTrainingArguments))ADAPTER_CONFIG_NAMEADAPTER_SAFE_WEIGHTS_NAMEADAPTER_WEIGHTS_NAMECONFIG_NAMESAFE_WEIGHTS_INDEX_NAMESAFE_WEIGHTS_NAMEWEIGHTS_INDEX_NAMEWEIGHTS_NAMEXLA_FSDPV2_MIN_VERSIONPushInProgressPushToHubMixincan_return_lossfind_labelsis_accelerate_availableis_apex_availableis_apollo_torch_availableis_bitsandbytes_availableis_datasets_availableis_galore_torch_availableis_grokadamw_availableis_in_notebookis_ipex_availableis_liger_kernel_availableis_lomo_availableis_peft_availableis_safetensors_availableis_sagemaker_dp_enabledis_sagemaker_mp_enabledis_schedulefree_availableis_torch_compile_availableis_torch_hpu_availableis_torch_mlu_availableis_torch_mps_availableis_torch_musa_availableis_torch_neuroncore_availableis_torch_npu_availableis_torch_xla_availableis_torch_xpu_availableis_torchao_availablelogging	strtobool)deprecate_kwarg)QuantizationMethod)NotebookProgressCallback)ampFz1.10)smp_forward_backwardsmp_forward_only
smp_gathersmp_nested_concat)	PeftModel)Acceleratorskip_first_batches)AcceleratorState)AutocastKwargsDistributedDataParallelKwargsDistributedTypeload_fsdp_modelload_fsdp_optimizersave_fsdp_modelsave_fsdp_optimizer1.3.0)TorchTensorParallelPluginz0.23.0)SeedableRandomSampler)DeepSpeedSchedulerWrapper0.28.0)DataLoaderConfigurationc                 C   sZ   t  r+t  r	tfnd}ttjdtdkr&ddlm} g ||R }t| |S dS )N peftz0.7.0r   )PeftMixedModelF)	r   r   r   parse	importlibmetadatar   r   
isinstance)modelclasses_to_checkr   r   r   h/var/www/html/construction_image-detection-poc/venv/lib/python3.10/site-packages/transformers/trainer.py_is_peft_model  s   
r   c                   C   s&   t  rdtttjv rddiS i S )Nadapter_onlyT)rt   listinspect	signaturer   
parametersr   r   r   r   _get_fsdp_ckpt_kwargs  s   r   c                  C   sx   t tjjt djk rt S t tjt dkrtjntj	} | j
jtjtjg}|tttjg7 }tj|S )Nz2.62.0.0)r   r   torchr   release
contextlibnullcontextnp_corecore
multiarray_reconstructndarraydtypetypeuint32serializationsafe_globals)np_core	allowlistr   r   r   r     s   "r   ztraining_args.binztrainer_state.jsonzoptimizer.ptz	scaler.ptzoptimizer.binzscheduler.ptpytorch_model_fsdpc                   @   s"	  e Zd ZdZddlmZmZmZmZm	Z	 e
ddddd																					
				ddeeejd	f dedee deeeedf  deeeeeef df  deeeeeef  deeg ef  dee deeegef  deee  deeej j! eej j"j# f deee$ej j! eee%f f  deeej&ej&gej&f  fddZ'e(dee fddZ)e)j*dddZ)dd Z+d d! Z,d"d# Z-d$d% Z.d&d' Z/d(d) Z0d*d+ Z1dd,dd-ee fd.d/Z2		dded-ee defd0d1Z3deej4j5j6 fd2d3Z7de8fd4d5Z9dedeej4j5j6 fd6d7Z:ddeeeef  de8fd8d9Z;d:ede8fd;d<Z<d=e=fd>d?Z>dee fd@dAZ?dBdC Z@dDdE ZAdFdG ZBddHeeeejjCjDf  fdIdJZEeF		ddedee dee%e%f fdKdLZGdd=e=dMej j!fdNdOZHdPe8de=fdQdRZIeFddSe8dTee= de=fdUdVZJdWedXeee%f f fdYdZZKdWedXeee%f f d[e=d\eeeLf fd]d^ZMd_efd`daZNddbdcZOddedfZPddejQfdgdhZRdidj ZSddkdlZT						ddmeeeeUf  dWedXeee%f d	f dneee  fdodpZV		d dqdrZWdsdt ZXddudvZYdwdx ZZdydz Z[dd{d|Z\		dd}d~Z]dd Z^dd Z_dd Z`dd Zadd Zbdd Zcdd Zddd Zedd Zf										ddeedXgeeeLf f  deeeeeLf geLf  de=deeee f deedegf  deedXgef  deeheeh f fddZiddeeeLf deeL dd	fddZjdeej&e%f deej&e%f fddZkdeeeej&e%f f deeeej&e%f f fddZldd ZmddeeU fddZn		ddejdeeeej&e%f f dej&fddZodddZpdeUfddZqdeUfddZrddee deUfddZsddee fddZtd
dee fddZudd Zvd	ewddfdee fddZxddddZy					ddeeeeeef f  deee  dedeeeLf fddńZz	Ɛdd:edeee  dede{fddȄZ|					ddPe8d-edeeU deee  dede}fdd˄Z~ddd̈́Z		ddejdeeeej&e%f f deUdeee  deeej& eej& eej& f f
ddτZdeeeej&e%f f fddфZddee fddԄZ																		ddee dee deeee d	f dee dee deeee d	f deeee d	f d,eeee d	f deeee d	f fddބZdd Zdd Z						ddee deUdee dee def
ddZ					ddPe8d-edeeU deee  dede}fddZdd ZdddZdd ZdddZdd Zdd ZdedPe8de=fddZd	S (	  TraineruA  
    Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.

    Args:
        model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*):
            The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed.

            <Tip>

            [`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use
            your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers
            models.

            </Tip>

        args ([`TrainingArguments`], *optional*):
            The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the
            `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided.
        data_collator (`DataCollator`, *optional*):
            The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will
            default to [`default_data_collator`] if no `processing_class` is provided, or to an instance of
            [`DataCollatorWithPadding`] if the `processing_class` is a feature extractor or tokenizer.
        train_dataset (Union[`torch.utils.data.Dataset`, `torch.utils.data.IterableDataset`, `datasets.Dataset`], *optional*):
            The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the
            `model.forward()` method are automatically removed.

            Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a
            distributed fashion, your iterable dataset should either use an internal attribute `generator` that is a
            `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will
            manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally
            sets the seed of the RNGs used.
        eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`], `datasets.Dataset`], *optional*):
             The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the
             `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each
             dataset prepending the dictionary key to the metric name.
        processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
            Processing class used to process the data. If provided, will be used to automatically process the inputs
            for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
            reuse the fine-tuned model.
            This supersedes the `tokenizer` argument, which is now deprecated.
        model_init (`Callable[[], PreTrainedModel]`, *optional*):
            A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start
            from a new instance of the model as given by this function.

            The function may have zero arguments, or a single one containing the optuna/Ray Tune/SigOpt trial object, to
            be able to choose different architectures according to hyperparameters (such as layer count, sizes of
            inner layers, dropout probabilities, etc.).
        compute_loss_func (`Callable`, *optional*):
            A function that accepts the raw model outputs, labels, and the number of items in the entire accumulated
            batch (batch_size * gradient_accumulation_steps) and returns the loss. For example, see the default [loss function](https://github.com/huggingface/transformers/blob/052e652d6d53c2b26ffde87e039b723949a53493/src/transformers/trainer.py#L3618) used by [`Trainer`].
        compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):
            The function that will be used to compute metrics at evaluation. Must take an [`EvalPrediction`] and return
            a dictionary mapping metric names to metric values. *Note*: when passing `TrainingArguments` with `batch_eval_metrics` set to
            `True`, your `compute_metrics` function must take a boolean `compute_result` argument. This will be triggered
            after the last eval batch to signal that the function needs to calculate and return the global summary
            statistics rather than accumulating the batch-level statistics.
        callbacks (List of [`TrainerCallback`], *optional*):
            A list of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed [here](callback).

            If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method.
        optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*, defaults to `(None, None)`):
            A tuple containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your
            model and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
        optimizer_cls_and_kwargs (`Tuple[Type[torch.optim.Optimizer], Dict[str, Any]]`, *optional*):
            A tuple containing the optimizer class and keyword arguments to use.
            Overrides `optim` and `optim_args` in `args`. Incompatible with the `optimizers` argument.

            Unlike `optimizers`, this argument avoids the need to place model parameters on the correct devices before initializing the Trainer.
        preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*):
            A function that preprocesses the logits right before caching them at each evaluation step. Must take two
            tensors, the logits and the labels, and return the logits once processed as desired. The modifications made
            by this function will be reflected in the predictions received by `compute_metrics`.

            Note that the labels (second parameter) will be `None` if the dataset does not have them.

    Important attributes:

        - **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`]
          subclass.
        - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
          original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`,
          the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner
          model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`.
        - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
          data parallelism, this means some of the model layers are split on different GPUs).
        - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
          to `False` if model parallel or deepspeed is used, or if the default
          `TrainingArguments.place_model_on_device` is overridden to return `False`.
        - **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while
          in `train`)
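
    Example:

    A minimal, hedged usage sketch. The checkpoint name, the tiny in-memory dataset and the accuracy metric below
    are illustrative placeholders chosen for this example only; they are not part of this module.

    ```python
    import numpy as np
    import torch
    from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)

    # Tiny in-memory dataset so the example is self-contained.
    texts, labels = ["a great movie", "a terrible movie"], [1, 0]
    encodings = tokenizer(texts, truncation=True, padding=True)

    class ToyDataset(torch.utils.data.Dataset):
        def __len__(self):
            return len(labels)

        def __getitem__(self, idx):
            item = {k: torch.tensor(v[idx]) for k, v in encodings.items()}
            item["labels"] = torch.tensor(labels[idx])
            return item

    def compute_metrics(eval_pred):
        # eval_pred is an EvalPrediction carrying `.predictions` (logits) and `.label_ids`.
        preds = np.argmax(eval_pred.predictions, axis=-1)
        return {"accuracy": float((preds == eval_pred.label_ids).mean())}

    args = TrainingArguments(output_dir="tmp_trainer", num_train_epochs=1, per_device_train_batch_size=2)
    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=ToyDataset(),
        eval_dataset=ToyDataset(),
        processing_class=tokenizer,
        compute_metrics=compute_metrics,
    )
    trainer.train()
    print(trainer.evaluate())
    ```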

    r	   )_get_learning_ratelog_metricsmetrics_formatsave_metrics
save_state	tokenizerprocessing_classz5.0.0T)new_namer   raise_if_both_namesNNNr   argsdata_collatortrain_datasetzdatasets.Dataseteval_dataset
model_initcompute_loss_funccompute_metrics	callbacks
optimizersoptimizer_cls_and_kwargspreprocess_logits_for_metricsc                  C   s	  |d u rd}t d| d t|d}|jr)|	d ur)dt|	j vr)td|j	d ur@|j	dkr@|d u r@td|j	 d	|j
tjksI|jrR|jd u rRtd
|| _|| _| jjrbt| jjnt| jj d | _d | _d| _|   t| jj| _| j  | }t| |j  |d u r|d ur|| _!| " }nt#d|d urt$%dt& || _!|j'j(t)v rtd|j'j( dt*|ddrt*|ddrd| _+nd| _+t*|dd d urdd t,|j-. D }t/|dkrd| _+nt/|dkr| jj0t10|d k| _+nd| _+| j+rt d | jj2rKt3 rGddl4m5} t6|t7r+||d n t8|drAt6|9 t7rA||9 d n
t :d nt;dt*|ddoXt*|dd }t*|d d d uoe|j<j=}t*|d d d uout*|j<d!d}|rt8|d"rtd#|rt>|s|std$|r|std%|j<j?j@ d&|j<j?j@ |jAd' | _Bt/|jCdkr| jDrtd(|jAd' s|jEtFjGkrtd)|jH| _H| j+s| jDs|jIs|jJr|jKr| jBs| jLrd| _H|d urt6|tMtNfrtO|ntP}|d ur|n|| _Q|| _R|| _S|| _T| jHr-t*|d*d tUjVks-| W||j0 | j+r5d| j_X|| _Y|| _Z| j[\|}t>|sI|j]n|9 j]}t|j}t8|d+r_|j^| __nt`d,d- |. D | __|ja| _a|	| _b|| _c|\| _d| _e|| _f| jfd ur| jdd urt#d.|d ur| jdd us| jed urt#d/tg r| jdd ur| jZ D ]}|j0} | jdjhD ]}t/|d0 dkr|d0 d j0} nq||krtd1| jBs| jLr| jdd us| jed urt#d2titj| jjk }|
d u r|n||
 }
tl|
| jZ| jT| jd| je| _m| n| jjortpntq d| _rd | _s| jjtr.| u  | jjvr<twjx| jjydd3 tz| jQsPtzt*| jQd4d rPtd5|j{dkra|j|dkrat d6 |d urut}|su|j{dkrutd7|d urt6|t1j~jjr|jrtd8d | _d| _d| _t r|jrtd9tr|jtjjjkrt :d:tjjj d;|j d<tjjj  tjjj|_nt8tjjd=rt :d:tjjj d> |js|jr|jd?kr|j0t10d@kr|jrtstdAndB|_t dC|j dD |js|jr;| jDs;t s;|jdBkr*d| _t1j| _n|jdEkr;t s8t;dFd| _| jjdkrKt| jjdG| _nd | _t | _t|  |  dHd | jmj| jg D dI| _d| _d | _t>| jZr| jjd u rt :dJ| jZj'j( dK t| jZj'}| jjd u r|n| jj| _t| jZj'| _| jm| j| j| j| _|j| _d| _| j  |jrt st#dL|jAdMd| _| jrtstdNt }ttjtt||dfdOdP | jBo| j | _d S )QNtmp_trainerz1No `TrainingArguments` passed, using `output_dir=z`.)
output_dircompute_resultzWhen using `batch_eval_metrics`, your `compute_metrics` function must take a `compute_result` boolean argument which will be triggered after the last batch of the eval set to signal that the summary statistics should be returned by the function.noz%You have set `args.eval_strategy` to zx but you didn't pass an `eval_dataset` to `Trainer`. Either set `args.eval_strategy` to `no` or pass an `eval_dataset`. z`args.metric_for_best_model` must be provided when using 'best' save_strategy or if `args.load_best_model_at_end` is set to `True`.Fz<`Trainer` requires either a `model` or `model_init` argumentz`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.zThe model you have picked (a  ) cannot be used as is for training: it only computes hidden states and does not accept any labels. You should choose a model with a head suitable for your task like any of the `AutoModelForXxx` listed at https://huggingface.co/docs/transformers/model_doc/autois_parallelizablemodel_parallelThf_device_mapc                 S   s   g | ]}|d vr|qS ))cpudiskr   ).0devicer   r   r   
<listcomp>      z$Trainer.__init__.<locals>.<listcomp>r	   r   zYou have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.)_apply_liger_kernel_to_instancer   get_base_modelzRThe model is not an instance of PreTrainedModel. No liger kernels will be applied.zYou have set `use_liger_kernel` to `True` but liger-kernel >= 0.3.0 is not available. Please install it with `pip install liger-kernel`is_quantized_hf_peft_config_loadedhf_quantizeris_qat_trainable	_orig_modzYou cannot fine-tune quantized model with `torch.compile()` make sure to pass a non-compiled model when fine-tuning a quantized model with PEFTzYou cannot perform fine-tuning on purely quantized models. Please attach trainable adapters on top of the quantized model to correctly perform fine-tuning. Please see: https://huggingface.co/docs/transformers/peft for more detailsz8The model you are trying to fine-tune is quantized with z but that quantization method do not support training. Please open an issue on GitHub: https://github.com/huggingface/transformers to request the support for training support for xlazZUsing --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags.z.Using fsdp only works in distributed training.quantization_methodaccepts_loss_kwargsc                 s   s    | ]
}|j tjjkV  qd S N)kindr   	ParameterVAR_KEYWORDr   kr   r   r   	<genexpr>~  s    
z#Trainer.__init__.<locals>.<genexpr>zSPassing both `optimizers` and `optimizer_cls_and_kwargs` arguments is incompatible.zPassing a `model_init` is incompatible with providing the `optimizers` argument. You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method.paramsa[  The model and the optimizer parameters are not on the same device, which probably means you created an optimizer around your model **before** putting on the device and passing it to the `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and `model.to(xm.xla_device())` is performed before the optimizer creation in your script.zPassing `optimizers` is not allowed if PyTorch FSDP is enabled. You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method.exist_okcollate_batchzRThe `data_collator` should be a simple callable (function, class with `__call__`).zHmax_steps is given, it will override any value given in num_train_epochszThe train_dataset does not implement __len__, max_steps has to be specified. The number of steps needs to be known in advance for the learning rate scheduler.zTthe `--group_by_length` option is only available for `Dataset`, not `IterableDatasetzOSageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead z(FP16 provided in SM_HP_MP_PARAMETERS is z+, but FP16 provided in trainer argument is z, setting to fp16zJ, but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer.autor   z2Tried to use `fp16` but it is not supported on cpucpu_ampzUsing z half precision backendapexzcUsing FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex.)epsilonc                 S      g | ]	}t |tr|qS r   r   r3   r   cbr   r   r   r     
    
)is_local_process_zerois_world_process_zerostateful_callbacksz)No label_names provided for model class `z`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.z3Using torch.compile requires PyTorch 2.0 or higher.xla_fsdp_v2z*FSDPv2 requires `torch_xla` 2.2 or higher.)fsdptensor)
axis_names)loggerinforf   batch_eval_metricsr   r   r   keys
ValueErroreval_strategysave_strategyrU   BESTload_best_model_at_endmetric_for_best_modelr   r   full_determinismr[   seedrb   hp_name	deepspeedis_in_train"create_accelerator_and_postprocessrV   skip_memory_metrics_memory_trackerstartget_process_log_levelr   set_verbosity_setup_devicesr   call_model_initRuntimeErrorwarningswarnFutureWarning	__class____name__r*   getattris_model_parallelsetr   valueslenr   r   use_liger_kernelr}   liger_kernel.transformersr   r   r&   hasattrr   warningImportErrorr   is_trainabler   quantization_configquant_methodfsdp_configis_fsdp_xla_enabledr  is_deepspeed_enabledparallel_modere   DISTRIBUTEDplace_model_on_devicefp16_full_evalbf16_full_evaldo_trainis_fsdp_enabledr0   r   r   r   r   r   r   r   r   BITS_AND_BYTES_move_model_to_device_n_gpumodel_wrappedr   acceleratorr(   forwardr   model_accepts_loss_kwargsanyneftune_noise_alphar   r   	optimizerlr_schedulerr   r   param_groupsDEFAULT_CALLBACKSr
   	report_tor1   callback_handleradd_callbackdisable_tqdmr4   DEFAULT_PROGRESS_CALLBACK_loggers_initializedhub_model_idpush_to_hubinit_hf_reposhould_saveosmakedirsr   callable	max_stepsnum_train_epochsr^   utilsdatar   group_by_length_signature_columnsuse_apexuse_cpu_ampr   bf16IS_SAGEMAKER_MP_POST_1_10r  smpstatecfghalf_precision_backendr/   bfloat16	amp_dtyperu   label_smoothing_factorr<   label_smootherr7   controlr8   r  r  r   current_floshp_search_backendlabel_namesrs   rr   on_init_endtrain_batch_size_train_batch_size_created_lr_schedulerstop_and_update_metricstorch_compiler   getis_fsdp_xla_v2_enabledIS_XLA_FSDPV2_POST_2_2xrglobal_runtime_device_countxsset_global_meshMeshr   arrayrangeis_fsdp_xla_v1_enabled) selfr   r   r   r   r   r   r   r   r   r   r   r   r   r   	log_leveldevicesr   _is_quantized_and_base_model&_quantization_method_supports_training%_is_model_quantized_and_qat_trainabledefault_collatorunwrapped_modelmodel_forwardforward_paramsparammodel_deviceparam_groupoptimizer_devicedefault_callbacksdefault_label_namesnum_devicesr   r   r   __init__  s&  





	



"


 
 
 
	
$zTrainer.__init__returnc                 C   s   t d | jS )NzUTrainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.r  r<  r   r  r   r   r   r   .  s   
zTrainer.tokenizerc                 C   s   t d || _d S )NzjTrainer.tokenizer is now deprecated. You should use `Trainer.processing_class = processing_class` instead.r  )r  r   r   r   r   r   3  s   
c                 C   sH   | j |}t|r|jj }n| }~| j|_|t}|| _	|S )z
        Activates NEFTune, as presented in this code: https://github.com/neelsjain/NEFTune and in the paper:
        https://arxiv.org/abs/2310.05914
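
        A hedged configuration sketch: in this version of the library NEFTune is switched on through
        `TrainingArguments.neftune_noise_alpha`; the alpha value below is illustrative only.

        ```python
        from transformers import TrainingArguments

        # Any positive float enables the noisy-embedding hook; 5.0 is just an example value.
        args = TrainingArguments(output_dir="tmp_trainer", neftune_noise_alpha=5.0)
        ```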
        )
rO  r(   r   
base_modelr   get_input_embeddingsrS  register_forward_hookr_   neftune_hook_handle)r  r   r  
embeddingshook_handler   r   r   _activate_neftune:  s   
zTrainer._activate_neftunec                 C   sP   t | ds	td| j|}t|r|jj }n| }| j	  |`
~dS )z^
        Deactivates the neftune method. Make sure to call `_activate_neftune` first.
        r  zNNeftune is not activated make sure to call `trainer._activate_neftune()` firstN)r;  r  rO  r(   r   r  r   r  r  removerS  )r  r   r  r  r   r   r   _deactivate_neftuneM  s   


zTrainer._deactivate_neftunec                 C      | j | dS )ag  
        Add a callback to the current list of [`~transformers.TrainerCallback`].

        Args:
           callback (`type` or [`~transformers.TrainerCallback]`):
               A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
               first case, will instantiate a member of that class.
        N)rY  rZ  r  callbackr   r   r   rZ  ^     	zTrainer.add_callbackc                 C   s   | j |S )aK  
        Remove a callback from the current list of [`~transformers.TrainerCallback`] and returns it.

        If the callback is not found, returns `None` (and no error is raised).

        Args:
           callback (`type` or [`~transformers.TrainerCallback]`):
               A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
               first case, will pop the first member of that class found in the list of callbacks.

        Returns:
            [`~transformers.TrainerCallback`]: The callback removed, if found.
        )rY  pop_callbackr  r   r   r   r  i  s   zTrainer.pop_callbackc                 C   r  )a  
        Remove a callback from the current list of [`~transformers.TrainerCallback`].

        Args:
           callback (`type` or [`~transformers.TrainerCallback]`):
               A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
               first case, will remove the first member of that class found in the list of callbacks.
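
        A hedged sketch of the callback API, assuming `trainer` is an existing [`Trainer`] instance (for example
        the one built in the class-level example above); the callback itself is illustrative.

        ```python
        from transformers import TrainerCallback

        class LossPrinterCallback(TrainerCallback):
            """Print the running loss every time the Trainer logs."""

            def on_log(self, args, state, control, logs=None, **kwargs):
                if logs is not None and "loss" in logs:
                    print(f"step {state.global_step}: loss = {logs['loss']:.4f}")

        trainer.add_callback(LossPrinterCallback)            # pass the class or an instance
        removed = trainer.pop_callback(LossPrinterCallback)  # detach it and get the instance back
        trainer.remove_callback(LossPrinterCallback)         # or just detach it
        ```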
        N)rY  remove_callbackr  r   r   r   r  y  r  zTrainer.remove_callbackc                 C   s6   | |}| jjtjkrt|dr|  d S d S d S )Ntie_weights)tor   rD  re   TPUr;  r  )r  r   r   r   r   r   rL    s   
zTrainer._move_model_to_devicec                 C   s   | j d u r>| j}t| jrt| jdr| j }n| jjj}t|j}t	|j
 | _ |  j t	tddg| j 7  _ d S d S )Nr   label	label_ids)rj  r   r   r;  r   r  r   r   rP  r   r   r  r6  rz  )r  model_to_inspectr   r   r   r    _set_signature_columns_if_needed  s   


$z(Trainer._set_signature_columns_if_neededdatasetdescriptionc                    s
  | j js S |   | j}tt jt| }t|dkrK|d u r$dnd| d}t	d| d| j
jj dd| d	d| d
| j
jj d  fdd|D }t|dkretdd| dttjtdk r j jd | jd d  S  |S )Nr    zin the z setzThe following columns z) don't have a corresponding argument in `z!.forward` and have been ignored: , z. If z are not expected by `z/.forward`,  you can safely ignore this message.c                    s   g | ]	}| j v r|qS r   )column_namesr   r  r   r   r         z2Trainer._remove_unused_columns.<locals>.<listcomp>zpNo columns in the dataset match the model's forward method signature. The following columns have been ignored: [zp]. Please check the dataset and model. You may need to set `remove_unused_columns=False` in `TrainingArguments`.1.4.0r   format_kwargs)r   columnsr  )r   remove_unused_columnsr  rj  r   r6  r  r8  r  r  r   r2  r3  joinr  r   r   datasetsr   
set_formatformatremove_columns)r  r  r  signature_columnsignored_columnsdset_descriptionr  r   r  r   _remove_unused_columns  s>   
zTrainer._remove_unused_columnsc                 C   s6   | j js|S |   | j}t||t|| jjjd}|S )z=Wrap the data collator in a callable removing unused columns.)r   r  r  r  
model_name)	r   r  r  rj  rT   r  r   r2  r3  )r  r   r  r  remove_columns_collatorr   r   r   "_get_collator_with_removed_columns  s   z*Trainer._get_collator_with_removed_columnsc                 C   s   | j d u s
t| j sd S | jjrJt r,t| j tjr,| jj| j j	v r)| j | jj nd }nd }| j
d ur9| j
jd nd }t| jj| jj | j ||dS t| j S )Nr   r  lengthsmodel_input_name)r   r^   r   ri  rx   r   r  r   length_column_namer  r   model_input_namesr>   r|  gradient_accumulation_stepsr   )r  r  r  r   r   r   _get_train_sampler  s$   
zTrainer._get_train_samplerc                 C   s   | j du r	td| j }| j}t r t|tjr | j|dd}n| j|dd}| j	|| j
j| j
j| j
jd}t|tjjjsT|  |d< | j
j|d< t|d< | j
j|d	< | jt|fi |S )
a@  
        Returns the training [`~torch.utils.data.DataLoader`].

        Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed
        training if necessary) otherwise.

        Subclass and override this method if you want to inject some custom behavior.
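
        A simplified, hedged sketch of such an override (it skips the `accelerator.prepare` wrapping that the
        default implementation performs, and the sequential sampler is only an illustration):

        ```python
        from torch.utils.data import DataLoader, SequentialSampler

        from transformers import Trainer

        class SequentialTrainer(Trainer):
            def get_train_dataloader(self) -> DataLoader:
                # Iterate the training set in order instead of shuffling it.
                return DataLoader(
                    self.train_dataset,
                    batch_size=self.args.per_device_train_batch_size,
                    sampler=SequentialSampler(self.train_dataset),
                    collate_fn=self.data_collator,
                )
        ```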
        Nz+Trainer: training requires a train_dataset.trainingr  
batch_size
collate_fnnum_workers
pin_memorypersistent_workerssampler	drop_lastworker_init_fnprefetch_factor)r   r  r   rx   r   r  r   r  r  r}  r   dataloader_num_workersdataloader_pin_memorydataloader_persistent_workersr   rg  rh  r   r  dataloader_drop_lastra   dataloader_prefetch_factorrO  preparer   )r  r   r   dataloader_paramsr   r   r   get_train_dataloader  s&   
	zTrainer.get_train_dataloaderc                 C   s   |d u st |s
d S | jjr1t rt|t t dS t r-t|t	
 t	 | jjdS t|S | jjrgt rNt|tjrN| jj|jv rK|| jj nd }nd }| jd ur[| jjd nd }t| jj|||dS | jjdkrqt|S d S )N)num_replicasrank)r  r  r  r   r  r	   )r^   r   use_legacy_prediction_loopr   r?   xmxrt_world_sizeget_ordinalr   ro  dp_sizedp_rankper_device_eval_batch_sizer   ri  rx   r   r  r   r  r  r   r  r>   eval_batch_size
world_size)r  r   r  r  r   r   r   _get_eval_sampler	  sB   zTrainer._get_eval_samplerc                 C   sP  |du r| j du rtdt|tr|nd}t| dr-|| jv r-| jjr-| j	| j| S t|tr7| j | n|dur=|n| j }| j
}t rTt|tjrT| j|dd}n| j|dd}| jj|| jj| jj| jjd}t|tjjjs| ||d< | jj|d	< | jj|d
< t|fi |}| jjrt| dr|| j|< n||i| _| j	|S )a   
        Returns the evaluation [`~torch.utils.data.DataLoader`].

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            eval_dataset (`str` or `torch.utils.data.Dataset`, *optional*):
                If a `str`, will use `self.eval_dataset[eval_dataset]` as the evaluation dataset. If a `Dataset`, will override `self.eval_dataset` and must implement `__len__`. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed.
        Nz-Trainer: evaluation requires an eval_dataset.eval_eval_dataloaders
evaluationr  r  r  r  r  )r   r  r   strr;  r  r   r  rO  r  r   rx   r  r   r  r  r  r  r  r   rg  rh  r   r  r  r  r   )r  r   dataloader_keyr   r  eval_dataloaderr   r   r   get_eval_dataloader6  sF   



zTrainer.get_eval_dataloadertest_datasetc                 C   s   | j }t rt|tjr| j|dd}n| j|dd}| jj|| jj	| jj
| jjd}t|tjjjsF| ||d< | jj|d< | jj|d< | jt|fi |S )a  
        Returns the test [`~torch.utils.data.DataLoader`].

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            test_dataset (`torch.utils.data.Dataset`, *optional*):
                The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the
                `model.forward()` method are automatically removed. It must implement `__len__`.
        testr  r  r  r  r  )r   rx   r   r  r   r  r  r   r  r  r  r  r   rg  rh  r   r  r  r  rO  r  r   )r  r  r   r  r   r   r   get_test_dataloaders  s   zTrainer.get_test_dataloadernum_training_stepsc                 C   s8   |    trtjjjr| jj}n| j}| j||d dS )aZ  
        Set up the optimizer and the learning rate scheduler.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or
        `create_scheduler`) in a subclass.
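
        A hedged sketch of the first option, passing a ready-made optimizer/scheduler pair; `model`, `args` and
        `train_dataset` are assumed to be defined as in the class-level example.

        ```python
        import torch

        from transformers import Trainer

        optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)

        trainer = Trainer(
            model=model,
            args=args,
            train_dataset=train_dataset,
            optimizers=(optimizer, scheduler),  # bypasses create_optimizer_and_scheduler
        )
        ```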
        )r  rT  N)create_optimizerrn  ro  rp  rq  r  rT  create_schedulerr  r  rT  r   r   r   create_optimizer_and_scheduler  s
   
z&Trainer.create_optimizer_and_schedulerc                 C   s   t |tg d}|S )a0  
        Get all parameter names that weight decay will be applied to.

        This function filters out parameters in two ways:
        1. By layer type (instances of layers specified in ALL_LAYERNORM_LAYERS)
        2. By parameter name patterns (containing 'bias', 'layernorm', or 'rmsnorm')
        )bias	layernormrmsnorm)rE   r.   )r  r   decay_parametersr   r   r   get_decay_parameter_names  s   z!Trainer.get_decay_parameter_namesc           	         s  t  r| jn| j}| jdu r| |  fdd| D | jjd fdd| D ddg}| jdur<| j\}}n	| 	| j|\}}d|v rN|
d}d|v rW|
d}d	|v r`|
d	}||fi || _|jd
krddl}|jj }d}| D ]7}t|tjr|tdd | D  7 }td| d|d  d ||dddi td| d q~td|d  d t  rt| j| _| jS )a   
        Set up the optimizer.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through `optimizers`, or subclass and override this method in a subclass.
        Nc                    s"   g | ]\}}| v r|j r|qS r   requires_gradr   npr  r   r   r         z,Trainer.create_optimizer.<locals>.<listcomp>)r  weight_decayc                    s"   g | ]\}}| vr|j r|qS r   r	  r  r  r   r   r     r          r  r   optimizer_dictAdam8bitr   c                 S   s   i | ]	}|  | qS r   )data_ptrnumelr   r  r   r   r   
<dictcomp>  r  z,Trainer.create_optimizer.<locals>.<dictcomp>zskipped : i   zM paramsweight
optim_bits    zbitsandbytes: will optimize z in fp32z	skipped: )r   rN  r   rT  r  named_parametersr   r  r   get_optimizer_cls_and_kwargspopr3  bitsandbytesoptimGlobalOptimManagerget_instancemodulesr   r   	Embeddingsumr   r7  r  r  register_module_overridedebugro  DistributedOptimizer)	r  	opt_modeloptimizer_grouped_parametersoptimizer_clsoptimizer_kwargsr  managerskippedmoduler   r  r   r     sN   








zTrainer.create_optimizerc                 C   s   t dd | j D S )z9
        Get the number of trainable parameters.
        c                 s   s    | ]
}|j r| V  qd S r   )r
  r  r  r   r   r   r        z7Trainer.get_num_trainable_parameters.<locals>.<genexpr>)r%  r   r   r  r   r   r   get_num_trainable_parameters  s   z$Trainer.get_num_trainable_parametersc                 C   s$   | j du r	tddd | j jD S )zR
        Returns the learning rate of each parameter from self.optimizer.
        NPTrainer optimizer is None, please make sure you have setup the optimizer before.c                 S      g | ]}|d  qS )lrr   r   groupr   r   r   r         z.Trainer.get_learning_rates.<locals>.<listcomp>rT  r  rV  r  r   r   r   get_learning_rates  s   
zTrainer.get_learning_ratesr  c                 C   sN   | j du r	td|dur| j jD ]}||d v r|  S qdd | j jD S )a  
        Returns the optimizer group for a parameter if given, else returns all optimizer groups.

        Args:
            param (`str` or `torch.nn.parameter.Parameter`, *optional*):
                The parameter for which optimizer group needs to be returned.
        Nr2  r  c                 S   r3  )r  r   r5  r   r   r   r     r7  z/Trainer.get_optimizer_group.<locals>.<listcomp>r8  )r  r  r6  r   r   r   get_optimizer_group  s   
zTrainer.get_optimizer_groupc           #         s|	  i  j r j dddD ]}|d\}}||< qd ji j jf jd}	d|dtd	ttt	f d
ttt	f dt
dtt	t	f f
 fdd} jtjkrbt}ddd |fS  jtjtjfv rddlm} |}|  jtjkrddi |fS  jtjkrzddlm} |}| W |fS  ty   tdw  jtjkrzddlm}	 |	}| W |fS  ty   tdw  jtjkrzddlm}
 |
}| W |fS  ty   tdw  jtjtj tj!tj"tj#tj$tj%tj&tj'tj(tj)tj*tj+tj,tj-fv rzddl.m}m/}m0} d}d}d}|}d jv r4d}d jv r<d}d jv rE|}nd  jv rV|}d! j jfi}ntd" jv ra|}}nid# jv rt1 r~t23t4j52d$t23d%k r~td&dd'l.m6} |}t78d( jt78d) jt78d*d+ft78d,d-t78d. jd/}d0v rt9d0 |d0< d1v rt9d1 |d1< d2|i}d" jvr||d3< | | W n ty   td4w t1 rt23t4j52d$t23d5k rt:;d6 |fS  jtj<krWz7dd7l=m>} |}| t?8d8d9t@tA8d:d;t@tA8d<d;t@tA8d=d>d? W |fS  tyV   td@w  jtjBkrftAjjB}|fS  jtjCkrutAjjD}|fS  jtjEkrtAjj0}|fS  jtjFtjGtjHtjItjJtjKfv rtL stdAddBlMmN}mO}mP} tjF|tjG|tjH|tjI|tjJ|tjK|i}t9QdCdDt9QdEdFt7QdGdHQdIdJdK}| j||\} jtjHkrddd |fS  jtjRtjSfv rItT stdLddMlUmV} tjR|tjS|i}t9QdCdDQdNdOQdPdQt9QdEdFt7QdGdRQdIdJdS}| j||\}|fS  jtjWtjXfv rtY s[tdTtZdUsdtdVdu rmtdWddXl[m\}m]} dY jv r~|}n|}dZi |fS  jtj^krt_ std[dd\l`ma} |}t78d]d^t78d_d`t78dadbt78dcdbt78dddRde |fS  jtjbtjcfv r,td rt23t4j52dft23dgk rtdht23t4j52dit23djkrtdkddllemf}mg}  jtjbkr|}n jtjckr|}ntdm| |fS  jtjhtjitjjfv rtk s@tdntZdUsItdoddpllmm}mn}  i }d}! jtjhkrrtkdqsetdrddsllmo}" |"}|}d}!n jtjikr~|}|}n jtjjkr| }ntdt jp|du< |!r jq|dv< |t78dwd`t78dxdydz | |fS td{ j )}z
        Returns the optimizer class and optimizer parameters based on the training arguments.

        Args:
            args (`transformers.training_args.TrainingArguments`):
                The training arguments for the training session.
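
        A hedged sketch of how these arguments are typically populated; the optimizer name and options below are
        illustrative, and this particular one requires `bitsandbytes >= 0.44.0`:

        ```python
        from transformers import TrainingArguments

        # `optim` accepts any member of transformers.training_args.OptimizerNames; `optim_args` is an optional
        # "key=value,key=value" string carrying optimizer-specific options.
        args = TrainingArguments(
            output_dir="tmp_trainer",
            optim="ademamix",
            optim_args="beta3=0.999,alpha=8.0",
        )
        ```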

         r  ,=r4  )betasepsToptimizer_nameoptimizer_mappingoptim_kwargsis_layerwise_supportedr  c                    sJ  |   d}|rjtjkr|rtd|  d||  }jdu r*td|  dtjt	t
fs:tdj du rFtd|  d	tjt
oTjd
ddk}g }g  D ]6\}}	tj|dd\}
}t|	tjs|
r|st| d|  d q]|
s|sq]||	j |d  q]t|dkrtd|  dj dfdd D }| d|id|i|g}|rjdkrtd|  di  |D ]}|d|gigfi  |< q|D ]}|d|gi|gfi  |< q fdd} D ]}|jr|| qt}d i d|i |fS )a  
            Helper function to set up low-rank optimizers like GaLore and Apollo.

            Args:
                optimizer_name (str): Name of the optimizer.
                optimizer_mapping (dict): Mapping of optimizer names to their classes.
                optim_kwargs (dict): Keyword arguments for the optimizer.
                is_layerwise_supported (bool): Whether layerwise optimization is supported.

            Returns:
                Tuple[Any, Any]: Optimizer class and updated optimizer kwargs.
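
            A hedged configuration sketch for these low-rank optimizers; the module patterns and hyperparameters
            below are illustrative, and the GaLore variants require the `galore_torch` package:

            ```python
            from transformers import TrainingArguments

            args = TrainingArguments(
                output_dir="tmp_trainer",
                optim="galore_adamw",                  # or "galore_adamw_layerwise", "apollo_adamw", ...
                optim_target_modules=["attn", "mlp"],  # name patterns, a regex, or "all-linear"
                optim_args="rank=128,update_proj_gap=200,scale=0.25",
            )
            ```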
            	layerwisezLayer-wise z" does not support DDP at this timeNz1You need to define `optim_target_modules` to use z optimizerszX`optim_target_modules` must be a list of strings, a regex string, or 'all-linear'. Got: z'You need to pass a model to initialize z optimizer._-z
all-linearT)return_is_regexz matched but ignored. z only supports linear layers.z.weightr   zNo target modules found for z ().c                    s   g | ]
\}}| vr|qS r   r   r  )target_params_namesr   r   r   k      zZTrainer.get_optimizer_cls_and_kwargs.<locals>.setup_low_rank_optimizer.<locals>.<listcomp>r  r	   z
Layerwise z( does not support gradient accumulation!c                    s*   | j d ur |     |    d S d S r   )gradstep	zero_grad)r  )r  r   r   optimizer_hook}  s   
z^Trainer.get_optimizer_cls_and_kwargs.<locals>.setup_low_rank_optimizer.<locals>.optimizer_hookr  )lowerendswithrD  re   rE  NotImplementedErroroptim_target_modulesr  r   r   r  replacenamed_modulesrX   r   Linearr  r<  appendr  r8  r  updater  r   r
  "register_post_accumulate_grad_hookr=   )r@  rA  rB  rC  is_layerwiser+  
all_lineartarget_paramsmodule_namer/  target_module_existsis_regexnon_target_paramsrV  r  rN  r   r   
optim_argsr,  )r  rI  r   setup_low_rank_optimizer,  sn   





"
zFTrainer.get_optimizer_cls_and_kwargs.<locals>.setup_low_rank_optimizerF)scale_parameterrelative_stepr   )AdamWfusedz7Trainer failed to import syncfree AdamW from torch_xla.)NpuFusedAdamWz3Trainer failed to import FusedAdamW from torch_npu.)	FusedAdamzFTrainer tried to instantiate apex FusedAdam but apex is not installed!)re  LionRMSpropr  Npaged8bit   adamlionr>  rmspropademamixr  z0.44.0z{The AdEMAMix optimizer is not supported by your current version of `bitsandbytes`. Please install `bitsandbytes` >= 0.44.0.)AdEMAMixbeta1beta2beta3gH.?alphag      @r?  )r>  rv  r?  t_alphat_beta3r  is_pagedzOTrainer tried to instantiate bnb optimizer but `bitsandbytes` is not installed!z0.41.1zYou are using 8-bit optimizers with a version of `bitsandbytes` < 0.41.1. It is recommended to update your version as a major bug has been fixed in 8-bit optimizers.)AnyPrecisionAdamWuse_kahan_summationFalsemomentum_dtypefloat32variance_dtypecompensation_buffer_dtypers  )r{  r}  r  r  z4Please install https://github.com/pytorch/torchdistxzYou need to install `galore_torch` in order to use GaLore optimizers install it with `pip install git+https://github.com/jiaweizzhao/GaLore`)GaLoreAdafactorGaLoreAdamWGaLoreAdamW8bitr     update_proj_gap   scaleg      ?	proj_typestd)r  r  r  r  zYou need to install `apollo_torch` in order to use APOLLO optimizers install it with `pip install git+https://github.com/zhuhanqing/APOLLO`)APOLLOAdamWprojrandom
scale_typechannelg      ?)r  r  r  r  r  r  ziYou need to install `lomo_optim` in order to use LOMO optimizers install it with `pip install lomo-optim`0.30.0zGYou need to have `accelerate>=0.30.0` to be able to use LOMO optimizerszMYou need to pass a `model` in order to correctly initialize a LOMO optimizer.)AdaLomoLomoadar   z5Please install grokadamw with `pip install grokadamw`)	GrokAdamW
alpha_initg\(\?lambg       @gammag?grokking_signal_decay_rategradient_clipping)r  r  r  r  r  torchaoz0.4.0zYou need to have `torchao>=0.4.0` in order to use torch 4-bit optimizers.Install it with `pip install torchao` or follow the instructions here: https://github.com/pytorch/aor   z2.4zYou need to have `torch>2.4` in order to use torch 4-bit optimizers. Install it with `pip install --upgrade torch` it is available on pipy. Otherwise, you need to install torch nightly.)	AdamW4bit	AdamW8bitzInvalid optimizerzwYou need to install `schedulefree` in order to use schedulefree optimizers. Install it with `pip install schedulefree.`zOYou need to have `accelerate>=0.30.0` to be able to use schedulefree optimizers)AdamWScheduleFreeSGDScheduleFreer  zYou need to install `schedulefree>=1.4.0` in order to use RAdamScheduleFree optimizer. Install it with `pip install schedulefree.`)RAdamScheduleFreezInvalid schedulefree optimizerr  warmup_stepsweight_lr_powerrr  )r  r  z2Trainer cannot instantiate unsupported optimizer: T)rra  rS  splitlearning_rate
adam_beta1
adam_beta2adam_epsilonr  dictr   booltupler   rd   	ADAFACTORr+   rW  ADAMW_TORCHADAMW_TORCH_FUSEDtorch.optimre  ADAMW_TORCH_XLAtorch_xla.amp.syncfreer=  r  ADAMW_TORCH_NPU_FUSEDtorch_npu.optimrg  ADAMW_APEX_FUSEDapex.optimizersrh  	ADAMW_BNB
ADAMW_8BITPAGED_ADAMWPAGED_ADAMW_8BITADEMAMIXADEMAMIX_8BITPAGED_ADEMAMIXPAGED_ADEMAMIX_8BITLION	LION_8BIT
PAGED_LIONPAGED_LION_8BITRMSPROP_BNBRMSPROP_8BITRMSPROP_32BITbitsandbytes.optimri  rj  rw   r   r   r   r   rr  floatr  intr  r<  ADAMW_ANYPRECISIONtorchdistx.optimizersrz  r   r4  r   SGDADAGRADAdagradRMSPROPGALORE_ADAMWGALORE_ADAMW_8BITGALORE_ADAFACTORGALORE_ADAMW_LAYERWISEGALORE_ADAMW_8BIT_LAYERWISEGALORE_ADAFACTOR_LAYERWISEry   galore_torchr  r  r  r  APOLLO_ADAMWAPOLLO_ADAMW_LAYERWISErv   apollo_torchr  LOMOADALOMOr~   rt   
lomo_optimr  r  	GROKADAMWrz   	grokadamwr  ADAMW_TORCH_4BITADAMW_TORCH_8BITr   torchao.prototype.low_bit_optimr  r  SCHEDULE_FREE_RADAMSCHEDULE_FREE_ADAMWSCHEDULE_FREE_SGDr   schedulefreer  r  r  r  r  )#r   r   mappingkeyvalueadam_kwargsrb  r+  re  rg  rh  ri  rj  ry  r  additional_optim_kwargsrr  
bnb_kwargsrz  r  r  r  rA  galore_optim_kwargsr  apollo_optim_kwargsr  r  r  r  r  r  r  require_warmupr  r   r`  r   r    s  


	


`  :  
  3    -    %      





 L 
 @  3  1  / 

  


	p

ZR
1




z$Trainer.get_optimizer_cls_and_kwargsrT  c                 C   sH   | j du r!t| jj|du r| jn|| j||| jjd| _ d| _| j S )z
        Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
        passed as an argument.

        Args:
            num_training_steps (int): The number of training steps to do.
        N)rT  num_warmup_stepsr  scheduler_specific_kwargsT)rU  r,   r   lr_scheduler_typerT  get_warmup_stepslr_scheduler_kwargsr~  r  r   r   r   r    s   

zTrainer.create_scheduler
dataloaderc              
   C   sT   z|j }t|trt|j j W S t|j W S  tttfy)   t|| jj  Y S w )z
        Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When
        dataloader.dataset does not exist or has no length, estimates as best it can.
        )	r  r   r;   r8  	NameErrorAttributeError	TypeErrorr   per_device_train_batch_size)r  r  r  r   r   r   num_examples  s   
zTrainer.num_examplestrain_dlre  c                 C   s^   d}z| D ]}|d   }|dur||   W S ||7 }qW |S  ty.   td Y |S w )zq
        Helper to get number of tokens in a [`~torch.utils.data.DataLoader`] by enumerating dataloader.
        r   	input_idsNz%Cannot get num_tokens from dataloader)r  KeyErrorr  r<  )r  re  train_tokensbatchtokensr   r   r   
num_tokens  s   
zTrainer.num_tokenstrialzoptuna.Trialc                 C   s  || _ | jdu s|du rdS | jtjkr| |}n(| jtjkr)|}|dd n| jtjkr:dd |j	 D }n| jtj
krB|}|	 D ],\}}t| j|sZtd| d qFt| j|d}|durkt||}t| j|| qF| jtjkrtd|j  | jtjkrtd|j  | jtj
krtd	|  | jr| jjdu rtd
| j  ddlm} ddlm} || jj| j_| jj| j || jjd| j_t  !  | "  dS )zHP search setup codeNwandbc                 S   s(   i | ]\}}|t |trt|n|qS r   )r   r  r  r   r   vr   r   r   r    s   ( z,Trainer._hp_search_setup.<locals>.<dictcomp>zTrying to set zY in the hyperparameter search but there is no corresponding field in `TrainingArguments`.zTrial: zSigOpt Assignments: zW&B Sweep parameters: z7For sweeps with deepspeed, `args.deepspeed` must be setr   )DeepSpeedPluginHfTrainerDeepSpeedConfig)hf_ds_config)#_trialry  rQ   OPTUNAhp_spaceRAYr  SIGOPTassignmentsitemsWANDBr;  r   r  r<  r4  r   setattrr  r  rC  r$  r  rO  free_memoryaccelerate.utilsr  #transformers.integrations.deepspeedr  hf_deepspeed_configtrainer_config_processdeepspeed_pluginr   _reset_stater&  )r  r  r  r  r  old_attrr  r  r   r   r   _hp_search_setup  sN   


zTrainer._hp_search_setuprL  metricsc                 C   s  | j d u s	|d u rd S | }| || _| j tjkrIdd l}t|drC|j	 sE|
| j| | rG| j| j| j| j | d S d S d S | j tjkrdd l}t )}d }| jjrk| j|d |jj|}| j|d< |jj
||d W d    d S 1 sw   Y  d S d S )Nr   study)checkpoint_dir	objective
checkpoint)ry  copycompute_objectiver  rQ   r  optunar;  r  _is_multi_objectivereportshould_prunerY  on_train_endr   rp  rw  TrialPrunedr	  	ray.traintempfileTemporaryDirectoryra  _tune_save_checkpointtrain
Checkpointfrom_directory)r  r  rL  r  r   raytemp_checkpoint_dirr  r   r   r   _report_to_hp_search1  s0   

"zTrainer._report_to_hp_searchr  c                 C   s   t j|t d| jj }| j|dd | jjrK| j	 | jj
d< | jt j|t t| j t j|t t| j t j|t d S d S )NrF  T_internal_callr7   )rb  pathr  rM   rp  global_step
save_modelr   ra  rw  r  save_to_jsonTRAINER_STATE_NAMEr   saverT  
state_dictOPTIMIZER_NAMErU  SCHEDULER_NAME)r  r  r   r   r   r   r)  I  s    zTrainer._tune_save_checkpointc                 C   sL   t | j}|dkr|  }n|dkr| |}ntd|d u r$td|S )Nr   r	   z'model_init should have 0 or 1 argument.z"model_init should not return None.)r`   r   r.  )r  r  model_init_argcountr   r   r   r   r-  S  s   

zTrainer.call_model_initFc                    s   |s|d u rt d |S tt| |   zt|}|  |jdd }|r.||_	t
dd}| jj|dk t W tttjjtdkrot tr^tjj| dd}n1tjj| fdd	 D dd}n g } D ]}t | }	||	 qst|}tjj||dd
}W d    n1 sw   Y  W d    n1 sw   Y  tj|}t  |di   |di   W d    n1 sw   Y  |}d| _W |S  ttttt fy }
 zt d|
 d W Y d }
~
|S d }
~
ww |S )NzAfailed to use PyTorch jit mode due to current dataloader is none._original_forwardF)cache_enabled)autocast_handlerr   )example_kwarg_inputsstrictc                    s   i | ]}| | qS r   r   )r   r  example_batchr   r   r  w  s    z0Trainer.torch_jit_model_eval.<locals>.<dictcomp>r@  z'failed to use PyTorch jit mode due to: .r   )!r  r<  nextiter_prepare_inputsr  r  __dict__r  rP  r   rO  autocastr   no_gradr   r   r   base_versionr   r  jittrace	ones_likerV  r  freezerl  r.  r  r  r  
IndexError)r  r   r  r  	jit_modeloriginal_forwardr>  
jit_inputsr  example_tensorer   rA  r   torch_jit_model_evala  s\   




 
zTrainer.torch_jit_model_evalc                 C   s   t  stddd l}|s+|  | js| jjrtjn|}|j	||dd| j d}|S |j
s2|  |j	||| jddd\}| _|S )NzUsing IPEX but IPEX is not installed or IPEX's version does not match current PyTorch, please refer to https://github.com/intel/intel-extension-for-pytorch.r   O1F)r   levelconv_bn_foldinginplaceT)r   rT  rZ  rX  )r|   r=  intel_extension_for_pytorchr  r%  r   rH  r   rs  optimizer  r*  rT  )r  r   r  r   ipexr   r   r   ipex_optimize_model  s    zTrainer.ipex_optimize_modelc              	   C   s   dddd}d}d}|  D ],\}}t||d }t||d }	|d ur:|	d ur:||	kr:|d| d| d	|	 d
7 }d}q|j}
|jtd|j }|
|krX|d|
 d	| d
7 }d}|rat| d S d S )Nlogging_steps
eval_steps
save_steps)r_  r`  ra  FztWarning: The following arguments do not match the ones in the `trainer_state.json` within the checkpoint directory: z
	r  z (from args) != z (from trainer_state.json)Tr	   z
	per_device_train_batch_size: )r  r4  r  r|  maxn_gpur  warning_once)r  training_argstrainer_stateattributes_maphas_warningwarning_strarg_attr
state_attr	arg_valuestate_valuetrain_bs_argstrain_bs_stater   r   r   #compare_trainer_and_checkpoint_args  s*   z+Trainer.compare_trainer_and_checkpoint_argsc                    sR  j jrjr
tjntj}j|||d}t r,tj	t
jjr#j	S t
j|j jdS j||ur6|S jrI|rItj|jj jd\}_j jdkrZt|ddsZt|}j jrst }|||}tt | d_|sw|S jrAzdd	lm   dd
lm! ddl"m#}m$} j%rddl&m' W n t(y   t(dw d }d }	t|dd }
j j)*d|
}j j)d dkrt+j,|j j)d d}n%|d urt- }|D ]}t.||}|d u rt/d|0| qt+j,||d}j j1}j j)d r|j2j3r
t45d d|j2_3 fdd}	j%r'dd }||||	d _}n |f||	d| _}di fdd}|t6_7|S t8 rUtj9j:|t;t<=dgd}|S j j>t?j@krtA rc|S i }j jBd ursj jB|d < nt|tCr|jD |d < nd!|d < j jEd urj jE|d"< j jFd urj jF|d#< tGd$i |j_H|S )%N)r   )backward_passes_per_step)	opt_levelr	   is_loaded_in_8bitF   r   )XlaFullyShardedDataParallel)checkpoint_module)size_based_auto_wrap_policytransformer_auto_wrap_policy)SpmdFullyShardedDataParallelzJMissing XLA FSDP related module; please make sure to use torch-xla >= 2.0._no_split_modulestransformer_layer_cls_to_wrapmin_num_params)r|  z@Could not find the transformer layer class to wrap in the model.)transformer_layer_clsxla_fsdp_grad_ckptzX`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.c                    s(   j s n}|| g|R i |S r   )r  )mr   kwargs
target_clsFSDPFSDPv2rv  r  r   r   auto_wrapper_callable  s   z2Trainer._wrap_model.<locals>.auto_wrapper_callablec                 S   sh   ddl m} d }t| tjr| }nt| tr| d }nt| |r#| j}|d u r+tdt	||d d S )Nr	   )CausalLMOutputWithPastr   zASomething went wrong, the output of the model shouldn't be `None`)r  NN)
modeling_outputsr  r   r   Tensorr  logitsr  r  mark_sharding)outputmeshr  real_outputr   r   r   shard_output  s   


z)Trainer._wrap_model.<locals>.shard_output)r  auto_wrap_policyr  )r  r  c                 S   s    | j di |}|rt  |S )Nr   )rL  r  	mark_step)rT  barrieroptimizer_argslossr   r   r   patched_optimizer_step9  s   z3Trainer._wrap_model.<locals>.patched_optimizer_stepSMDATAPARALLEL_LOCAL_RANK)
device_idsfind_unused_parametersTbucket_cap_mbbroadcast_buffersr   )Ir   use_ipexrl  r   rs  r~  r^  r   r   rN  ro  r   DistributedModelr  rO  r(   rk  r   
initializerT  fp16_opt_levelrc  r4  r   DataParalleljit_mode_evaltimerV  roundjit_compilation_timerB  torch_xla.distributed.fsdpru  rv  torch_xla.distributed.fsdp.wraprw  rx  r  7torch_xla.experimental.spmd_fully_sharded_data_parallelry  r=  rA  r  	functoolspartialr6  rD   	Exceptionaddxla_fsdp_configconfig	use_cacher  rd  r  optimizer_stepr   parallelDistributedDataParallelr  rb  getenvrD  re   rE  r   ddp_find_unused_parametersr&   is_gradient_checkpointingddp_bucket_cap_mbddp_broadcast_buffersr   ddp_handler)r  r   r  r  r   
start_timerw  rx  r  r  %default_transformer_cls_names_to_wrap"fsdp_transformer_layer_cls_to_wraptransformer_cls_to_wraplayer_classtransformer_clsfsdp_kwargsr  r  r  r   r  r   _wrap_model  s   



	zTrainer._wrap_modelresume_from_checkpointignore_keys_for_evalc           	      K   s  |du rd}| j   | j}d| _| jdur| | j| _|js#|jr6|j	s6| j
s6| jdu r6| | j|j d|v rE|d}tdt t|dkrZtddt|  d	| | | jj| _d}| jdur| jjrut| jjnt| jj | || _d}d
\| _| _ t!|t"r|rt#|j$}|du rt%d|j$ d|durt& s| j's| j(s| )| t*+t,j-|t.}|jdur|j| _|r| j/r| | j|j | j| _0t1| j2| j|j3}|j4rzt56  |||||dW t57  S t57  w |||||dS )a  
        Main training entry point.

        Args:
            resume_from_checkpoint (`str` or `bool`, *optional*):
                If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
                `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance
                of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here.
            trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
                The trial run or the hyperparameter dictionary for hyperparameter search.
            ignore_keys_for_eval (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions for evaluation during training.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments used to hide deprecated arguments.
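
        Example (an illustrative sketch, not part of the original docstring; it assumes `model`, `train_dataset`
        and `eval_dataset` have already been created by the caller):

        ```python
        from transformers import Trainer, TrainingArguments

        args = TrainingArguments(output_dir="my-run", num_train_epochs=1, per_device_train_batch_size=8)
        trainer = Trainer(model=model, args=args, train_dataset=train_dataset, eval_dataset=eval_dataset)

        # Start from scratch, or pass `resume_from_checkpoint=True` to pick up the last checkpoint in `output_dir`.
        train_result = trainer.train()
        print(train_result.metrics)
        ```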
        FNT
model_pathzi`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` instead.r   z*train() got unexpected keyword arguments: r  rD  r   z/No valid checkpoint found in output directory ())r   r  r  r  )8r(  r)  r   r%  rS  r  r   rG  rH  rI  r5  r   rL  r   r  r/  r0  r1  r8  r  r  r   r  r  r|  r}  r!  r[   r"  rb   r-  rT  rU  r   r  r]   r   r  r   rC  rJ  _load_from_checkpointr8   load_from_jsonrb  r2  r6  rF  rN  r\   _inner_training_loopauto_find_batch_sizer_  hf_hub_utilsdisable_progress_barsenable_progress_bars)	r  r  r  r  r  r   model_reloadedrp  inner_training_loopr   r   r   r*  [  s   









zTrainer.trainc           ;      C   s  | j   || _| jjrE| jj| jkr@ddlm} || j	\| _	| j
| _	| jr@| jj}| jtd| jj | j_| d || j_| j| j_td| j  |  }| jrYt|}| j|j |j }	| |||	\}
}}}}}}d }| jjr| ||r}d n|}|d ur|r||j9 }n||j9 }tj| jjv r| jjdkrtdt| j
}t p| j p| j!}| j!ot"| j jj#dddk}|rd	}| j$rd | _%d	| _$| jrt&| |d
\| _'| _%|s| j(|d
 t)dd | j*j+| j,g D d| _|d u| j_-| j| j_| j.|| |j/r| j
j0|j1d | 2| j	}|| j
u rdnd	}|r.| j!r.t3| j
dd| _
|rM|rG| 4  | j j5dkrG| j 6| j
| _
| j(|d
 |r| j
7  t8| j%dru| j9rh| j 6| j
}n1| j 6| j
| j'\}| _'n$| j 6| j
| j'| j%\}| _'| _%n| jj:t;j<t;j=fv r| j 6| j'| _'| j!r| | _
| _	|| j
ur|| _	| jr| j	| _>|d ur| jrt?| j	|t@| j
 d nt s| j!r| A|| j	 | B| | C| tDd tDd|d tDd|
d tDd| jjd | jj| jkrtDd| jd tDd|	d tDd|j  tDd|d tDdtE|ddd d| j_FtGG }d}d}d }|d urtHjIJtHjIK|tLrt)MtHjIK|tL| _| N| j| j | O  tP| jjQ| }|jRs| jjQ| }||j9 }nd}tDd tDd|  tDd | jjQ  |jRstDd!| d"| d# d$D ]}tS| j*|t"| | q|| j*_T| jU| ||
| tVjWd%|jXd&}d%| _Y| jjQ| _Z|[  d }d }| j*\|| j| j,| _,|j]r| j^||dd' t_||
D ]
}|} t8| d(r| `| |jadkr d | _b|d ur)tc| n|jd|j }!| j*e|| j| j,| _,||krN|d urN|dkrN| f| d	}"d}#|dkrbtg| |} |}#d}d}"d)}$th| }%||j }&|&dkru|j}&d)}'|!|j d }(|jdkr|(d8 }(t_|(D ]4})|'d7 }'|'|(d kr|jn|&}*| i|%|*|jX\}+},tj|+D ]\}-}.|$d7 }$|$d |j dkp|$d |!k}/| j jkl|/ | jjmrt"| j
d*d+}0|0|.vrtnd, n!|.|0 o }1tVjW|1| jjXtVjpd-}1| j jq| j r|1s t 7  _q|"r| f| d	}"|dkr.|d8 }|d ur"|ud |dkr,| f| q|d ur9|v  d }|$|j dkrL| j*w|| j| j,| _,|-tc|+d krf| j jxtyjzkrft{j|| j j}|d.nt~j}2|2  | ||.|,}3W d    n	1 sw   Y  |jrt stV|3stV|3r||d| jjQ | jZ   }n|jX|3jXkrtd/|jX d0|3jX ||3 }|  jt| |.7  _|/r| j jkld |jd ur'|jdkr't r|jr| j'|j}4n| j9r tjt| j'|j}4n
| j | |j}4t r%| j jxtyjzkr%| }t8|d1r$|t }n|4}| j*|| j| j,| _,| j'  | j*|| j| j,| _,|  }| j jsZt| j%tVj:j%jsZ| j%  |[  | j jQd7  _Q||$d |# |!  | j_F| j*|| j| j,| _,| j||||||||d2 n| j*|| j| j,| _,| j,js| j,jrt rt   nq| j,js| j,jrt rt   nq|$dk rtnd3| jjQ d4| d5 d| j,_| j*|| j| j,| _,| j||||||||d2 tj| jjv r
t rtt  ntnd6 | j,jr nq|jar"t8| d7r"t| d7 tDd8 |jrT| jjd urTt r<td9 n|jtjkrHt  nt rPt  |   |  jY|t 7  _Yt| jjQd:}5| jY|5 }6td;||| jjd|d<}7|   | jj|7d=< |6|7d>< d	| _| j|7 | |7 | |}8| jd	|8d?}9| jjr| jjd ur| jjdkr|9D ]}:tHjI|:| jjstDd@|: dA tj|:ddB q| j*|| j| j,| _,|   | jd ur| | j
 t| jjQ|6|7S )CNr   )release_memoryr	   Tz)Currently training with a batch size of: zCurrently --debug underflow_overflow is not supported under DP. Please use DDP (torchrun or torch.distributed.launch (deprecated)).fsdp_version   F)r  c                 S   r  r   r  r  r   r   r   r   	  r  z0Trainer._inner_training_loop.<locals>.<listcomp>)r  )gradient_checkpointing_kwargs)	recursivefp8rL  load_module_strictz***** Running training *****  Num examples = r<  z  Num Epochs = z(  Instantaneous batch size per device = zA  Training with DataParallel so batch size has been adjusted to: zE  Total train batch size (w. parallel, distributed & accumulation) = z   Gradient Accumulation steps = z  Total optimization steps = z#  Number of trainable parameters = )trainable_onlyzE  Continuing training from checkpoint, will skip to saved global_stepz!  Continuing training from epoch z'  Continuing training from global step z  Will skip the first z epochs then the first z batches in the first epoch.)r   rT  rU  r  r   )skip_scheduler	set_epochmain_input_namer  zTried to track the number of tokens seen, however the current model is not configured properly to know what item is the input. To fix this, add a `main_input_name` attribute to the model class you are using.)r   r   r   z0Calculated loss must be on the original device: z but device in use is item)r  zXThere seems not to be a single sample in your epoch_iterator, stopping training at step zI! This is expected if you're using an IterableDataset and set num_steps (z.) higher than the number of available samples.zYou enabled PyTorch/XLA debug metrics but you don't have a TPU configured. Check your training configuration if this is unexpected._pastzU

Training completed. Do not forget to share your model on huggingface.co/models =)

r  gMbP?r*  )num_samples	num_stepsr  
total_flos
train_loss	use_mtimer   Deleting older checkpoint [] due to args.save_total_limitignore_errors)rO  r  r}  r   r  rp  r|  r  r  rN  r   rC  r  rb  rc  propagate_args_to_deepspeedr  r'  r  r  r$   r  r  set_initial_training_valuesinclude_tokens_per_secondr  rf  r   UNDERFLOW_OVERFLOWr  r   r   rB  rJ  r4  fsdp_pluginr~  rU  r!   rT  r  r8   rY  r   rw  is_hyper_param_searchcompute_stepsgradient_checkpointinggradient_checkpointing_enabler  r  r(   _fsdp_qlora_plugin_updatesmixed_precisionr  r*  r;  rk  r   rd   r  r  r$  r"   r   r  _load_optimizer_and_scheduler_load_scalerr  rC   epochr  rb  r2  isfiler  r6  r  rp  _load_callback_stater  r3  ignore_data_skipr  train_dataloaderinit_training_referencesr   r  r   _total_loss_scalar_globalstep_last_loggedrM  on_train_begineval_on_start	_evaluater  r  
past_indexr  r8  re  on_epoch_begin_load_rng_stater   rF  get_batch_samples	enumerategradient_state_set_sync_gradientsinclude_num_input_tokens_seenr<  r  int64num_input_tokens_seengatherr%  r  rW  closeon_step_begindistributed_typer   	DEEPSPEEDr  r  no_syncr   r   training_steplogging_nan_inf_filterr   isnanisinfrx  r  floating_point_opsmax_grad_normr  clip_master_gradsr   rg  clip_grad_norm_r   master_paramsr   rt   get_global_grad_normon_pre_optimizer_steprL  on_optimizer_stepr   optimizer_step_was_skippedr   ReduceLROnPlateauon_step_end_maybe_log_save_evaluateon_substep_endshould_epoch_stopshould_training_stopr  r  on_epoch_endTPU_METRICS_DEBUGmaster_printmetmetrics_reportdelattrr  best_model_checkpoint
rendezvousrD  re   rE  distr  ro  _load_best_modelrc   
store_flosr  r%  r(  r  log_get_output_dir_sorted_checkpointsra  save_total_limitsamefileshutilrmtreer$  _finish_current_pushrS  r  rW   );r  r  r   r  r  r  r  original_bsr  total_train_batch_sizerf  num_update_steps_per_epochr  num_train_samplesepoch_basedlen_dataloaderre  num_train_tokensdebug_overflowdelay_optimizer_creationis_fsdp2r   use_accelerator_preparer  epochs_trainedsteps_trained_in_current_epochsteps_trained_progress_barattrtr_loss	grad_normr  r  epoch_dataloadersteps_in_epochrng_to_syncsteps_skippedrL  epoch_iterator	remainderupdate_steptotal_updatesrE  num_batchesbatch_samplesnum_items_in_batchiinputsdo_sync_stepr  input_tokenscontexttr_loss_step
_grad_normeffective_global_stepr  r  run_dircheckpoints_sortedr  r   r   r   r    s  


	

















 

 


















&zTrainer._inner_training_loopc                 C   s   | j d urW|d urW| j tjkr|j}n*| j tjkr%dd l}|j  }n| j tj	kr/|j
}n| j tjkr=dd l}|jj
}| jd urG| |nd| }tj| jj|}|S | jj}|S )Nr   zrun-)ry  rQ   r  numberr	  r&  r*  get_contextget_trial_idr
  idr  r  runr#  rb  r2  r  r   r   )r  r  run_idr-  r  run_namerX  r   r   r   r-  
  s    zTrainer._get_output_dirc                    s  |d u r| j }tj t}tj t}tj t}tj t}tj t}tj t	}tj t
}	tj oYt fddt D pYtjtj t d}
tj rl fddt D ng }|
r{| js{td  dtdd ||||	||fD s|
s|std	  td
  d tj|rt|}|j}|d ur|tkrtd| dt d tj|stj|s|
rQt rtjtj drtj tddd d S t| jdr| jjdu rtd tj |ddd}d|d< |j!|dd}~d S | jr&t"| j#j$j%| j#| fi t&  d S | jj'r;tj|r;t(jj)|dd}ntj |ddd}|!|d}~| *| d S t+|rt|dsbt|drt|drtj, rt|dr|j-}t.|dkrtd |d }n|j/}|r|D ]}tj |}|j0||||kd  q|1| d S |j0 |dd  d S td!t d" d S td# d S t2| t | jj'd$}t s| *| d S d S )%Nc                 3   s.    | ]}t jt j |rt|v V  qd S r   )rb  r2  isdirr  FSDP_MODEL_NAMEr   folder_namer  r   r   r  
  s    
z0Trainer._load_from_checkpoint.<locals>.<genexpr>z.binc              	      sV   g | ]'}t jt j |rt jt j |ts't jt j |tr|qS r   )rb  r2  ra  r  r  ri   rh   rc  re  r   r   r   
  s    z1Trainer._load_from_checkpoint.<locals>.<listcomp>zCheckpoint found at z* is only supported when using PyTorch FSDPc                 s   s    | ]	}t j|V  qd S r   )rb  r2  r  )r   fr   r   r   r  
  s
    

z!Can't find a valid checkpoint at zLoading model from rD  z9You are resuming training from a checkpoint trained with z- of Transformers but your current version is zJ. This is not recommended and could yield to errors or unwanted behaviors.user_content.ptFr2  tagr  load_optimizerr  TzOEnabling FP16 and loading from smp < 1.10 checkpoint together is not supported.r   map_locationweights_only_smp_is_partialrC  r  active_adapteractive_adaptersload_adapterr	   zFMultiple active adapters detected will only consider the first adapterr   )r>  jThe intermediate checkpoints of PEFT may not be saved correctly, consider using a custom callback to save i in corresponding saving folders. Check some examples here: https://github.com/huggingface/peft/issues/96GCould not load adapter model, make sure to have `peft>=0.3.0` installed)r@  prefer_safe)3r   rb  r2  r  rj   ri   rh   rn   rm   rl   rk   ra  rR  listdirr  rb  rJ  r  r  r  r   from_json_filetransformers_versionr   r<  r   ro  r  r;  r   r  r   loadload_state_dictr   rO  rp  r  r   save_safetensorssafetensors	load_file_issue_warnings_after_loadr   existsrp  r8  ro  rq  set_adapterr'   )r  r  r   config_fileadapter_weights_fileadapter_safe_weights_fileweights_fileweights_index_filesafe_weights_filesafe_weights_index_fileis_fsdp_ckptadapter_subdirsr  checkpoint_versionr8  load_resultrp  ro  subdir_namepeft_idr   re  r   r  
  s   





	


zTrainer._load_from_checkpointc              
   C   s  t d| jj d| jj d tj| jjt}tj| jjt	}tj| jjt
}tj| jjt}t r:| jn| j}| jrPt| j| jjt| j d d S | jrgt| jjj| j|| jjfi t }d S tj|stj|stj|stj|rd}t rtjtj| jjdrtj| jjtddd d S | jjrtj|rtjj|d	d
}ntj |d	dd}d|d< |j!|dd}d S t|rUt"|dst"|drMt"|drMt"|dr|j#d }	t$|j#dkrt %d n|j&}	tj|stj|rAz
|'| jj|	 W n% t(y4 }
 z|j)|	 j*r/d|j)|	 j+j, d}t(||
 d }
~
ww ddl-m.} |g g }n7t %dt
 d d}n+t %d d}n#| jjrjtj|rjtjj|d	d
}ntj |d	dd}|!|d}t s|r| /| d S d S d S tjtj| jjt0stjtj| jjt1rt2|| jjt d}t s| /| d S d S t %d| d d S )NzLoading best model from z	 (score: rH  r  Trg  Frh  r   r  rk  rn  rC  ro  rp  rq  r   r	   zCDetected multiple active adapters, will only consider the first onez0When using prompt learning PEFT methods such as z, setting load_best_model_at_end=True can lead to errors, it is recommended to set this to False and to load the model manually from the checkpoint directory using PeftModel.from_pretrained(base_model, <path>) after training has finished.)_IncompatibleKeysrr  rs  rt  z#Could not locate the best model at zi, if you are running a distributed training on multiple nodes, you should activate `--save_on_each_node`.)3r  r  rp  r'  best_metricrb  r2  r  rn   rl   ri   rh   r   rN  r   rC  r"   r   rJ  r   rO  r  r   r  r  ro  r  r   r{  r|  r   r}  ry  rz  r;  rp  r8  r<  ro  rq  r.  peft_configis_prompt_learning	peft_typer  torch.nn.modules.moduler  r~  rk   rm   r'   )r  best_model_pathbest_safe_model_pathbest_adapter_model_pathbest_safe_adapter_model_pathr   r  has_been_loadedr8  ro  excmsgr  r   r   r   r*  ]  s    







	





"
zTrainer._load_best_modelc                 C   sz   t |jdkr(| jjd urt|jt| jjkr| j  n
td|j d t |jdkr;td|j d d S d S )Nr   z8There were missing keys in the checkpoint model loaded: rD  z;There were unexpected keys in the checkpoint model loaded: )	r8  missing_keysr   _keys_to_ignore_on_saver6  r  r  r<  unexpected_keys)r  r  r   r   r   r~    s   z"Trainer._issue_warnings_after_loadc                 C   s   | j |d}| || jj| t| jtjjjrS|sS| j	j
}|ds(d| }z| j||  W |S  tyR } ztd| dt|  d| d|d }~ww |S )Nignore_keyseval_9The `metric_for_best_model` training argument is set to 'W', which is not found in the evaluation metrics. The available evaluation metrics are: zX. Please ensure that the `compute_metrics` function returns a dictionary that includes 'zM' or consider changing the `metric_for_best_model` via the TrainingArguments.)evaluater/  rp  r3  r   rU  r   r   r  r   r   
startswithrL  r  r   r  )r  r  r  r  r  metric_to_checkr  r   r   r   r    s,   

	
zTrainer._evaluatec	                 C   s<  | j jrf| jj| jkrft rt  i }	| |	 
 }
||8 }t|
| jj| j  d|	d< |d urAt|tjr=|
 n||	d< |d urJ||	d< n|  |	d< |  j|
7  _| jj| _|   | |	| d }| j jr| ||}| j||d}| jjtjkr|| j _| j jr| || | j| j| j| j | _ d S d S )Nrt  r  rD  r  )r  r  )rw  
should_logrp  r3  r  r   r  r  _nested_gathermeanr  r  r   r   r  r   r  r+  r,  should_evaluater  _determine_best_metricr   r  rU   r  ra  _save_checkpointrY  on_save)r  rC  rD  r   r  r  r  r  r  logstr_loss_scalarr  is_new_best_metricr   r   r   r    s4   

z Trainer._maybe_log_save_evaluatec                 C   s  |d u rd S | j jdkr-| j j}tj|d| d}tj|s,td| d d S ntj|d}tj|sAtd d S t	  t
j|dd	}W d    n1 sVw   Y  t|d
  tj|d  t
j|d  t r|t|d  | j jtjk}t
j rtdt
j|| t rtdt
j|| t rtdt
j|| t rtdt
j|| t rtdt
j || d S d S )Nr	   
rng_state_.pthz$Didn't find an RNG file for process zr, if you are resuming a training that wasn't launched in a distributed fashion, reproducibility is not guaranteed.rng_state.pthzDidn't find an RNG file, if you are resuming a training that was launched in a distributed fashion, reproducibility is not guaranteed.Trm  pythonnumpyr   r   CUDANPUHPUMLUMUSA)!r   r  process_indexrb  r2  r  r  r  r  r   r   ry  r  setstater   	set_stateset_rng_stater   r  rD  re   rE  cudais_availablerL   r   npur   hpur   mlur   musa)r  r  r  rng_filecheckpoint_rng_stateis_distributedr   r   r   r   "  sJ   

zTrainer._load_rng_statec              
   C   s   d}| j jdurt| j j}|dsd| }z|| }W n ty8 } ztd| dt|  d|d}~ww | j jr@tjntj	}| j
jdu rW| j jrQtdntd| j
_||| j
jrt|| j
_| j jtjtjfv rr| j
j| j
_d	}|S )
z
        Determine if the model should be saved based on the evaluation metrics.

        Returns:
            bool: True if a new best metric was found, else False
        FNr  r  r  zJ. Consider changing the `metric_for_best_model` via the TrainingArguments.z-infinfT)r   r   r  r  r   r  greater_is_betterr   greaterlessrp  r  r  r  rU   STEPSEPOCHr3  best_global_step)r  r  r  r  r  metric_valuer  operatorr   r   r   r  M  s4   


zTrainer._determine_best_metricc                 C   sz  t  d| jj }| jd u r|d u r|   | j|d}tj||}| j	|dd | j
jtjtjfv rR| jjrRt  d| jj }tj||}tj|rR|| j_| j
jse| | | | | | | j
jrdd | jj| jg D D ]#}|jj}	| }
t| jj|	 tr| jj|	 |
 qv|
| jj|	< qv| j tj|t! | j
j"r| #| | j
jr| j$d|d d S d S )	NrF  )r  Tr0  c                 S   r  r   r  r  r   r   r   r     r  z,Trainer._save_checkpoint.<locals>.<listcomp>Fr  )%rM   rp  r3  ry  r+  r-  rb  r2  r  r4  r   r  rU   r  r  r  r  r'  save_only_model_save_optimizer_and_scheduler_save_scaler_save_rng_statera  rY  r   rw  r2  r3  r   r  r   rV  r5  r6  r_  _push_from_checkpoint_rotate_checkpoints)r  r   r  checkpoint_folderrX  r   best_checkpoint_folderbest_checkpoint_dirr  cb_namecb_stater   r   r   r  s  s<   




zTrainer._save_checkpointc              	   C   s  t  tj  tj  d}tj r+| jj	t
jkr#tjj  |d< ntjj  |d< t r4t |d< t rO| jj	t
jkrGtjj  |d< ntjj  |d< t rj| jj	t
jkrbtjj  |d< ntjj  |d< t r| jj	t
jkr}tjj  |d< ntjj  |d< t r| jj	t
jkrtj |d< ntj |d< tj|dd	 | jjd
krt|tj|d d S t|tj|d| jj d d S )N)r  r  r   r  r   r  r  r  r  Tr  r	   r  r  r  )r  getstater   	get_stater   get_rng_stater  r  r   rD  re   rE  get_rng_state_allr   r  r   r  r   r  r   r  r   r  rb  rc  r  r7  r2  r  r  )r  r   
rng_statesr   r   r   r    s<   
&zTrainer._save_rng_statec                 C   sv  t  rhtd | jr1| j | j d}tj|t	j
|d| jj d| jj dt dd nt| j t	j
|t tjdd	}t| j t	j
|t t| W d    n1 sbw   Y  nt r| jjdd
}t  t dkstjjjrtj|t	j
|tdtjjjd n]| jrdtt | j!j"j#$ v }|rt%| jr| j!j"|dd n<| j!"| n5| j&rt'| j(jj)| j(| j|fi t*  t+| j(jj)| j(| j| j| n| jj,rt-| j t	j
|t | jot.| jt/ }| jj,r5| jr|r9t  s7tjdd	}t-| j t	j
|t W d    n	1 s*w   Y  t| d S d S d S d S )Nsaving_optimizer_states)rT  shard_metadatar  -of-rF  Fmaster_onlyTrecord)gather_if_shardr   )r  v3exclude_frozen_parameters)r  )0r   r  r(  r  rT  r8  r   get_shard_metadatar7  rb  r2  r  r   r  r  r9  r/  catch_warningsrU  r:  rJ   r   local_state_dictro  r  rdp_rankrp  rq  shard_optimizer_staterC  r6  r   r   rN  save_checkpointr   r  r   rJ  r   rO  r  r   r   ra  r   r   r   )r  r   optmcaught_warningsopt_state_dict accept_exclude_frozen_parametersis_deepspeed_custom_schedulerr   r   r   r    s   

z%Trainer._save_optimizer_and_schedulerc              
      s:   du rdS | j r;t| jts9tjdd}| jtjt	j
 tdd W d   n1 s0w   Y  t| dS t rJtt	j
 td n)t	j
t	j
 tpst	j
t	j
 tpst	j
 ost fddt	 D }| jrtt	j
 d| jj d	t n|}|rt	j
t	j
 trt r| jrtjt	j
 d
| jj d| jj d	t ddd}|d }ntjt	j
 tddd}tjdd}tjt	j
 tddd}W d   n1 sw   Y  t| t|| jj t|| jj | j| | j| dS t r6t	j
t	j
 dr) fdd}n fdd}| j | n6| jjdkrA| jjnd}| j!r[t"| j#j$j%| j#| j| j& fi t'  n| jtjt	j
 t|dd tjdd}| jtjt	j
 tdd W d   n	1 sw   Y  t| dS dS dS )z3If optimizer and scheduler states exist, load them.NTr  r  _*c                 3   s8    | ]}t jt j |rtd d |v V  qdS )rD  r   N)rb  r2  ra  r  OPTIMIZER_NAME_BINr  rc  r  r   r   r  1  s    
z8Trainer._load_optimizer_and_scheduler.<locals>.<genexpr>z	rank*-of-rF  r  r  r   rk  rT  rg  c                    s"   | tjtj tdd d S )NTr  )rz  ro  ry  rb  r2  r  r9  modoptr  r   r   opt_load_hook_  s   "z<Trainer._load_optimizer_and_scheduler.<locals>.opt_load_hookc                    sJ   t r|tjtj tddd d S |tjtj tdd d S )NT)r  back_compatr  )rn  rz  ro  ry  rb  r2  r  r9  r  r  r   r   r  d  s
   "r	   )(rC  r   rU  r   r/  r  rz  r   ry  rb  r2  r  r:  rJ   r   globr9  r  r  ra  rR  rv  r  r   r  r   r  r  send_cpu_data_to_devicer   rT  rN  register_post_step_hookrJ  r   rO  rp  r  r   r   )r  r  r  checkpoint_file_existsoptimizer_statelr_scheduler_stater  rl  r   r  r   r    s   &
	z%Trainer._load_optimizer_and_schedulerc                 C   s   z| j j}W n
 ty   Y d S w |d u rd S t rHtd tjdd}t| j j	 t
j|t t| W d    n1 sCw   Y  | jjrzt s|tjdd}t| j j	 t
j|t W d    n1 sow   Y  t| d S d S d S )Nsaving_scaler_stateTr  )rO  scalerr  r   r  r(  r/  r  r7  r8  rb  r2  r  SCALER_NAMErJ   r   ra  r   )r  r   r  r  r   r   r   r    s&   

 zTrainer._save_scalerc                 C   s   |du rdS t jt j|t}|r{t rNtjdd}tj	t j|tddd}W d   n1 s4w   Y  t
| t|| jj | jj| dS tjdd}| jjtj	t j|tdd W d   n1 spw   Y  t
| dS dS )z If scaler state exists, load it.NTr  r   rk  r  )rb  r2  r  r  r  r   r/  r  r   ry  rJ   r  r  r   r   rO  r  rz  )r  r  r  r  scaler_stater   r   r   r    s(   zTrainer._load_scalerc                    sN  | j jsdS g }g }| jj| jg }| jj D ]o\ }t|t	s$|g}t
 fdd|D r fdd|D }t||D ]>\}}|di }|di }	t|di |}
|	 D ]
\}}t|
|| qZt|trn|
| _n||
 | jt|
 q=td q|  qt|d	krtd
d| d |D ]}| j| qdS )zLIf callback states exist and were passed in, restore their states if enabledNc                 3   s    | ]	}|j j kV  qd S r   r2  r3  r   r  stored_callbackr   r   r    s    z/Trainer._load_callback_state.<locals>.<genexpr>c                    s   g | ]
}|j j kr|qS r   r  r	  r
  r   r   r     s    z0Trainer._load_callback_state.<locals>.<listcomp>r   
attributeszPContinuing training from checkpoint, restoring any callbacks that were passed inr   zPCheckpoint included callbacks not included in current configuration. Ignoring. (r  r  r   )r   'restore_callback_states_from_checkpointrY  r   rw  rp  r  r  r   r   rR  zipr  r   r  r7   rV  r  r  r  r8  r<  r  rZ  )r  	not_foundnew_callbacksoriginal_callbacksrh  
duplicatesr  callback_datar   r  new_callback	attributer  r   r
  r   r    s>   



zTrainer._load_callback_state   minimizer  r  n_trials	directionbackendr  r#  c           
      K   s   |du rt  }t|}t|  }|  || _| jdu r td|du r'|jn|| _|| _	|du r3t
n|| _|j| ||fi |}	d| _|	S )az  
        Launch a hyperparameter search using `optuna`, `Ray Tune`, or `SigOpt`. The optimized quantity is determined
        by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,
        the sum of all metrics otherwise.

        <Tip warning={true}>

        To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to
        reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to
        subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom
        optimizer/scheduler.

        </Tip>

        Args:
            hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*):
                A function that defines the hyperparameter search space. Will default to
                [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or
                [`~trainer_utils.default_hp_space_sigopt`] depending on your backend.
            compute_objective (`Callable[[Dict[str, float]], float]`, *optional*):
                A function computing the objective to minimize or maximize from the metrics returned by the `evaluate`
                method. Will default to [`~trainer_utils.default_compute_objective`].
            n_trials (`int`, *optional*, defaults to 100):
                The number of trial runs to test.
            direction (`str` or `List[str]`, *optional*, defaults to `"minimize"`):
                If it is single-objective optimization, `direction` is a `str` and can be `"minimize"` or `"maximize"`;
                pick `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or
                several metrics. If it is multi-objective optimization, `direction` is a `List[str]` containing
                `"minimize"` and `"maximize"` entries; again pick `"minimize"` when optimizing the validation loss and
                `"maximize"` when optimizing one or several metrics.
            backend (`str` or [`~training_utils.HPSearchBackend`], *optional*):
                The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending
                on which one is installed. If all are installed, will default to optuna.
            hp_name (`Callable[["optuna.Trial"], str]]`, *optional*):
                A function that defines the trial/run name. Will default to None.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments for each backend:

                - `optuna`: parameters from
                  [optuna.study.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html)
                  and also the parameters `timeout`, `n_jobs` and `gc_after_trial` from
                  [optuna.study.Study.optimize](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize)
                - `ray`: parameters from [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run).
                  If `resources_per_trial` is not set in the `kwargs`, it defaults to 1 CPU core and 1 GPU (if available).
                  If `progress_reporter` is not set in the `kwargs`,
                  [ray.tune.CLIReporter](https://docs.ray.io/en/latest/tune/api/doc/ray.tune.CLIReporter.html) is used.
                - `sigopt`: the parameter `proxies` from
                  [sigopt.Connection.set_proxies](https://docs.sigopt.com/support/faq#how-do-i-use-sigopt-with-a-proxy).

        Returns:
            [`trainer_utils.BestRun` or `List[trainer_utils.BestRun]`]: All the information about the best run or best
            runs for multi-objective optimization. The experiment summary can be found in the `run_summary` attribute
            for the Ray backend.
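
        Example (an illustrative sketch, not part of the original docstring; it assumes the `optuna` backend is
        installed and that `args`, `train_dataset`, and a `model_init` function are defined by the caller):

        ```python
        def optuna_hp_space(trial):
            return {"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True)}

        trainer = Trainer(args=args, train_dataset=train_dataset, model_init=model_init)
        best_run = trainer.hyperparameter_search(
            hp_space=optuna_hp_space, backend="optuna", n_trials=10, direction="minimize"
        )
        print(best_run.hyperparameters)
        ```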
        NzXTo use hyperparameter search, you need to pass your model through a model_init function.)r   rQ   r   ensure_availablery  r   r.  default_hp_spacer  r#  rY   r  r^  )
r  r  r  r  r  r  r#  r  backend_objbest_runr   r   r   hyperparameter_search  s    @

zTrainer.hyperparameter_searchr  r  c                 C   s   | j jdur| j j|d< | jjr#| j j|d< |dur#td|| j jd i |d| j ji}| j j| | j	
| j| j | j|| _dS )a8  
        Log `logs` on the various objects watching training.

        Subclass and override this method to inject custom behavior.

        Args:
            logs (`Dict[str, float]`):
                The values to log.
            start_time (`Optional[float]`):
                The start of training.
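
        Example (an illustrative sketch, not part of the original docstring; `trainer` is assumed to be an existing
        [`Trainer`] instance):

        ```python
        # Float values passed here are appended to `trainer.state.log_history` and forwarded to the
        # configured callbacks (TensorBoard, Weights & Biases, ...).
        trainer.log({"custom_metric": 0.42})
        ```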
        Nr  r  r*  )r  rL  )rp  r  r   r  r  rc   r3  log_historyrV  rY  on_logrw  )r  r  r  r  r   r   r   r,  /  s   zTrainer.logrh  c                    s   t |trt| fdd| D S t |ttfr(t| fdd|D S t |tjrVd jj	i} j
rNt|sAt|rN|d jjjj i |jdi |S |S )	z|
        Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors.
        c                    s   i | ]
\}}|  |qS r   _prepare_inputr   r  r   r   r  K  rJ  z*Trainer._prepare_input.<locals>.<dictcomp>c                 3       | ]}  |V  qd S r   r"  )r   r  r  r   r   r  M      z)Trainer._prepare_input.<locals>.<genexpr>r   r   Nr   )r   r   r   r  r  r   r   r  r   r   rC  is_floating_point
is_complexrW  rO  rp  r  r  r   r  )r  rh  r  r   r  r   r#  F  s   
zTrainer._prepare_inputrQ  c                 C   sR   |  |}t|dkrtdd| j d| jjdkr'| jdur'| j|d< |S )z
        Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and
        handling potential state.
        r   zThe batch received was empty, your model won't be able to train on it. Double-check that your training dataset contains keys expected by the model: r<  rD  Nmems)r#  r8  r  r  rj  r   r  r  r  rQ  r   r   r   rG  X  s   


zTrainer._prepare_inputsc                 C   s   |   S )zF
        A helper wrapper to group together context managers.
        )autocast_smart_context_managerr  r   r   r   compute_loss_context_managerh  s   z$Trainer.compute_loss_context_managerr=  c                 C   s*   | j rtjjj|| jd}|S t }|S )z
        A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired
        arguments, depending on the situation.
        )r=  r   )rl  r   r   r   rI  rt  r   r   )r  r=  ctx_managerr   r   r   r*  n  s
   z&Trainer.autocast_smart_context_managerc                 C   s  |   t| jdrt| jj r| j   | |}t r0t||| jj}|	 
 | jjS |   | j|||d}W d   n1 sGw   Y  ~| jjdur| jj| jj dkrt rftj  n4t rotj  n+t rxtj  n"t rtj  ntddrtj  nt rt d ntj!  i }| jj"t#j$t#j%fv r| & |d< | jj'd	kr|( }| j)rt*+|| j}|,  W d   dS 1 sw   Y  dS | j-s| j.du r|| jj }| j/j0t1j2krd
|d< | j/j,|fi | |
 S )aq  
        Perform a training step on a batch of inputs.

        Subclass and override to inject custom behavior.

        Args:
            model (`nn.Module`):
                The model to train.
            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument `labels`. Check your model's documentation for all accepted arguments.

        Returns:
            `torch.Tensor`: The tensor with training loss on this batch.
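
        Example (an illustrative sketch of a subclass override, not part of the original docstring; the extra
        `*args`/`**kwargs` simply forward any version-specific arguments to the parent implementation):

        ```python
        class VerboseTrainer(Trainer):
            def training_step(self, model, inputs, *args, **kwargs):
                # Delegate the forward/backward pass to the parent implementation and only add logging.
                loss = super().training_step(model, inputs, *args, **kwargs)
                if self.state.global_step % 100 == 0:
                    print(f"step {self.state.global_step}: loss={loss.item():.4f}")
                return loss
        ```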
        r*  rO  Nr   z2.0)min_versionzW`torch_empty_cache_steps` is set but HPU device/backend does not support empty_cache().r  r	   Fscale_wrt_gas)3r*  r;  rT  rd  rG  r   r   r   r  reduce_meandetachr  r   r+  compute_losstorch_empty_cache_stepsrp  r3  r   r   xpuempty_cacher   r  r   r  r   r  r   mpsr   r  r<  r  r   rd   r  r  r   rc  r  rk  r   
scale_lossbackwardrQ  r   rO  r  r   r  )r  r   rQ  rO  loss_mbr  r  scaled_lossr   r   r   r  z  sV   





"zTrainer.training_stepc                 C   s  | j dus
| jdurd|v r|d}nd}| jr)i }|dur#||d< i ||}|di |}| jjdkr=|| jj | _|durz| j|}t	|rR|j
j }	n| }	| jdurd| j|||d}
nA|	t v rs| j ||dd}
n2|  ||}
n+t|trd|vrtd	d
|  dd
|  dt|tr|d n|d }
| jjr| js| jr|dur|
| jj9 }
|r|
|fS |
S )z
        How the loss is computed by Trainer. By default, all models return the loss in the first element.

        Subclass and override for custom behavior.
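
        Example (an illustrative sketch of a subclass override, not part of the original docstring; it assumes a
        two-class classification model whose batches contain a `labels` key, and the class weights are made up for
        the example):

        ```python
        import torch
        from torch import nn

        class WeightedLossTrainer(Trainer):
            def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
                labels = inputs.pop("labels")
                outputs = model(**inputs)
                logits = outputs.logits
                # Up-weight the positive class when computing the cross-entropy loss.
                loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 3.0], device=logits.device))
                loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
                return (loss, outputs) if return_outputs else loss
        ```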
        NlabelsrO  r   r-  T)shift_labelsr  zJThe model did not return a loss from the inputs, only the following keys: r<  z,. For reference, the inputs it received are rD  r   )rv  r   r  rQ  r   r  r  rO  r(   r   r  r   	_get_namer)   r7  r   r  r  r  r  average_tokens_across_devicesnum_processes)r  r   rQ  return_outputsrO  r;  loss_kwargsoutputsr  r  r  r   r   r   r2    sN   
zTrainer.compute_lossc                 C   s   | j jdkS )z
        Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
        machines) main process.
        r   )r   local_process_indexr  r   r   r   r    s   zTrainer.is_local_process_zeroc                 C   s   t  r	t dkS | jjdkS )z
        Whether or not this process is the global main process (when training in a distributed fashion on several
        machines, this is only going to be `True` for one process).
        r   )r   ro  r  r   r  r  r   r   r   r    s   zTrainer.is_world_process_zeror   r1  c                 C   s~  |du r| j j}t r| | nt r9tj|dd | j }| j j	r+| j
||d tr8ttj|d  nt| jrcdt| jjjjv rbtttdkrb| j| j}| j j	rb| j
||d nJ| jrz| j| j}| j j	ry| j
||d W n2 ty   td | j j	r| j
|i d t | j j	|t!t"g | j#| Y n
w | j j	r| 
| | j j$r|s| j$d	d
 dS dS dS )z
        Will save the model, so you can reload it using `from_pretrained()`.

        Will only save from the main process.
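
        Example (an illustrative sketch, not part of the original docstring; `trainer` is assumed to be an existing
        [`Trainer`] instance wrapping a `PreTrainedModel`):

        ```python
        from transformers import AutoModelForSequenceClassification

        trainer.save_model("my-finetuned-model")
        # The saved directory can be reloaded later with `from_pretrained()`.
        reloaded = AutoModelForSequenceClassification.from_pretrained("my-finetuned-model")
        ```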
        NTr  )r8  rg  FULL_STATE_DICTz0.24.1z| stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use zero_to_fp32.py to recover weightsz
Model save)commit_message)%r   r   r   	_save_tpur   rb  rc  rN  r8  ra  _savern  r   r2  r  touchrJ  r  rO  rp  r  state_dict_typer   r   accelerate_versionget_state_dictr   rC  r$  r  r  r<  rK   rn   rl   r  r_  )r  r   r1  r8  r   r   r   r4    sN   

zTrainer.save_modelc              	   C   s2  |d ur|n| j j}td|  | j}t  tjddr2tj	|dd t
| j tj|t tf}td | jr| | d}tj|d| j j d	| j j d
t }tj||dd td | j jrddlm} |tj|ddt dd\}}|jj}| j|}	t|	|r|	j||tj| j jd ndtd t|tj|t nSt||st| j||r| j|j|| j jt | tj| j jd n,td t | }
t|
tj|t n|j|| j jtj| j jt | d | j!d ur| j jr| j!| d S d S d S )NSaving model checkpoint to F)localTr  saving_checkpoint)r   r  r  r  rF  r  save_full_checkpointsr   )%consolidate_sharded_model_checkpointsr  zrank*-of-*-)ckpt_prefixckpt_suffixr4  )r8  save_functionsafe_serializationETrainer.model is not a `PreTrainedModel`, only saving its state dict.)is_main_processr8  rS  rT  )rV  rS  rT  r8  )"r   r   r  r  r   r  r  is_master_ordinalrb  rc  r   r7  r2  r  TRAINING_ARGS_NAMErq   r(  r  r8  r  r  r  rn   ra  r  rP  r/  rO  r(   r   save_pretrainedr{  _maybe_convert_to_cpur   )r  r   r   supported_classesckpt	ckpt_pathrP  full_state_dictrE  r  r8  r   r   r   rF  D  sv   






zTrainer._save_tpuc                 C   sh  |d ur|n| j j}tj|dd td|  t stfnttf}t	| j
|sr|d u r2| j
 }t	| j| j
|rL| j| j
j||| j jd n1td | j jrftjj|tj|tddid nt|tj|t n| j
j||| j jd | jd ur| j| n| jd urt| jd	r| jjd urtd
 | jj| t| j tj|t d S )NTr  rL  )r8  rT  rU  r  pt)r   r   zWSaving Trainer.data_collator.tokenizer by default as Trainer.processing_class is `None`)r   r   rb  rc  r  r  r   r&   r   r   r   r8  rO  r(   rY  r{  r|  r   	save_filer2  r  rl   r7  rn   r   r   r;  r   rX  )r  r   r8  r[  r   r   r   rG    s:   







zTrainer._savec                 C   s\   | j jtjkr | j jt| jg| j jd	 
 7  _d| _d S | j j| j7  _d| _d S )Nr  r   )r   rD  re   rE  rp  r  r@   rx  r   r%  r  r  r   r   r   r+    s   

zTrainer.store_flosc                 C   s  g }dd t || dD }|D ]1}|r#|tj||f qtd| d|}|d urD| d urD|t	| d |f qt
|}dd |D }| jjd urtt | jj|v r|tt | jj}	t|	t|d D ]}
||
d	  ||
 ||
< ||
d	 < qt|S )
Nc                 S   s    g | ]}t j|rt|qS r   )rb  r2  ra  r  )r   xr   r   r   r     s     z/Trainer._sorted_checkpoints.<locals>.<listcomp>-*z.*z	-([0-9]+)r   c                 S   r3  )r	   r   )r   r  r   r   r   r     r7  r  r	   )r   r  rV  rb  r2  getmtimerematchgroupsr  sortedrp  r'  r  indexr  r8  )r  r   checkpoint_prefixr  ordering_and_checkpoint_pathglob_checkpointsr2  regex_matchrY  best_model_indexrP  r   r   r   r.    s"   $zTrainer._sorted_checkpointsc                 C   s   | j jd u s| j jdkrd S | j||d}t|| j jkrd S | j j}| jjd ur9| j jdkr9|d | jjkr9d}tdt|| }|d | }|D ]}td| d t	j
|dd	 qJd S )
Nr   r  r	   r  r  r  r  Tr  )r   r/  r.  r8  rp  r'  rb  r  r  r1  r2  )r  r  r   rY  r/  number_of_checkpoints_to_deletecheckpoints_to_be_deletedr  r   r   r   r    s    zTrainer._rotate_checkpointsr  r  metric_key_prefixc              
   C   s  |du}|r|n| j }t|tr3i }| D ]\}}| j|r |n||| d| d}|| q|S | j  | |}	| j	rDt
|	}	t }
| jjrO| jn| j}||	d| jdu r\dnd||d}| jj| jj }| d|jv r||
|j| d 7 }
| d|jv r|
|j| d 7 }
|jt||
|jt|j| d	 | |j tj| jjv rtt  | j !| j| j"| j#|j| _#| j$|j |jS )
a  
        Run evaluation and returns metrics.

        The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
        (pass it to the init `compute_metrics` argument).

        You can also subclass and override this method to inject custom behavior.

        Args:
            eval_dataset (Union[`Dataset`, Dict[str, `Dataset`]], *optional*):
                Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns
                not accepted by the `model.forward()` method are automatically removed. If it is a dictionary, it will
                evaluate on each dataset, prepending the dictionary key to the metric name. Datasets must implement the
                `__len__` method.

                <Tip>

                If you pass a dictionary with names of datasets as keys and datasets as values, evaluate will run
                separate evaluations on each dataset. This can be useful to monitor how training affects other
                datasets or simply to get a more fine-grained evaluation.
                When used with `load_best_model_at_end`, make sure `metric_for_best_model` references exactly one
                of the datasets. If you, for example, pass in `{"data1": data1, "data2": data2}` for two datasets
                `data1` and `data2`, you could specify `metric_for_best_model="eval_data1_loss"` for using the
                loss on `data1` and `metric_for_best_model="eval_data2_loss"` for the loss on `data2`.

                </Tip>

            ignore_keys (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default)

        Returns:
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
            dictionary also contains the epoch number which comes from the training state.
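
        Example (an illustrative sketch, not part of the original docstring; `trainer` and `eval_dataset` are
        assumed to exist already):

        ```python
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        # Keys are prefixed with `metric_key_prefix`, e.g. "eval_loss", "eval_runtime", "eval_samples_per_second".
        print(metrics["eval_loss"])
        ```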
        NrE  )r   r  rp  
EvaluationT)r  prediction_loss_onlyr  rp  _jit_compilation_time_model_preparation_timer  r  )%r   r   r  r  r  rW  r(  r)  r  r  r$   r  r   r  prediction_loopevaluation_loopr   r  r  r  rc   r  mathceilr,  r   r"  r'  r  r#  r$  r%  rY  on_evaluaterp  rw  r  )r  r   r  rp  overrider  eval_dataset_name_eval_datasetdataset_metricsr  r  	eval_loopr  total_batch_sizer   r   r   r    sX   -




	zTrainer.evaluater  c           	   
   C   s   | j   | |}t }| jjr| jn| j}||d||d}| jj| jj	 }| d|j
v r:||j
| d 7 }| d|j
v rL||j
| d 7 }|j
t|||jt|j| d | j| j| j| j|j
| _| j |j
 t|j|j|j
dS )a  
        Run prediction and returns predictions and potential metrics.

        Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
        will also return metrics, like in `evaluate()`.

        Args:
            test_dataset (`Dataset`):
                Dataset to run the predictions on. If it is a `datasets.Dataset`, columns not accepted by the
                `model.forward()` method are automatically removed. Has to implement the method `__len__`.
            ignore_keys (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (`str`, *optional*, defaults to `"test"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "test_bleu" if the prefix is "test" (default)

        <Tip>

        If your predictions or labels have different sequence lengths (for instance because you're doing dynamic padding
        in a token classification task), the predictions will be padded (on the right) to allow for concatenation into
        one array. The padding index is -100.

        </Tip>

        Returns: *NamedTuple* A namedtuple with the following keys:

            - predictions (`np.ndarray`): The predictions on `test_dataset`.
            - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
            - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
              labels).
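
        Example (an illustrative sketch, not part of the original docstring; it assumes a classification model and
        an existing `trainer` and `test_dataset`):

        ```python
        import numpy as np

        output = trainer.predict(test_dataset)
        predicted_class_ids = np.argmax(output.predictions, axis=-1)
        print(output.metrics)  # e.g. "test_loss" and "test_runtime" when the dataset has labels
        ```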
        
Prediction)r  r  rp  rs  rt  ru  )predictionsr  r  )r(  r)  r  r  r   r  rv  rw  r  r  r  rW  rc   r  rx  ry  rY  
on_predictrp  rw  r  rS   r  r  )	r  r  r  rp  test_dataloaderr  r  r  r  r   r   r   predict^  s.   
$
	zTrainer.predictrr  c                 C   s  | j }|dur	|n|j}| jr| jdu rt| ddd\}}| j| jd|d}t| jj	dkrp|| ju rpt

 }	| jsC| jrI| jjdkrI| j|n| jj|dd}tt

 |	 d	| _| jra|| _|| juri|| _| jrp| j| _| js|jr|jtj|jd
}n|jr|jtj|jd
}| j j}
td| d t|rtd| |  ntd td|
  |  t | j!drt"| j!jr| j!  || j#_$t%|dd}|j&dkrd| _'t(| j j)dd}t(| j j)dd}t(| j j)dd}t(| j j)dd}d}i }d}t*|D ]N\}}t+|}|dur ||7 }|
du r |}
| j,||||d\}}}t%| jdd}d|j-v r@| .|| nd}t/ rJt01  |dur\| 2|3|
}|4| |dur~| jj5|ddd}| 2|}| j j6ry|dkr~|4| |dur| jj5|ddd}|dur| jj5|ddd}| j7dur| 7||}| 2|}| j j6r|dkr|4| |dur| 2|}| j j6r|dkr|4| | j#8|| j9| j:| _:| j j6r,| j;dur!|dur!|dur!| jj<j=}i }d|j-v r|nd|d< d|j-v r|nd|d< | j;t>d(||d||d}~~~~tj?@  q|jAdurU|d |jA dkrU|B  |B  |B  |B  ~~~~tj?@  q| jjC| _2|j&rkt | drktD| d |E }|E }|E }|E }t|rt|}n tF|tGrt%|d ddkr|j}nt|r| |}n|}|dkr|dkr|}| j;dur|dur|dur| j j6sd|j-v r|nd|d< d|j-v r|nd|d< | ;t>d(||d|}n|du ri }tH|}tF|tIr|rtJK|L M || d!< ntF|tJjNr!|L M || d!< t | d"r/| jO|| d#< t | d$r=| j|| d%< tI|P D ]}|Q| d&sZ|R||| d&| < qCtS||||d'S ))
        Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

        Works both with or without labels.
        Nr   Tr  	inferenceFr  r  r  evaluation_modert  r   r   
***** Running  *****r  z  Num examples: Unknown  Batch size = r  r  padding_indexr  r  r  rQ  r	   )dim	pad_indexr  r  lossesr  r  r   r  r  _lossr  rs  model_preparation_timert  rE  r  r  r  r  r   )Tr   rr  rC  r$  r!   r  r   r8  rO  _modelsr  rJ  r  r  prepare_modelr  r  rN  r%  rG  r  r   float16r   rH  rs  r  r  r  r^   r  r  r;  rT  rd  rY  r  r4  r  r  r:   eval_do_concat_batchesr  rB   prediction_stepinclude_for_metricsr#  r   r  r  gather_functionrepeatr  pad_across_processesr  r   on_prediction_steprp  rw  r   r  end_of_dataloaderrP   r  r5  eval_accumulation_stepsto_cpu_and_numpygather_for_metricsr&  
get_arraysr   r;   rZ   r   r   concatenater  r  r   r  r  r  r  rO   )r  r  r  rr  r  rp  r   rE  r   r  r  r   
all_losses	all_preds
all_labels
all_inputsr  eval_set_kwargsobserved_num_examplesrL  rQ  observed_batch_sizer  r  r;  r  inputs_decodeis_last_stepbatch_kwargsr  r  r   r   r   rw    s  


















 
 








zTrainer.evaluation_loopc                 C   s|   |du rdS t  r|du rd}t||}|S t rt|}|S | jjdur,| jjjdks8| jjdu r<| jjdkr<t|}|S )
        Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
        concatenating them to `gathered`
        Nnested_gatherNOr  )	r   rI   r   r   r   distributed_stater  
local_rankrA   r  tensorsnamer   r   r   r  t  s   
zTrainer._nested_gatherc              	      s  t | jdkr	dntfdd| jD }dd}|du r"| j}t | jdkr-|r-dnd}|  du rJt| jdrHt| jj	d	d
g ng  |sN|rgt
tfdd| jD }t |dkrf|d }nd}t  t rt|}	|sz|rt|	tr|	d }
t fdd|	 D }n
|	d }
|	dd }|
   }t|}nd}t|	trt fdd|	 D }n|	}t|}n|s|r|   | j|dd\}}W d   n1 sw   Y  |  }t|trt fdd| D }nK|dd }nDd}|   |di }W d   n	1 s!w   Y  t|tr:t fdd| D }n|}| jjdkrL|| jjd  | _W d   n	1 sWw   Y  |rd|ddfS t
|}t |dkrs|d }|||fS )a  
        Perform an evaluation step on `model` using `inputs`.

        Subclass and override to inject custom behavior.

        Args:
            model (`nn.Module`):
                The model to evaluate.
            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument `labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (`bool`):
                Whether or not to return the loss only.
            ignore_keys (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.

        Returns:
            Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
            logits and labels (each being optional).
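
        Example (an illustrative sketch, not part of the original docstring; it runs a single prediction step on one
        evaluation batch and assumes `trainer` already has an `eval_dataset`):

        ```python
        trainer.model.eval()
        batch = next(iter(trainer.get_eval_dataloader()))
        loss, logits, labels = trainer.prediction_step(trainer.model, batch, prediction_loss_only=False)
        ```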
        """
        # (body not fully recoverable from the compiled dump; recovered control flow:)
        # 1. Resolve `has_labels` / `return_loss` / `loss_without_labels` from `self.label_names`
        #    and the default of `return_loss` in `model.forward`.
        # 2. Prepare the inputs and default `ignore_keys` to
        #    `model.config.keys_to_ignore_at_inference` when the model exposes a config.
        # 3. Run the forward pass (through `compute_loss(..., return_outputs=True)` when labels
        #    are present, otherwise a plain `model(**inputs)`, with SageMaker MP and autocast
        #    variants), remembering `outputs[args.past_index - 1]` when `args.past_index >= 0`.
        # 4. Detach logits and labels, drop the ignored keys, and return `(loss, logits, labels)`,
        #    short-circuiting to `(loss, None, None)` when `prediction_loss_only` is set.
        ...
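
    # --- Illustrative sketch (added by the editor, not part of the original module) -----------
    # The docstring above suggests subclassing and overriding `prediction_step` to inject custom
    # behavior. A minimal sketch: squeeze the logits of a single-logit regression head. The
    # factory wrapper and the subclass name are hypothetical.
    @staticmethod
    def _example_prediction_step_override():
        class SqueezeLogitsTrainer(Trainer):  # hypothetical subclass, for illustration only
            def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
                loss, logits, labels = super().prediction_step(
                    model, inputs, prediction_loss_only, ignore_keys=ignore_keys
                )
                if logits is not None and isinstance(logits, torch.Tensor):
                    logits = logits.squeeze(-1)  # (batch, 1) -> (batch,) for regression heads
                return loss, logits, labels

        return SqueezeLogitsTrainer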









*

zTrainer.prediction_stepc                 C   s   t | jdr| j|S dS )a  
        For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point
        operations for every backward + forward pass. If using another model, either implement such a method in the
        model or subclass and override this method.

        Args:
            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

        Returns:
            `int`: The number of floating-point operations.
        """
        if hasattr(self.model, "floating_point_ops"):
            return self.model.floating_point_ops(inputs)
        return 0
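
    # --- Illustrative sketch (added by the editor, not part of the original module) -----------
    # A rough 6 * N * D estimate of the kind `floating_point_ops` is meant to return for models
    # that do not implement it themselves. The method name and the 6*N*D heuristic are
    # assumptions made for illustration, not part of the original API.
    def _example_flops_estimate(self, inputs: dict) -> int:
        n_params = get_model_param_count(self.model, trainable_only=False)
        n_tokens = int(inputs["input_ids"].numel()) if "input_ids" in inputs else 0
        return 6 * n_params * n_tokens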
    def init_hf_repo(self, token: Optional[str] = None):
        """
        Initializes a git repo in `self.args.hub_model_id`.
        """
        if not self.is_world_process_zero():
            return

        if self.args.hub_model_id is None:
            repo_name = Path(self.args.output_dir).absolute().name
        else:
            repo_name = self.args.hub_model_id

        token = token if token is not None else self.args.hub_token
        repo_url = create_repo(repo_name, token=token, private=self.args.hub_private_repo, exist_ok=True)
        self.hub_model_id = repo_url.repo_id
        self.push_in_progress = None

    def create_model_card(
        self,
        language: Optional[str] = None,
        license: Optional[str] = None,
        tags: Union[str, list[str], None] = None,
        model_name: Optional[str] = None,
        finetuned_from: Optional[str] = None,
        tasks: Union[str, list[str], None] = None,
        dataset_tags: Union[str, list[str], None] = None,
        dataset: Union[str, list[str], None] = None,
        dataset_args: Union[str, list[str], None] = None,
    ):
        """
        Creates a draft of a model card using the information available to the `Trainer`.

        Args:
            language (`str`, *optional*):
                The language of the model (if applicable)
            license (`str`, *optional*):
                The license of the model. Will default to the license of the pretrained model used, if the original
                model given to the `Trainer` comes from a repo on the Hub.
            tags (`str` or `List[str]`, *optional*):
                Some tags to be included in the metadata of the model card.
            model_name (`str`, *optional*):
                The name of the model.
            finetuned_from (`str`, *optional*):
                The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo
                of the original model given to the `Trainer` (if it comes from the Hub).
            tasks (`str` or `List[str]`, *optional*):
                One or several task identifiers, to be included in the metadata of the model card.
            dataset_tags (`str` or `List[str]`, *optional*):
                One or several dataset tags, to be included in the metadata of the model card.
            dataset (`str` or `List[str]`, *optional*):
                One or several dataset identifiers, to be included in the metadata of the model card.
            dataset_args (`str` or `List[str]`, *optional*):
                One or several dataset arguments, to be included in the metadata of the model card.
        """
        if not self.is_world_process_zero():
            return
        # (remainder of the body not fully recoverable from the compiled dump; recovered flow:)
        # 1. Point `model_card_filepath` at `README.md` under `args.output_dir`; if it already
        #    exists, read its `library_name` (to detect a PEFT card) and merge its existing tags
        #    into `tags`.
        # 2. Build `TrainingSummary.from_trainer(self, language=..., license=..., tags=...,
        #    model_name=..., finetuned_from=..., tasks=..., dataset_tags=..., dataset=...,
        #    dataset_args=...)` and render it with `to_model_card()`.
        # 3. Write the card to `README.md`; for PEFT models, also call
        #    `create_or_update_model_card(self.args.output_dir)` on the unwrapped model.
        ...
zTrainer.create_model_cardc              	   C   sB  |   r| jjtjkrd S | jjs| jd ur| j sd S | jj}t	t
tg}ttfD ]>}tj||}tj|rh|| t|}t| }W d    n1 sTw   Y  tt|d  }|| q*t rt|tttg |D ]}	tjtj||	rt tj||	tj||	 qv| j!d ur| j!"| t#$| jtj|t% | jj&t'j(krd| j)j* }
n	dt+| j)j, }
t-| j.||
| jj/ddt0 dgd}|g}| jjtj1tj2fv r| jjtj1krdnt3|j4}t-| j.|||
d	 | jj/dd
}|| | jd u s| j rt5|| _d S | jj6| d S )N
weight_mapzTraining in progress, step zTraining in progress, epoch Tr  rb  )r  folder_pathrE  r  run_as_futureignore_patternszlast-checkpointz, checkpoint)r  r  path_in_reporE  r  r  )7r  r   hub_strategyrR   ENDhub_always_pushr  is_doner   rj   rn   rl   rm   rk   rb  r2  r  r  rV  r  jsonloadsreadr   r6  r7  extendr   rg   ri   rh   r1  r  r   rY  r   r7  rX  r  rU   r  rp  r3  r  r  r   r^  r  rM   
CHECKPOINTALL_CHECKPOINTSr   r  rp   jobs)r  r  r   modeling_files
index_file
index_pathrf  rh  shard_filesmodeling_filerE  model_push_job	push_jobsr  checkpoint_pushr   r   r   r  ^  sh   



 
	
zTrainer._push_from_checkpointc                 C   sB   t | dsd S | jd ur| j std | j  d S d S d S )Nr  z\Waiting for the current checkpoint push to be finished, this might take a couple of minutes.)r;  r  r  r  r  wait_until_doner  r   r   r   r3    s   

zTrainer._finish_current_pushEnd of trainingrE  blockingrevisionc              	   K   s6  | dd}|du r%| jjr%| jjdu rt| jjj}n	| jjdd }|dur+|n| jj}| jdu r:| j	|d | j
dd |  sFdS t| jddduryd	|vrWg |d	< t|d	 tre|d	 g|d	< | jjD ]}||d	 vrx|d	 | qi| jdd|i| |   t| j| jj||| d
t dg|dS )u  
        Upload `self.model` and `self.processing_class` to the 🤗 model hub on the repo `self.args.hub_model_id`.

        Parameters:
            commit_message (`str`, *optional*, defaults to `"End of training"`):
                Message to commit while pushing.
            blocking (`bool`, *optional*, defaults to `True`):
                Whether the function should return only when the `git push` has finished.
            token (`str`, *optional*, defaults to `None`):
                Token with write permission to overwrite Trainer's original args.
            revision (`str`, *optional*):
                The git revision to commit from. Defaults to the head of the "main" branch.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments passed along to [`~Trainer.create_model_card`].

        Returns:
            The URL of the repository where the model was pushed if `blocking=True`, or a `Future` object tracking the
            progress of the commit if `blocking=False`.
        """
        # (body not fully recoverable from the compiled dump; recovered flow:)
        # 1. Pop/derive `model_name` (from `args.hub_model_id` or the last component of
        #    `args.output_dir`), call `init_hf_repo(token=token)` if the repo has not been
        #    created yet, and save the model; return early on non-main processes.
        # 2. Merge `self.model.model_tags` into `kwargs["tags"]`, draft the card with
        #    `create_model_card(model_name=model_name, **kwargs)`, and wait for any in-flight
        #    checkpoint push via `_finish_current_push()`.
        # 3. Upload `args.output_dir` with `upload_folder(repo_id=self.hub_model_id,
        #    commit_message=commit_message, token=token, run_as_future=not blocking,
        #    ignore_patterns=["_*", f"{PREFIX_CHECKPOINT_DIR}-*"], revision=revision)`.
        ...
jdkrd|| ju rd| jsA| jrG| j
|n| j
j|dd}| jrU|| _|| jur]|| _| jrd| j| _| js|jrt|jtj|jd	}n|jr|jtj|jd	}t|d
dr|jn|j}	|	du rtd| |}
td| d td|
  td|	  d}d}d}d}d}i }td|j}t ||
|	d}|sd}t!|drt"|j#t$r|j#j}t ||
|d}t ||
|d}t ||
|d}|%  t!| j&dr
t'| j&j%r
| j&%  |j(dkrd| _)|| j*_+t,|D ]\}}| j-||||d\}}}t| jdd}d|j.v r@| /|| nd}|dur\|0|	}|du rS|ntj1||fdd}|duro|du rh|nt2||dd}|dur|du r{|nt2||dd}|dur|du r|nt2||dd}| j*3|| j4| j5| _5| j j6r| j7dur|dur|dur| j
j8j9}i } d|j.v r|nd| d< d|j.v r|nd| d< | j7t:d(||d| |d}| j j6s|j;dur.|d |j; dkr.|<| =|d |s|<| =|d  |<| =|d! |<| =|d" ~~~~tj>?  d#\}}}}q|j(r?t!| d$r?t@| d$ |<| =|d |sf|<| =|d  |<| =|d! |<| =|d" |A }!|sq|A nd}"|sz|A nd}#|s|A nd}$| j7dur|"dur|#dur| j j6sd|j.v r|!nd|d< d|j.v r|$nd|d< | 7t:d(|"|#d|}n|du ri }tB|}|!dur|!C D || d%< tE|F D ]}%|%G| d&s|H|%|| d&|% < qtI|"|#||
d'S ))r  z+dataloader must implement a working __len__Nr   Tr  Fr  r  r  _is_accelerate_preparedz\Batch size cannot be None. Ensure the dataloader has a valid batch_size or total_batch_size.r  r  r  r  r	   )make_multiple_ofr  r  r  r  r  rQ  )r  r  r  r  r  r  r  eval_losses
eval_predseval_label_idseval_inputs_ids)NNNNr  r  rE  r  r   )Jr   r^   r  rr  rC  r$  r!   r  r   r8  rO  r  rJ  r  r  rN  r%  rG  r  r   r  r   rH  rs  r4  r  r  r  r  r  rb  r  r9   r;  r   r  r?   r  rT  rd  r  r  rY  r  r  r  r  r#  r  catrF   r  rp  rw  r  r   r  r  rP   r  
add_arrays_gather_and_numpifyr  r5  r&  finalizerZ   r  r  r   r  r  r  rO   )&r  r  r  rr  r  rp  r   rE  r   r  r  losses_host
preds_hostlabels_hostinputs_hostr  r  r  eval_losses_gathererr	  preds_gathererlabels_gathererinputs_gathererrL  rQ  r  r  r;  r  r  r  r  r  	eval_losspredsr  
inputs_idsr  r   r   r   rv    s   





 




 

 





zTrainer.prediction_loopc                 C   sX   |du rdS t  rt||}t	|S t rt|}t	|S | jjtjkr(t|}t	|S )r  N)
r   rI   r   r   r   rD  re   rE  rA   rH   r  r   r   r   r    s   
zTrainer._gather_and_numpifyc                 C   sB  |   sdS ddg}tjtj| jjdr7ttj| jjd}| }W d   n1 s1w   Y  nd}|}|D ]}||vrT|	drM||7 }q=|d| 7 }q=||krttj| jjdd}t
d|  || W d   n1 s}w   Y  | jd td	 | j s| jd
 | j  dS dS )z8Add SageMaker Checkpointing patterns to .gitignore file.Nz*.sagemaker-uploadingz*.sagemaker-uploadedz
.gitignorer  
r  z"Writing .gitignore file. Content: g      ?z'Add *.sagemaker patterns to .gitignore.)r  rb  r2  r  r  repo	local_dirr  r  rP  r  r'  r  git_addr  sleepis_repo_clean
git_commitgit_push)r  patternsrf  current_contentcontentpatternr   r   r   _add_sm_patterns_to_gitignore  s6   




    def create_accelerator_and_postprocess(self):
        # (body not fully recoverable from the compiled dump; recovered flow:)
        # 1. Build the `Accelerator` kwargs from `args.accelerator_config` (split_batches,
        #    dispatch_batches, even_batches, use_seedable_sampler, plus non_blocking and
        #    gradient_accumulation_kwargs on recent accelerate versions), raising when
        #    `num_steps` is set while `gradient_accumulation_steps > 1`, and when `non_blocking`
        #    is requested on accelerate older than 0.30.0.
        # 2. Instantiate `Accelerator(...)` (with a `torch_tp_plugin` when tensor parallelism is
        #    enabled, which requires accelerate > 1.3.0), expose `self.gather_function`, and set
        #    `self.is_deepspeed_enabled` / `self.is_fsdp_enabled` / `self.is_tp_enabled` from the
        #    accelerator state.
        # 3. Post-process the FSDP plugin (limit_all_gathers, activation_checkpointing, which is
        #    incompatible with `gradient_checkpointing`), call `propagate_args_to_deepspeed()`
        #    when DeepSpeed is enabled, and reject unsupported combinations (`save_only_model`
        #    with `load_best_model_at_end` under DeepSpeed/FSDP, `auto_find_batch_size` with
        #    DeepSpeed ZeRO-3, and FSDP's `SHARDED_STATE_DICT` with `save_only_model`).
        ...

    def propagate_args_to_deepspeed(self, auto_find_batch_size=False):
        """
        Sets values in the deepspeed plugin based on the Trainer args
        r   r  N)
r  r  rO  rp  r  r  r  deepspeed_configr  r   )r  r  r  	ds_pluginr   r   r   r  Y  s
   

    def _fsdp_qlora_plugin_updates(self):
        # (body not fully recoverable from the compiled dump; recovered flow:)
        # When FSDP is enabled for a PEFT model, derive the FSDP auto-wrap policy from
        # `peft.utils.other.fsdp_auto_wrap_policy(self.model)`; for bitsandbytes-quantized models
        # whose `bnb_4bit_quant_storage` is a floating-point dtype (and accelerate newer than
        # 0.27.0), propagate that dtype to the FSDP plugin via `set_mixed_precision(..., override=True)`.
        ...
    # (Trainer.get_batch_samples; the body was not recoverable from the compiled dump. Recovered
    #  flow: pull up to the requested number of micro-batches from the epoch iterator, stopping on
    #  StopIteration, and, when the batches carry `labels` and the loss accepts a token count,
    #  compute `num_items_in_batch` as the number of label tokens != -100, optionally gathered
    #  across devices when `args.average_tokens_across_devices` is set and moved to the right
    #  device. Returns `(batch_samples, num_items_in_batch)`.)
zTrainer.get_batch_samplesr5  c                 C   s   |j }|dk }t|rt|nd}|dur't||j d}|r't|j| }|rS| |}|j dkrD|| t	|| dk }	|| }
n,t|j}	| ||j }
n|j dkrht
j}	|}||j  }|j | }
ntd|j  |	|||
|||fS )a  
        Calculates and returns the following values:
        - `num_train_epochs`
        - `num_update_steps_per_epoch`
        - `num_examples`
        - `num_train_samples`
        - `epoch_based`
        - `len_dataloader`
        - `max_steps`
        """
        # (body not fully recoverable from the compiled dump; recovered flow:)
        # For a sized dataloader, `num_update_steps_per_epoch = max(len(dataloader) //
        # args.gradient_accumulation_steps, 1)`; if `args.max_steps > 0` it is taken as-is and the
        # epoch count is derived from it, otherwise `max_steps = math.ceil(args.num_train_epochs *
        # num_update_steps_per_epoch)`. For an un-sized dataloader, `args.max_steps` must be
        # positive ("args.max_steps must be set to a positive value if dataloader does not have a
        # length, was ..."), and `num_examples` / `num_train_samples` fall back to
        # `max_steps * total_train_batch_size`.
        ...
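
    # --- Illustrative sketch (added by the editor, not part of the original module) -----------
    # The epoch-based arithmetic described above, in isolation: update steps per epoch come from
    # the dataloader length and gradient accumulation, and `max_steps` from the requested number
    # of epochs. The helper name is hypothetical.
    @staticmethod
    def _example_training_step_math(len_dataloader: int, gradient_accumulation_steps: int, num_train_epochs: float):
        num_update_steps_per_epoch = max(len_dataloader // gradient_accumulation_steps, 1)
        max_steps = math.ceil(num_train_epochs * num_update_steps_per_epoch)
        return num_update_steps_per_epoch, max_steps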
# (remainder of the compiled module: the Trainer class attribute table and the module-level
#  import bookkeeping; nothing further is recoverable as source)