import copy
import dataclasses
import importlib.metadata
import json
import os
from dataclasses import dataclass
from enum import Enum
from inspect import Parameter, signature
from typing import Any, Dict, List, Optional, Tuple, Union

from packaging import version

from ..utils import (
    is_auto_awq_available,
    is_compressed_tensors_available,
    is_gptqmodel_available,
    is_hqq_available,
    is_quark_available,
    is_torch_available,
    is_torchao_available,
    logging,
)
from .import_utils import is_auto_gptq_available


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class QuantizationMethod(str, Enum):
    BITS_AND_BYTES = "bitsandbytes"
    GPTQ = "gptq"
    AWQ = "awq"
    AQLM = "aqlm"
    VPTQ = "vptq"
    QUANTO = "quanto"
    EETQ = "eetq"
    HIGGS = "higgs"
    HQQ = "hqq"
    COMPRESSED_TENSORS = "compressed-tensors"
    FBGEMM_FP8 = "fbgemm_fp8"
    TORCHAO = "torchao"
    BITNET = "bitnet"
    SPQR = "spqr"
    FP8 = "fp8"
    QUARK = "quark"


class AWQLinearVersion(str, Enum):
    GEMM = "gemm"
    GEMV = "gemv"
    EXLLAMA = "exllama"
    IPEX = "ipex"

    @staticmethod
    def from_str(version: str):
        version = version.lower()
        if version == "gemm":
            return AWQLinearVersion.GEMM
        elif version == "gemv":
            return AWQLinearVersion.GEMV
        elif version == "exllama":
            return AWQLinearVersion.EXLLAMA
        elif version == "ipex":
            return AWQLinearVersion.IPEX
        else:
            raise ValueError(f"Unknown AWQLinearVersion {version}")


class AwqBackendPackingMethod(str, Enum):
    AUTOAWQ = "autoawq"
    LLMAWQ = "llm-awq"


@dataclass
class QuantizationConfigMixin:
    """
    Mixin class for quantization config
    """

    quant_method: QuantizationMethod

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):
        """
        Instantiates a [`QuantizationConfigMixin`] from a Python dictionary of parameters.

        Args:
            config_dict (`Dict[str, Any]`):
                Dictionary that will be used to instantiate the configuration object.
            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
                Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in
                `PreTrainedModel`.
            kwargs (`Dict[str, Any]`):
                Additional parameters from which to initialize the configuration object.

        Returns:
            [`QuantizationConfigMixin`]: The configuration object instantiated from those parameters.
        """
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        """
        Save this instance to a JSON file.

        Args:
            json_file_path (`str` or `os.PathLike`):
                Path to the JSON file in which this configuration instance's parameters will be saved.
            use_diff (`bool`, *optional*, defaults to `True`):
                If set to `True`, only the difference between the config instance and the default
                `QuantizationConfig()` is serialized to JSON file.
        """
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes this instance to a Python dictionary. Returns:
            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        return copy.deepcopy(self.__dict__)

    def __iter__(self):
        """allows `dict(obj)` for situations where obj may be a dict or QuantizationConfigMixin"""
        for attr, value in copy.deepcopy(self.__dict__).items():
            yield attr, value

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        """
        Serializes this instance to a JSON string.

        Args:
            use_diff (`bool`, *optional*, defaults to `True`):
                If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
                is serialized to JSON string.

        Returns:
            `str`: String containing all the attributes that make up this configuration instance in JSON format.
        """
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def update(self, **kwargs):
        """
        Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,
        returning all the unused kwargs.

        Args:
            kwargs (`Dict[str, Any]`):
                Dictionary of attributes to tentatively update this class.

        Returns:
            `Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
        """
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
                to_remove.append(key)

        # Return the key-value pairs that did not match any existing attribute.
        unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
        return unused_kwargs


@dataclass
class HqqConfig(QuantizationConfigMixin):
    """
    This is a wrapper around hqq's BaseQuantizeConfig.

    Args:
        nbits (`int`, *optional*, defaults to 4):
            Number of bits. Supported values are (8, 4, 3, 2, 1).
        group_size (`int`, *optional*, defaults to 64):
            Group-size value. Supported values are any value that is divisible by weight.shape[axis].
        view_as_float (`bool`, *optional*, defaults to `False`):
            View the quantized weight as float (used in distributed training) if set to `True`.
        axis (`Optional[int]`, *optional*):
            Axis along which grouping is performed. Supported values are 0 or 1.
        dynamic_config (dict, *optional*):
            Parameters for dynamic configuration. The key is the name tag of the layer and the value is a quantization config.
            If set, each layer specified by its id will use its dedicated quantization configuration.
        skip_modules (`List[str]`, *optional*, defaults to `['lm_head']`):
            List of `nn.Linear` layers to skip.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional parameters from which to initialize the configuration object.
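
    Example (a minimal usage sketch; the checkpoint id is illustrative and assumes the `hqq` package and a GPU are available):

    ```python
    from transformers import AutoModelForCausalLM, HqqConfig

    # 4-bit HQQ quantization applied on the fly at load time
    quantization_config = HqqConfig(nbits=4, group_size=64)
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-125m",  # illustrative checkpoint
        quantization_config=quantization_config,
        device_map="auto",
    )
    ```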
       @   FNlm_headnbits
group_sizeview_as_floataxisdynamic_configskip_modulesc                 K   s   t  r
ddlm} ntddD ]}	|	|v rt|	d  q|d u r)d}td |dvr1td	|d urJi | _|D ]}
|di ||
 | j|
< q:n|di ||||d
| _tj	| _
|| _|   d S )Nr   )BaseQuantizeConfigzA valid HQQ version (>=0.2.1) is not available. Please follow the instructions to install it: `https://github.com/mobiusml/hqq/`.)
quant_zeroquant_scaleoffload_metazH is deprecated. This parameter will be ignored in quantization settings.r   zYSetting axis=1 as faster backends such as TorchAO or BitBlas are only compatible with it.)r   r   z-Invalid axis value. Only 0 and 1 are allowed.)r   r   r   r   r=   )r   hqq.core.quantizer   ImportErrorloggerinforI   quant_configr   r5   rT   r   	post_init)ro   r   r   r   r   r   r   r]   HQQBaseQuantizeConfigdeprecated_keyr`   r=   r=   r>   __init__   s>   

	zHqqConfig.__init__c                 C      dS )~
        Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
        Nr=   rx   r=   r=   r>   r        zHqqConfig.post_initr^   c                 C   s   |  }|d |_ |d |_|S )zd
        Override from_dict, used in AutoQuantizationConfig.from_dict in quantizers/auto.py
        r   r   )r   r   )rZ   r^   instancer=   r=   r>   rb     s   

zHqqConfig.from_dictrs   c                 C   s   | j | j| jdS )rt   r   rT   r   r   rx   r=   r=   r>   rk   '  s   zHqqConfig.to_dictc                 C   (   |   }| jj dtj|ddd dS Nr|   r   Trf   ri   rk   r}   r*   rl   rm   ro   r[   r=   r=   r>   r   2      zHqqConfig.__repr__c                 C   @   |   }t   }i }| D ]\}}||| kr|||< q|S a&  
        Removes all attributes from config which correspond to the default config attributes for better readability and
        serializes to a Python dictionary.
        Returns:
            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
        )rk   r   rU   ro   r[   default_config_dictserializable_config_dictr`   ra   r=   r=   r>   r   6  s   
zHqqConfig.to_diff_dict)r*   r+   r,   r   intr   r	   dictr   rL   r   r   r   r   r   rb   rk   r   r   r=   r=   r=   r>   r      s6    
1	r   c                   @   s   e Zd ZdZ										dddZedd	 Zejd
efdd	Zedd Z	e	jd
efddZ	dd Z
dd Zdd Zdeeef fddZdd Zdeeef fddZdS )BitsAndBytesConfigan  
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using `bitsandbytes`.

    This replaces `load_in_8bit` or `load_in_4bit`; therefore both options are mutually exclusive.

    Currently only supports `LLM.int8()`, `FP4`, and `NF4` quantization. If more methods are added to `bitsandbytes`,
    then more arguments will be added to this class.

    Args:
        load_in_8bit (`bool`, *optional*, defaults to `False`):
            This flag is used to enable 8-bit quantization with LLM.int8().
        load_in_4bit (`bool`, *optional*, defaults to `False`):
            This flag is used to enable 4-bit quantization by replacing the Linear layers with FP4/NF4 layers from
            `bitsandbytes`.
        llm_int8_threshold (`float`, *optional*, defaults to 6.0):
            This corresponds to the outlier threshold for outlier detection as described in `LLM.int8() : 8-bit Matrix
            Multiplication for Transformers at Scale` paper: https://arxiv.org/abs/2208.07339 Any hidden states value
            that is above this threshold will be considered an outlier and the operation on those values will be done
            in fp16. Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5], but
            there are some exceptional systematic outliers that are very differently distributed for large models.
            These outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of
            magnitude ~5, but beyond that, there is a significant performance penalty. A good default threshold is 6,
            but a lower threshold might be needed for more unstable models (small models, fine-tuning).
        llm_int8_skip_modules (`List[str]`, *optional*):
            An explicit list of the modules that we do not want to convert in 8-bit. This is useful for models such as
            Jukebox that has several heads in different places and not necessarily at the last position. For example
            for `CausalLM` models, the last `lm_head` is kept in its original `dtype`.
        llm_int8_enable_fp32_cpu_offload (`bool`, *optional*, defaults to `False`):
            This flag is used for advanced use cases and users that are aware of this feature. If you want to split
            your model in different parts and run some parts in int8 on GPU and some parts in fp32 on CPU, you can use
            this flag. This is useful for offloading large models such as `google/flan-t5-xxl`. Note that the int8
            operations will not be run on CPU.
        llm_int8_has_fp16_weight (`bool`, *optional*, defaults to `False`):
            This flag runs LLM.int8() with 16-bit main weights. This is useful for fine-tuning as the weights do not
            have to be converted back and forth for the backward pass.
        bnb_4bit_compute_dtype (`torch.dtype` or str, *optional*, defaults to `torch.float32`):
            This sets the computational type which might be different than the input type. For example, inputs might be
            fp32, but computation can be set to bf16 for speedups.
        bnb_4bit_quant_type (`str`,  *optional*, defaults to `"fp4"`):
            This sets the quantization data type in the bnb.nn.Linear4Bit layers. Options are FP4 and NF4 data types
            which are specified by `fp4` or `nf4`.
        bnb_4bit_use_double_quant (`bool`, *optional*, defaults to `False`):
            This flag is used for nested quantization where the quantization constants from the first quantization are
            quantized again.
        bnb_4bit_quant_storage (`torch.dtype` or str, *optional*, defaults to `torch.uint8`):
            This sets the storage type used to pack the quantized 4-bit params.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional parameters from which to initialize the configuration object.
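
    Example (a minimal usage sketch; the checkpoint id is illustrative and assumes a CUDA GPU with `bitsandbytes` installed):

    ```python
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    # 4-bit NF4 weights with bfloat16 compute and nested quantization
    quantization_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype="bfloat16",
        bnb_4bit_use_double_quant=True,
    )
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-350m",  # illustrative checkpoint
        quantization_config=quantization_config,
        device_map="auto",
    )
    ```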
    F      @Nfp4c                 K   s  t j| _|r|rtd|| _|| _|| _|| _|| _|| _	|| _
|	| _|d u r-tj| _nt|tr9tt|| _nt|tjrC|| _ntd|
d u rPtj| _n"t|
trd|
dvr]tdtt|
| _nt|
tjrn|
| _ntd|rtdt|  d| j d |   d S )	NVload_in_4bit and load_in_8bit are both True, but only one can be used at the same timez8bnb_4bit_compute_dtype must be a string or a torch.dtype)float16float32int8uint8float64bfloat16zv`bnb_4bit_quant_storage` must be a valid string (one of 'float16', 'float32', 'int8', 'uint8', 'float64', 'bfloat16') z8bnb_4bit_quant_storage must be a string or a torch.dtypezUnused kwargs: z. These kwargs are not used in .)r   r-   rT   rI   _load_in_8bit_load_in_4bitllm_int8_thresholdllm_int8_skip_modules llm_int8_enable_fp32_cpu_offloadllm_int8_has_fp16_weightbnb_4bit_quant_typebnb_4bit_use_double_quanttorchr   bnb_4bit_compute_dtype
isinstancerL   getattrdtyper   bnb_4bit_quant_storager   r   listkeysr}   r   )ro   load_in_8bitload_in_4bitr   r   r   r   r   r   r   r   r]   r=   r=   r>   r     s@   



"zBitsAndBytesConfig.__init__c                 C      | j S N)r   rx   r=   r=   r>   r        zBitsAndBytesConfig.load_in_4bitra   c                 C   .   t |ts	td| jr|rtd|| _d S )Nload_in_4bit must be a booleanr   )r   r   	TypeErrorr   rI   r   ro   ra   r=   r=   r>   r     
   


c                 C   r   r   )r   rx   r=   r=   r>   r     r   zBitsAndBytesConfig.load_in_8bitc                 C   r   )Nload_in_8bit must be a booleanr   )r   r   r   r   rI   r   r   r=   r=   r>   r     r   c                 C   s   t | jts
tdt | jtstdt | jtstd| jdur-t | jts-tdt | j	ts7tdt | j
tsAtd| jdurQt | jtjsQtdt | jts[td	t | jtsetd
| jrzttjdtdks|tddS dS )r   r   r   z"llm_int8_threshold must be a floatNz/llm_int8_skip_modules must be a list of stringsz2llm_int8_enable_fp32_cpu_offload must be a booleanz*llm_int8_has_fp16_weight must be a booleanz*bnb_4bit_compute_dtype must be torch.dtypez$bnb_4bit_quant_type must be a stringz+bnb_4bit_use_double_quant must be a booleanr   z0.39.0z[4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version)r   r   r   r   r   r   floatr   r   r   r   r   r   r   r   rL   r   r   parse	importlibmetadatarI   rx   r=   r=   r>   r     s2   zBitsAndBytesConfig.post_initc                 C   s   | j p| jS )zP
        Returns `True` if the model is quantizable, `False` otherwise.
        )r   r   rx   r=   r=   r>   is_quantizable  s   z!BitsAndBytesConfig.is_quantizablec                 C   s6   | j rdS | jr| jdkrdS | jr| jdkrdS dS )z
        This method returns the quantization method used for the model. If the model is not quantizable, it returns
        `None`.
        llm_int8r   nf4N)r   r   r   rx   r=   r=   r>   quantization_method  s   z&BitsAndBytesConfig.quantization_methodrs   c                 C   sX   t | j}t|d dd |d< t|d dd |d< | j|d< | j|d< |S )rt   r   r   r   r   r   r   )ru   rv   rw   rL   splitr   r   )ro   outputr=   r=   r>   rk     s   

zBitsAndBytesConfig.to_dictc                 C   r   r   r   r   r=   r=   r>   r     r   zBitsAndBytesConfig.__repr__c                 C   r   )a'  
        Removes all attributes from config which correspond to the default config attributes for better readability and
        serializes to a Python dictionary.

        Returns:
            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
        )rk   r   rU   r   r=   r=   r>   r     s   
zBitsAndBytesConfig.to_diff_dict)
FFr   NFFNr   FN)r*   r+   r,   r   r   propertyr   setterr   r   r   r   r   r   rL   r   rk   r   r   r=   r=   r=   r>   r   L  s6    5
7

%r   c                   @   rM   )ExllamaVersionr   r   N)r*   r+   r,   ONETWOr=   r=   r=   r>   r   3  rR   r   c                0       s&  e Zd ZdZ																					d+d	ed
edeeee	 e	f  dede
dededede	deee	ef  dee	 dedee dee	 deee	  dedee dee dee deee	ef  dedeeee	   f,dd Zd!d" Zd#d$ Z fd%d&Zd'd( Zed)d* Z  ZS ),
GPTQConfigaa  
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using `optimum` api for gptq quantization relying on auto_gptq backend.

    Args:
        bits (`int`):
            The number of bits to quantize to, supported numbers are (2, 3, 4, 8).
        tokenizer (`str` or `PreTrainedTokenizerBase`, *optional*):
            The tokenizer used to process the dataset. You can pass either:
                - A custom tokenizer object.
                - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
                - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
                    using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
        dataset (`Union[List[str]]`, *optional*):
            The dataset used for quantization. You can provide your own dataset as a list of strings or just use the
            original datasets used in GPTQ paper ['wikitext2','c4','c4-new']
        group_size (`int`, *optional*, defaults to 128):
            The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization.
        damp_percent (`float`, *optional*, defaults to 0.1):
            The percent of the average Hessian diagonal to use for dampening. Recommended value is 0.1.
        desc_act (`bool`, *optional*, defaults to `False`):
            Whether to quantize columns in order of decreasing activation size. Setting it to False can significantly
            speed up inference but the perplexity may become slightly worse. Also known as act-order.
        sym (`bool`, *optional*, defaults to `True`):
            Whether to use symmetric quantization.
        true_sequential (`bool`, *optional*, defaults to `True`):
            Whether to perform sequential quantization even within a single Transformer block. Instead of quantizing
            the entire block at once, we perform layer-wise quantization. As a result, each layer undergoes
            quantization using inputs that have passed through the previously quantized layers.
        checkpoint_format (`str`, *optional*, defaults to `"gptq"`):
            GPTQ weight format. `gptq`(v1) is supported by both gptqmodel and auto-gptq. `gptq_v2` is gptqmodel only.
        meta (`Dict[str, any]`, *optional*):
            Properties, such as tooling:version, that do not directly contribute to quantization or quantized inference are stored in meta.
            i.e. `meta.quantizer`: ["optimum:_version_", "gptqmodel:_version_"]
        backend (`str`, *optional*):
            Controls which gptq kernel to be used. Valid values for gptqmodel are `auto`, `auto_trainable` and more. For auto-gptq, only
            valid value is None and `auto_trainable`. Ref gptqmodel backends: https://github.com/ModelCloud/GPTQModel/blob/main/gptqmodel/utils/backend.py
        use_cuda_fp16 (`bool`, *optional*, defaults to `False`):
            Whether or not to use optimized cuda kernel for fp16 model. Need to have model in fp16. Auto-gptq only.
        model_seqlen (`int`, *optional*):
            The maximum sequence length that the model can take.
        block_name_to_quantize (`str`, *optional*):
            The transformers block name to quantize. If None, we will infer the block name using common patterns (e.g. model.layers)
        module_name_preceding_first_block (`List[str]`, *optional*):
            The layers that are preceding the first Transformer block.
        batch_size (`int`, *optional*, defaults to 1):
            The batch size used when processing the dataset.
        pad_token_id (`int`, *optional*):
            The pad token id. Needed to prepare the dataset when `batch_size` > 1.
        use_exllama (`bool`, *optional*):
            Whether to use exllama backend. Defaults to `True` if unset. Only works with `bits` = 4.
        max_input_length (`int`, *optional*):
            The maximum input length. This is needed to initialize a buffer that depends on the maximum expected input
            length. It is specific to the exllama backend with act-order.
        exllama_config (`Dict[str, Any]`, *optional*):
            The exllama config. You can specify the version of the exllama kernel through the `version` key. Defaults
            to `{"version": 1}` if unset.
        cache_block_outputs (`bool`, *optional*, defaults to `True`):
            Whether to cache block outputs to reuse as inputs for the succeeding block.
        modules_in_block_to_quantize (`List[List[str]]`, *optional*):
            List of list of module names to quantize in the specified block. This argument is useful to exclude certain linear modules from being quantized.
            The block to quantize can be specified by setting `block_name_to_quantize`. We will quantize each list sequentially. If not set, we will quantize all linear layers.
            Example: `modules_in_block_to_quantize =[["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"], ["self_attn.o_proj"]]`.
            In this example, we will first quantize the q,k,v layers simultaneously since they are independent.
            Then, we will quantize `self_attn.o_proj` layer with the q,k,v layers quantized. This way, we will get
            better results since it reflects the real input `self_attn.o_proj` will get when the model is quantized.
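
    Example (a minimal calibration sketch; the checkpoint id is illustrative and assumes `optimum` plus a GPTQ backend such as `gptqmodel` or `auto-gptq` are installed):

    ```python
    from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

    model_id = "facebook/opt-125m"  # illustrative checkpoint
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    # quantize to 4 bits using the built-in "c4" calibration set
    quantization_config = GPTQConfig(bits=4, group_size=128, dataset="c4", tokenizer=tokenizer)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        quantization_config=quantization_config,
        device_map="auto",
    )
    ```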
    N   皙?FTr   r   bits	tokenizerdatasetr   damp_percentdesc_actsymtrue_sequentialcheckpoint_formatmetabackenduse_cuda_fp16model_seqlenblock_name_to_quantize!module_name_preceding_first_block
batch_sizepad_token_iduse_exllamamax_input_lengthexllama_configcache_block_outputsmodules_in_block_to_quantizec                 K   s   t j| _|| _|| _|| _|| _|| _|| _|| _	|| _
|	 | _|
| _t|tr-| n|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|   d S r   )r   r.   rT   r   r   r   r   r   r   r   r   rD   r   r   r   rL   r   r   r   r   r   r   r   r   r   r  r  r  r   )ro   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r  r  r  r]   r=   r=   r>   r   ~  s0   
zGPTQConfig.__init__c                    .   t | j}g d  fdd| D }|S )N)r   r  r   r   r   c                       i | ]\}}| v r||qS r=   r=   r   ijloading_attibutesr=   r>   r     r   z5GPTQConfig.get_loading_attributes.<locals>.<dictcomp>ry   ro   attibutes_dictloading_attibutes_dictr=   r	  r>   get_loading_attributes  s   z!GPTQConfig.get_loading_attributesc                 C   s<  | j dvrtd| j  | jdkr| jdkrtdd| j  k r)dk s.td td| jdurbt| jtrT| jd	v rFt| j d
| jdvrStd| j nt| jtsbtd| j t rx| j	du rw| j
durt| j
stdnd| _	n| j	dkrd| _
| j
du rd| _
| jdu rdtji| _n d| jvrtd| jd tjtjfvr| jd }td| | j dkr| j
r| jd tjkrtd n5| jd tjkrt rttjd}ttjd}|tdks|tdkrtd| d| | jdurttjd}|tdk rtddS dS );
        Safety checker that arguments are correct
        )r      r      z6Only support quantization to [2,3,4,8] bits but found r   z0group_size must be greater than 0 or equal to -1r   z"damp_percent must between 0 and 1.N)ptbzptb-newzh dataset was deprecated. You can only choose between
                        ['wikitext2','c4','c4-new'])	wikitext2c4zc4-newzYou have entered a string value for dataset. You can only choose between
                        ['wikitext2','c4','c4-new'], but we found zxdataset needs to be either a list of string or a value in
                    ['wikitext2','c4','c4-new'], but we found auto_trainableautoFTr   /`exllama_config` needs to have a `version` key.aOnly supported versions are in [ExllamaVersion.ONE, ExllamaVersion.TWO] - not recognized version r   zYou have activated exllama backend. Note that you can get better inference speed using exllamav2 kernel by setting `exllama_config`.optimum	auto_gptqz1.13.2z0.4.2zxYou need optimum > 1.13.2 and auto-gptq > 0.4.2 . Make sure to have that version installed - detected version : optimum z and autogptq z1.15.0zYou current version of `optimum` does not support `modules_in_block_to_quantize` quantization argument, please upgrade `optimum` package to a version superior than 1.15.0 .)r   rI   r   r   r   r   rL   r   r   r   r   r  r   r   r   r   r   r   r   r   r   r   r  )ro   exllama_versionoptimum_versionautogptq_versionr=   r=   r>   r     s   










zGPTQConfig.post_initc                    s   t   }|dd  |S )Ndisable_exllama)superrk   rY   r   r}   r=   r>   rk     s   
zGPTQConfig.to_dictc                 C   s   |   }| j |d< |S )z=
        Get compatible dict for optimum gptq config
        r  )rk   r   )ro   
quant_dictr=   r=   r>   to_dict_optimum  s   zGPTQConfig.to_dict_optimumc                 C   s2   d|v r|d  |d< | d | di |}|S )zD
        Get compatible class with optimum gptq config dict
        r  r   Nr=   )rY   )rZ   r[   r^   r=   r=   r>   from_dict_optimum  s
   
zGPTQConfig.from_dict_optimum)NNr   r   FTTr   NNFNNNr   NNNNTN)r*   r+   r,   r   r   r   r	   r   r   rL   r   r   r   r   r  r   rk   r#  r   r$  __classcell__r=   r=   r!  r>   r   8  s    G	


3K	r   c                   @   s   e Zd ZdZdddejejdddddf
dedede	d	ed
ede
e	 de
e de
e de
e de
eeef  fddZdd Zdd ZdS )	AwqConfigad	  
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using `auto-awq` library awq quantization relying on auto_awq backend.

    Args:
        bits (`int`, *optional*, defaults to 4):
            The number of bits to quantize to.
        group_size (`int`, *optional*, defaults to 128):
            The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization.
        zero_point (`bool`, *optional*, defaults to `True`):
            Whether to use zero point quantization.
        version (`AWQLinearVersion`, *optional*, defaults to `AWQLinearVersion.GEMM`):
            The version of the quantization algorithm to use. GEMM is better for big batch_size (e.g. >= 8) otherwise,
            GEMV is better (e.g. < 8 ). GEMM models are compatible with Exllama kernels.
        backend (`AwqBackendPackingMethod`, *optional*, defaults to `AwqBackendPackingMethod.AUTOAWQ`):
            The quantization backend. Some models might be quantized using `llm-awq` backend. This is useful for users
            that quantize their own models using `llm-awq` library.
        do_fuse (`bool`, *optional*, defaults to `False`):
            Whether to fuse attention and mlp layers together for faster inference
        fuse_max_seq_len (`int`, *optional*):
            The maximum sequence length to generate when using fusing.
        modules_to_fuse (`dict`, *optional*, defaults to `None`):
            Overwrite the natively supported fusing scheme with the one specified by the users.
        modules_to_not_convert (`list`, *optional*, defaults to `None`):
            The list of modules to not quantize, useful for quantizing models that explicitly require to have
            some modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers).
            Note that you cannot quantize directly with transformers; please refer to the `AutoAWQ` documentation for quantizing HF models.
        exllama_config (`Dict[str, Any]`, *optional*):
            You can specify the version of the exllama kernel through the `version` key, the maximum sequence
            length through the `max_input_len` key, and the maximum batch size through the `max_batch_size` key.
            Defaults to `{"version": 2, "max_input_len": 2048, "max_batch_size": 8}` if unset.
    r   r   TNr   r   
zero_pointr   r   do_fusefuse_max_seq_lenmodules_to_fusemodules_to_not_convertr  c                 K   sv   t j| _|| _|| _|| _|| _|| _|| _|	| _	|
| _
|| _|d u r/|d uo,t|dk| _n|| _|| _|   d S )Nr   )r   r/   rT   r   r   r'  r   r   r)  r+  r  r*  lenr(  r   )ro   r   r   r'  r   r   r(  r)  r*  r+  r  r]   r=   r=   r>   r   H  s   zAwqConfig.__init__c           
         sV   j tjtjfvrtdtj dtj d j  t j _ jtjtj	tj
tjfvr5td j  j tjkr_tj sItj sItdtj r_tj }|\}}|dk r_td jrk jdu rktd	 jrd
}d}t rttjdt|k}|std| d jdurd
}d}t rttjdt|k}|std| d jrψ jdurg d}t fdd|D std|  jtj
kr'd
}d}t rttjdt|k}|std| d jdu rtjddd _dS d jvrtd jd tjtjfvr) jd }	td|	 dS dS )r  z(Only supported quantization backends in z and z - not recognized backend zOnly supported versions are in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV, AWQLinearVersion.EXLLAMA, AWQLinearVersion.IPEX] - not recognized version z1LLM-AWQ backend is only supported on CUDA and XPUr  zMLLM-AWQ backend is only supported on CUDA GPUs with compute capability >= 8.0NzYou cannot enable fused modules without specifying a `fuse_max_seq_len`, make sure to pass a valid `fuse_max_seq_len` for your usecaseFz0.1.7rO   znYou current version of `autoawq` does not support module fusing, please upgrade `autoawq` package to at least r   z0.1.8z}You current version of `autoawq` does not support module quantization skipping, please upgrade `autoawq` package to at least )hidden_sizenum_attention_headsnum_key_value_headsmlp	attention	layernorm	use_alibic                 3   s    | ]}| j v V  qd S r   )r*  )r   r`   rx   r=   r>   	<genexpr>  s    z&AwqConfig.post_init.<locals>.<genexpr>zGRequired fields are missing in the fusing mapping, required fields are z0.2.0zpYou current version of `autoawq` does not support exllama backend, please upgrade `autoawq` package to at least i   )r   max_input_lenmax_batch_sizer   r  r  )r   rN   rP   rQ   rI   r?   rJ   r   rE   rF   rG   rH   r   cudais_availablexpuget_device_capabilityr(  r)  r   r   r   r   r+  r*  allr  r   r   r   )
ro   compute_capabilitymajorminorawq_version_supports_fusingMIN_AWQ_VERSION#awq_version_supports_non_conversionrequired_keysawq_version_supports_exllamar  r=   rx   r>   r   j  s   






	
zAwqConfig.post_initc                    r  )N)r   r(  r*  r)  r  c                    r  r=   r=   r  r	  r=   r>   r     r   z4AwqConfig.get_loading_attributes.<locals>.<dictcomp>ry   r  r=   r	  r>   r    s   z AwqConfig.get_loading_attributes)r*   r+   r,   r   r?   rE   rN   rP   r   r   r	   r   r   r   rL   r   r   r  r=   r=   r=   r>   r&  %  sF    #	

"er&  c                   @   sJ   e Zd ZdZ					ddededed	ed
eee  f
ddZdd Z	dS )
AqlmConfiga  
    This is a wrapper class about `aqlm` parameters.

    Args:
        in_group_size (`int`, *optional*, defaults to 8):
            The group size along the input dimension.
        out_group_size (`int`, *optional*, defaults to 1):
            The group size along the output dimension. It's recommended to always use 1.
        num_codebooks (`int`, *optional*, defaults to 1):
            Number of codebooks for the Additive Quantization procedure.
        nbits_per_codebook (`int`, *optional*, defaults to 16):
            Number of bits encoding a single codebook vector. Codebooks size is 2**nbits_per_codebook.
        linear_weights_not_to_quantize (`Optional[List[str]]`, *optional*):
            List of full paths of `nn.Linear` weight parameters that shall not be quantized.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional parameters from which to initialize the configuration object.
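
    Example (a minimal loading sketch; AQLM checkpoints are pre-quantized, so the config below normally ships with the checkpoint and the repository id is a placeholder):

    ```python
    from transformers import AutoModelForCausalLM, AqlmConfig

    # a typical 1x16 setup: one 16-bit codebook over input groups of 8 weights
    quantization_config = AqlmConfig(in_group_size=8, num_codebooks=1, nbits_per_codebook=16)
    model = AutoModelForCausalLM.from_pretrained(
        "<org>/<aqlm-quantized-model>",  # illustrative AQLM checkpoint id
        quantization_config=quantization_config,
        device_map="auto",
    )
    ```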
    r  r      Nin_group_sizeout_group_sizenum_codebooksnbits_per_codebooklinear_weights_not_to_quantizec                 K   s2   t j| _|| _|| _|| _|| _|| _|   d S r   )	r   r0   rT   rF  rG  rH  rI  rJ  r   )ro   rF  rG  rH  rI  rJ  r]   r=   r=   r>   r     s   	zAqlmConfig.__init__c                 C   s   t | jts
tdt | jtstdt | jtstdt | jts(td| jdur7t | jts7t	d| jdu rAg | _dS dS )r   zin_group_size must be a floatzout_group_size must be a floatznum_codebooks must be a floatz"nbits_per_codebook must be a floatNz8linear_weights_not_to_quantize must be a list of strings)
r   rF  r   r   rG  rH  rI  rJ  r   rI   rx   r=   r=   r>   r     s   

zAqlmConfig.post_init)r  r   r   rE  N)
r*   r+   r,   r   r   r	   r   rL   r   r   r=   r=   r=   r>   rD    s&    

rD  c                   @   sx   e Zd ZdZdddddddddgddgddddgfdeded	ed
ededededededededefddZdd ZdS )VptqLayerConfiga  
    This is used to describe the VPTQ config params for each layer.
    Args:
        enable_norm (`bool`, *optional*, defaults to `True`): whether to keep a scale/bias for the full-precision weight
        enable_perm (`bool`, *optional*, defaults to `True`): whether to permute the input channels
        group_num (`int`, *optional*, defaults to `1`): how many single groups for vector-quantization
        group_size (`int`, *optional*, defaults to `-1`): depends on out-features
        indices_as_float (`bool`, *optional*, defaults to `False`): for Finetuning
        is_indice_packed (`bool`, *optional*, defaults to `True`): should always be True
        num_centroids (`list`, *optional*, defaults to `[-1, -1]`): centroid numbers of clusters
        num_res_centroids (`list`, *optional*, defaults to `[-1, -1]`): ditto for residual
        outlier_size (`int`, *optional*, defaults to `1`): outliers
        vector_lens (`list`, *optional*, defaults to `[-1, -1]`): centroid vector length in quantization
    Tr   r  Fr   enable_normenable_perm	group_numr   in_featuresindices_as_floatis_indice_packednum_centroidsnum_res_centroidsout_featuresoutlier_sizevector_lensc                 K   sT   || _ || _|| _|| _|| _|| _|| _|| _|	| _|
| _	|| _
|| _|   d S r   )rL  rM  rN  r   rO  rP  rQ  rR  rS  rT  rU  rV  r   )ro   rL  rM  rN  r   rO  rP  rQ  rR  rS  rT  rU  rV  r]   r=   r=   r>   r   #  s   zVptqLayerConfig.__init__c                 C   s   | j du r	tddS )r  Fz&is_indice_packed should always be TrueN)rQ  rI   rx   r=   r=   r>   r   A  s   
zVptqLayerConfig.post_initN)	r*   r+   r,   r   r   r   tupler   r   r=   r=   r=   r>   rK    sP    	

rK  c                
   @   sP   e Zd ZdZdi i dfdedeeef deeef dee	 fdd	Z
d
d ZdS )
VptqConfiga1  
    This is a wrapper class about `vptq` parameters.

    Args:
        enable_proxy_error (`bool`, *optional*, defaults to `False`): calculate proxy error for each layer
        config_for_layers (`Dict`, *optional*, defaults to `{}`): quantization params for each layer
        shared_layer_config (`Dict`, *optional*, defaults to `{}`): shared quantization params among layers
        modules_to_not_convert (`list`, *optional*, defaults to `None`):
            The list of modules to not quantize, useful for quantizing models that explicitly require to have
            some modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers).
        kwargs (`Dict[str, Any]`, *optional*):
            Additional parameters from which to initialize the configuration object.
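
    Example (a minimal loading sketch; VPTQ checkpoints are pre-quantized, so the quantization config is normally read from the checkpoint and the repository id is a placeholder):

    ```python
    from transformers import AutoModelForCausalLM

    # the per-layer VptqLayerConfig entries live in the checkpoint's config.json,
    # so no explicit VptqConfig needs to be passed here
    model = AutoModelForCausalLM.from_pretrained(
        "<org>/<vptq-quantized-model>",  # illustrative VPTQ checkpoint id
        device_map="auto",
    )
    ```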
    FNenable_proxy_errorconfig_for_layersshared_layer_configr+  c                 K   s,   t j| _|| _|| _|| _|| _|   d S r   )r   r1   rT   rY  rZ  r[  r+  r   )ro   rY  rZ  r[  r+  r]   r=   r=   r>   r   Y  s   zVptqConfig.__init__c                 C   s8   | j  D ]\}}tdi | q| jdu rtddS )r  TzCenable_proxy_error should always be False until we support trainingNr=   )rZ  rU   rK  rY  rI   )ro   
layer_namelayer_paramr=   r=   r>   r   h  s
   
zVptqConfig.post_init)r*   r+   r,   r   r   r   rL   r   r	   r   r   r   r=   r=   r=   r>   rX  I  s     


rX  c                   @   s2   e Zd ZdZ			d	dee fddZdd ZdS )
QuantoConfiga  
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using `quanto`.

    Args:
        weights (`str`, *optional*, defaults to `"int8"`):
            The target dtype for the weights after quantization. Supported values are ("float8","int8","int4","int2")
        activations (`str`, *optional*):
            The target dtype for the activations after quantization. Supported values are (None,"int8","float8")
        modules_to_not_convert (`list`, *optional*, defaults to `None`):
            The list of modules to not quantize, useful for quantizing models that explicitly require to have
            some modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers).
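
    Example (a minimal usage sketch; the checkpoint id is illustrative and assumes the quanto backend is installed):

    ```python
    from transformers import AutoModelForCausalLM, QuantoConfig

    # int8 weight-only quantization applied on the fly at load time
    quantization_config = QuantoConfig(weights="int8")
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-125m",  # illustrative checkpoint
        quantization_config=quantization_config,
        device_map="auto",
    )
    ```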
    r   Nr+  c                 K   s&   t j| _|| _|| _|| _|   d S r   )r   r2   rT   weightsactivationsr+  r   )ro   r_  r`  r+  r]   r=   r=   r>   r     
   zQuantoConfig.__init__c                 C   sT   g d}g d}| j |vrtd| d| j  | j|vr(td| d| j dS )r  )float8r   int4int2)Nr   rb  Only support weights in  but found N)r_  rI   r`  )ro   accepted_weightsaccepted_activationsr=   r=   r>   r     s   

zQuantoConfig.post_init)r   NN)r*   r+   r,   r   r	   r   r   r   r=   r=   r=   r>   r^  r  s    
r^  c                   @   4   e Zd ZdZ		d
dedee fddZdd	 ZdS )
EetqConfiga  
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using `eetq`.

    Args:
        weights (`str`, *optional*, defaults to `"int8"`):
            The target dtype for the weights. The only supported value is "int8".
        modules_to_not_convert (`list`, *optional*, defaults to `None`):
            The list of modules to not quantize, useful for quantizing models that explicitly require to have
            some modules left in their original precision.
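
    Example (a minimal usage sketch; the checkpoint id is illustrative and assumes the `eetq` kernel library is installed on a CUDA GPU):

    ```python
    from transformers import AutoModelForCausalLM, EetqConfig

    # int8 weight-only quantization with EETQ kernels, applied at load time
    quantization_config = EetqConfig("int8")
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-125m",  # illustrative checkpoint
        quantization_config=quantization_config,
        device_map="auto",
    )
    ```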
    r   Nr_  r+  c                 K   s    t j| _|| _|| _|   d S r   )r   r3   rT   r_  r+  r   )ro   r_  r+  r]   r=   r=   r>   r     s   zEetqConfig.__init__c                 C   s*   dg}| j |vrtd| d| j  dS )r  r   re  rf  N)r_  rI   )ro   rg  r=   r=   r>   r     s   
zEetqConfig.post_init)r   N)	r*   r+   r,   r   rL   r	   r   r   r   r=   r=   r=   r>   rj    s    
rj  c                       s   e Zd ZdZ									d'deeedee f f d	ed
dded dee	 deee  deee
f dedefddZdd Zed( fdd	Zdeee
f fddZdeee
f fddZdd  Zed!d" Zed#d$ Zed%d& Z  ZS ))CompressedTensorsConfiga  
    This is a wrapper class that handles compressed-tensors quantization config options.
    It is a wrapper around `compressed_tensors.QuantizationConfig`
    Args:
        config_groups (`typing.Dict[str, typing.Union[ForwardRef('QuantizationScheme'), typing.List[str]]]`, *optional*):
            dictionary mapping group name to a quantization scheme definition
        format (`str`, *optional*, defaults to `"dense"`):
            format the model is represented as. Set `run_compressed` True to execute model as the
            compressed format if not `dense`
        quantization_status (`QuantizationStatus`, *optional*, defaults to `"initialized"`):
            status of model in the quantization lifecycle, ie 'initialized', 'calibration', 'frozen'
        kv_cache_scheme (`typing.Union[QuantizationArgs, NoneType]`, *optional*):
            specifies quantization of the kv cache. If None, kv cache is not quantized.
        global_compression_ratio (`typing.Union[float, NoneType]`, *optional*):
            0-1 float percentage of model compression
        ignore (`typing.Union[typing.List[str], NoneType]`, *optional*):
            layer names or types to not quantize, supports regex prefixed by 're:'
        sparsity_config (`typing.Dict[str, typing.Any]`, *optional*):
            configuration for sparsity compression
        quant_method (`str`, *optional*, defaults to `"compressed-tensors"`):
            do not override, should be compressed-tensors
        run_compressed (`bool`, *optional*, defaults to `True`): alter submodules (usually linear) in order to
            emulate compressed model execution if True, otherwise use default submodule
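
    Example (a minimal loading sketch; compressed-tensors checkpoints are produced by tools such as `llm-compressor`, the quantization config is read from the checkpoint, and the repository id is a placeholder):

    ```python
    from transformers import AutoModelForCausalLM

    # the compressed-tensors quantization_config ships inside the checkpoint's config.json
    model = AutoModelForCausalLM.from_pretrained(
        "<org>/<compressed-tensors-model>",  # illustrative checkpoint id
        device_map="auto",
    )
    ```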
    Ndenseinitializedr#   Tconfig_groupsQuantizationSchemeformatquantization_statusQuantizationStatuskv_cache_schemeQuantizationArgsglobal_compression_ratioignoresparsity_configrT   run_compressedc
                 K   s   t  rddlm} ddlm} ntdd | _d | _|	| _|s!|r2|	||||||||	d|
| _|rA|j
|dfi || _tj| _d S )Nr   )SparsityCompressionConfig)QuantizationConfigzcompressed_tensors is not installed and is required for compressed-tensors quantization. Please install it with `pip install compressed-tensors`.)rn  rT   rp  rq  rs  ru  rv  rx  rp  )r   compressed_tensors.configry  compressed_tensors.quantizationrz  r   quantization_configrw  rx  model_validateload_from_registrygetr   r6   rT   )ro   rn  rp  rq  rs  ru  rv  rw  rT   rx  r]   ry  rz  r=   r=   r>   r     s<   	z CompressedTensorsConfig.__init__c                 C   sB   | j r| jrtd d| _ d S | jstd d| _ d S d S d S )Nz`run_compressed` is only supported for quantized_compressed models and not for sparsified models. Setting `run_compressed=False`FzX`run_compressed` is only supported for compressed models. Setting `run_compressed=False`)rx  is_sparsification_compressedr   warnis_quantization_compressedrx   r=   r=   r>   r     s   


z!CompressedTensorsConfig.post_initFc                    s<   d|v rt dd|di|d }t j|fd|i|S )a  
        Instantiates a [`CompressedTensorsConfig`] from a Python dictionary of parameters.
        Optionally unwraps any args from the nested quantization_config

        Args:
            config_dict (`Dict[str, Any]`):
                Dictionary that will be used to instantiate the configuration object.
            return_unused_kwargs (`bool`,*optional*, defaults to `False`):
                Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in
                `PreTrainedModel`.
            kwargs (`Dict[str, Any]`):
                Additional parameters from which to initialize the configuration object.

        Returns:
            [`QuantizationConfigMixin`]: The configuration object instantiated from those parameters.

        r}  rw  r\   Nr=   )r   r  r   rb   )rZ   r[   r\   r]   r!  r=   r>   rb     s   z!CompressedTensorsConfig.from_dictrs   c                 C   sL   i }| j dur| j  }ntj|d< | jdur | j |d< |S i |d< |S )z
        Quantization config to be added to config.json

        Serializes this instance to a Python dictionary. Returns:
            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
        NrT   rw  )r}  r   r   r6   rw  )ro   r}  r=   r=   r>   rk   /  s   


zCompressedTensorsConfig.to_dictc                 C   sH   |   }t   }i }| D ]\}}||vs||| kr!|||< q|S r   )rk   rk  rU   r   r=   r=   r>   r   C  s   
z$CompressedTensorsConfig.to_diff_dictc                 C   s
   d| j iS )Nrx  )rx  rx   r=   r=   r>   r  X  s   
z.CompressedTensorsConfig.get_loading_attributesc                 C   s   t | jo
t | jjS r   )r   r}  rn  rx   r=   r=   r>   is_quantized[  s   z$CompressedTensorsConfig.is_quantizedc                 C   s    ddl m} | jo| jj|jkS )Nr   )rr  )r|  rr  r  r}  rq  
COMPRESSED)ro   rr  r=   r=   r>   r  _  s   z2CompressedTensorsConfig.is_quantization_compressedc                 C   s,   ddl m}m} t| j|o| jj|jjkS )Nr   )CompressionFormatry  )r{  r  ry  r   rw  rp  rl  ra   )ro   r  ry  r=   r=   r>   r  e  s   z4CompressedTensorsConfig.is_sparsification_compressed)	Nrl  rm  NNNNr#   Tr   )r*   r+   r,   r   r   rL   r   r   r	   r   r   r   r   r   r   rb   rk   r   r  r   r  r  r  r%  r=   r=   r!  r>   rk    sT    

	

0

rk  c                   @   ri  )FbgemmFp8ConfigaG  
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using fbgemm fp8 quantization.

    Args:
        activation_scale_ub (`float`, *optional*, defaults to 1200.0):
            The activation scale upper bound. This is used when quantizing the input activation.
        modules_to_not_convert (`list`, *optional*, defaults to `None`):
            The list of modules to not quantize, useful for quantizing models that explicitly require to have
            some modules left in their original precision.
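
    Example (a minimal usage sketch; the checkpoint id is illustrative and assumes `fbgemm-gpu` is installed on a GPU with FP8 support):

    ```python
    from transformers import AutoModelForCausalLM, FbgemmFp8Config

    # FP8 weight quantization applied at load time
    quantization_config = FbgemmFp8Config()
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-125m",  # illustrative checkpoint
        quantization_config=quantization_config,
        device_map="auto",
    )
    ```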
         @Nactivation_scale_ubr+  c                 K   s   t j| _|| _|| _d S r   )r   r7   rT   r  r+  )ro   r  r+  r]   r=   r=   r>   r     s   
zFbgemmFp8Config.__init__c                    s,   t | j}dg  fdd| D }|S )Nr  c                    r  r=   r=   r  r	  r=   r>   r     r   z:FbgemmFp8Config.get_loading_attributes.<locals>.<dictcomp>ry   r  r=   r	  r>   r    s   z&FbgemmFp8Config.get_loading_attributes)r  N)	r*   r+   r,   r   r   r	   r   r   r  r=   r=   r=   r>   r  r  s    

r  c                   @   s\   e Zd ZdZ						ddeded	eee  d
ededeeee	f  fddZ
dd ZdS )HiggsConfiga  
    HiggsConfig is a configuration class for quantization using the HIGGS method.

    Args:
        bits (int, *optional*, defaults to 4):
            Number of bits to use for quantization. Can be 2, 3 or 4. Default is 4.
        p (int, *optional*, defaults to 2):
            Quantization grid dimension. 1 and 2 are supported. 2 is always better in practice. Default is 2.
        modules_to_not_convert (`list`, *optional*, defaults to ["lm_head"]):
            List of linear layers that should not be quantized.
        hadamard_size (int, *optional*, defaults to 512):
            Hadamard size for the HIGGS method. Default is 512. Input dimension of matrices is padded to this value. Decreasing this below 512 will reduce the quality of the quantization.
        group_size (int, *optional*, defaults to 256):
            Group size for the HIGGS method. Can be 64, 128 or 256. Decreasing it barely affects the performance. Default is 256. Must be a divisor of hadamard_size.
        tune_metadata ('dict', *optional*, defaults to {}):
            Module-wise metadata (gemm block shapes, GPU metadata, etc.) for saving the kernel tuning results. Default is an empty dictionary. Is set automatically during tuning.
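
    Example (a minimal usage sketch; the checkpoint id is illustrative and assumes the FLUTE kernels required by HIGGS are installed):

    ```python
    from transformers import AutoModelForCausalLM, HiggsConfig

    # 4-bit HIGGS quantization applied on the fly at load time
    quantization_config = HiggsConfig(bits=4, p=2)
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-125m",  # illustrative checkpoint
        quantization_config=quantization_config,
        device_map="auto",
    )
    ```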
    r   r   N      r   pr+  hadamard_sizer   tune_metadatac                 K   sD   |d u ri }t j| _|| _|| _|| _|| _|| _|| _| 	  d S r   )
r   r4   rT   r   r  r+  r  r   r  r   )ro   r   r  r+  r  r   r  r]   r=   r=   r>   r     s   
zHiggsConfig.__init__c                 C   sR   | j dvr	td| jdvrtd| jdvrtd| j| j dkr'tdd	S )
r   )r   r  r   zbits must be 2, 3, or 4)r   r   z0p must be 1 or 2. 2 is always better in practice)r   r   r  z"group_size must be 64, 128, or 256r   z-hadamard_size must be divisible by group_sizeN)r   rI   r  r   r  rx   r=   r=   r>   r     s   


zHiggsConfig.post_init)r   r   Nr  r  N)r*   r+   r,   r   r   r	   r   rL   r   r   r   r   r=   r=   r=   r>   r    s,    

r  c                       s   e Zd ZU eed< eedf ed< ee ed< e	ee
f ed< 	 	ddeedf dee fddZed	ejfd
dZdd Zdd Zdd Zdd Z fddZedddZ  ZS )TorchAoConfigrT   AOBaseConfig
quant_typer+  quant_type_kwargsNc                 K   s.   t j| _|| _|| _|d|| _|   d S )Nr  )r   r8   rT   r  r+  r  r  r   )ro   r  r+  r]   r=   r=   r>   r     s
   zTorchAoConfig.__init__rs   c                   C   s    t  stdttjdS )zDCentralized check for TorchAO availability and version requirements.zRTorchAoConfig requires torchao to be installed. Install with `pip install torchao`r%   )r   rI   r   r   r   r   r=   r=   r=   r>   _get_ao_version  s   zTorchAoConfig._get_ao_versionc                 C   st   |   }t| jtr|   dS |tdkr/ddlm} t| j|s-t	dt
| j dS t	dt
| j d)z(Validate configuration and set defaults.0.9.0r   )r  zDquant_type must be either a string or an AOBaseConfig instance, got z6In torchao <= 0.9.0, quant_type must be a string. Got zB. Please upgrade to torchao > 0.9.0 to use AOBaseConfig instances.N)r  r   r  rL   _validate_string_quant_typer   r   torchao.quantization.quant_apir  rI   type)ro   
ao_versionr  r=   r=   r>   r     s   zTorchAoConfig.post_initc              	   C   s   |   }| j|vrtd| j dd|  || j }t|}dd |j D }t| j	| }|rItd| j dd| dd| d	S )
z*Validate string quant_type and its kwargs.z&Unsupported string quantization type: z. Supported types: z, c                 S   s$   h | ]}|j tjtjfv r|jqS r=   )kindr   KEYWORD_ONLYPOSITIONAL_OR_KEYWORDname)r   paramr=   r=   r>   	<setcomp>7  s
    z<TorchAoConfig._validate_string_quant_type.<locals>.<setcomp>zUnexpected keyword arg for z: z. Valid kwargs: N)
!_get_torchao_quant_type_to_methodr  rI   joinr   r   
parametersvaluessetr  )ro   methodsmethodsigvalid_kwargsinvalid_kwargsr=   r=   r>   r  *  s(   


z)TorchAoConfig._validate_string_quant_typec                 C   s&   ddl m}m}m}m} ||||dS )zAGet mapping of quant_type strings to their corresponding methods.r   )	autoquantint4_weight_only#int8_dynamic_activation_int8_weightint8_weight_only)r  r  r  r  )torchao.quantizationr  r  r  r  )ro   r  r  r  r  r=   r=   r>   r  D  s   z/TorchAoConfig._get_torchao_quant_type_to_methodc                 C   s   t | jtr?|  }| j }tj s5t	 r5| jdkr5t
tj
dt
dkr5ddlm} | |d< || j di |S | jS )	zBCreate the appropriate quantization method based on configuration.r  r%   z0.8.0r   )Int4CPULayoutlayoutNr=   )r   r  rL   r  r  ru   r   r7  r8  r   r   r   r   r   torchao.dtypesr  )ro   r  r  r  r=   r=   r>   get_apply_tensor_subclassT  s   


z'TorchAoConfig.get_apply_tensor_subclassc                    sj   t   }t| jtr$d|v r"d|d v r"t|d d |d d< |S ddlm} d|| ji|d< |S )z&Convert configuration to a dictionary.r  r  r   )config_to_dictdefaultr  )	r   rk   r   r  rL   dataclassesasdicttorchao.core.configr  )ro   dr  r!  r=   r>   rk   g  s   
	zTorchAoConfig.to_dictFc                 K   sz   |   }|tdksJ d| }|d}t|dkr"d|v s&J d|d }ddlm} ||}| d
d|i|S )z'Create configuration from a dictionary.r  zATorchAoConfig requires torchao > 0.9.0 for construction from dictr  r   r  z8Expected only one key 'default' in quant_type dictionaryr   )config_from_dictNr=   )r  r   r   ru   rY   r,  r  r  )rZ   r[   r\   r]   
ao_verisonr  r  r=   r=   r>   rb   y  s   
zTorchAoConfig.from_dictr   r   )r*   r+   r,   r   r   r   rL   r	   r   r   r   r   rK   r   Versionr  r   r  r  r  rk   r   rb   r%  r=   r=   r!  r>   r    s*   
 5

r  c                   @   s*   e Zd Z	ddee fddZdd ZdS )BitNetConfigNr+  c                 K   s   t j| _|| _|   d S r   )r   r9   rT   r+  r   )ro   r+  r]   r=   r=   r>   r     s   zBitNetConfig.__init__c                 C   r   )r  Nr=   rx   r=   r=   r>   r     r   zBitNetConfig.post_initr   )r*   r+   r,   r	   r   r   r   r=   r=   r=   r>   r    s    
	r  c                   @   sV   e Zd ZdZ					ddedededeeeef  d	eee  f
d
dZ	dd Z
dS )
SpQRConfigaa  
    This is a wrapper class about `spqr` parameters. Refer to the original publication for more details.

    Args:
        bits (`int`, *optional*, defaults to 3):
            Specifies the bit count for the weights and first order zero-points and scales.
            Currently only bits = 3 is supported.
        beta1 (`int`, *optional*, defaults to 16):
            SpQR tile width. Currently only beta1 = 16 is supported.
        beta2 (`int`, *optional*, defaults to 16):
            SpQR tile height. Currently only beta2 = 16 is supported.
        shapes (`Optional`, *optional*):
            A dictionary holding the shape of each object. We need this because it's impossible
            to deduce the exact size of the parameters just from bits, beta1, beta2.
        modules_to_not_convert (`Optional[List[str]]`, *optional*):
            Optionally, provides a list of full paths of `nn.Linear` weight parameters that shall not be quantized.
            Defaults to None.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional parameters from which to initialize the configuration object.
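
    Example (a minimal loading sketch; SpQR checkpoints are pre-quantized, so this config and its `shapes` normally come from the checkpoint, and the repository id is a placeholder):

    ```python
    from transformers import AutoModelForCausalLM

    # the SpQR quantization_config (bits, beta1, beta2, shapes) ships with the checkpoint
    model = AutoModelForCausalLM.from_pretrained(
        "<org>/<spqr-quantized-model>",  # illustrative SpQR checkpoint id
        device_map="auto",
    )
    ```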
    r  rE  Nr   beta1beta2shapesr+  c                 K   s>   |d u ri }|| _ tj| _|| _|| _|| _|| _|   d S r   )	r  r   r:   rT   r   r  r  r+  r   )ro   r   r  r  r  r+  r]   r=   r=   r>   r     s   	zSpQRConfig.__init__c                 C   s   t | jts
tdt | jtstdt | jtstd| jdkr'td| jdkr0td| jdkr9tdt | jtsCtd	d
S )r   zbits must be an intzbeta1 must be an intzbeta2 must be an intr  z%SpQR currently only supports bits = 3rE  z'SpQR currently only supports beta1 = 16z'SpQR currently only supports beta2 = 16zshapes must be a dictN)	r   r   r   r   r  r  rI   r  r   rx   r=   r=   r>   r     s   


zSpQRConfig.post_init)r  rE  rE  NN)r*   r+   r,   r   r   r	   r   rL   r   r   r   r=   r=   r=   r>   r    s&    

r  c                   @   sB   e Zd ZdZ			ddedeeef dee fdd	Z	d
d Z
dS )FineGrainedFP8Configam  
    FineGrainedFP8Config is a configuration class for fine-grained FP8 quantization used mainly for deepseek models.

    Args:
        activation_scheme (`str`, *optional*, defaults to `"dynamic"`):
            The scheme used for activations; the default and only supported scheme for now is "dynamic".
        weight_block_size (`typing.Tuple[int, int]`, *optional*, defaults to `(128, 128)`):
            The size of the weight blocks for quantization, default is (128, 128).
        modules_to_not_convert (`list`, *optional*):
            A list of module names that should not be converted during quantization.
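
    Example (a minimal usage sketch; the checkpoint id is illustrative and assumes a GPU with FP8 support):

    ```python
    from transformers import AutoModelForCausalLM, FineGrainedFP8Config

    # block-wise (128, 128) FP8 weight quantization with dynamic activation scaling
    quantization_config = FineGrainedFP8Config()
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-125m",  # illustrative checkpoint
        quantization_config=quantization_config,
        device_map="auto",
    )
    ```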
    dynamicr   r   Nactivation_schemeweight_block_sizer+  c                 K   s&   t j| _|| _|| _|| _|   d S r   )r   r;   rT   r+  r  r  r   )ro   r  r  r+  r]   r=   r=   r>   r     ra  zFineGrainedFP8Config.__init__c                 C   sf   | j  | _ | j dvrtd| j  dt| jdkrtd| jd dks-| jd dkr1tdd	S )
r  )r  zActivation scheme z not supportedr   z1weight_block_size must be a tuple of two integersr   r   z:weight_block_size must be a tuple of two positive integersN)r  rD   rI   r,  r  rx   r=   r=   r>   r     s   
zFineGrainedFP8Config.post_init)r  r  N)r*   r+   r,   r   rL   r
   r   r	   r   r   r   r=   r=   r=   r>   r    s    

r  c                   @   s   e Zd Zdd ZdS )QuarkConfigc                 K   s   t  rt rddlm} ddlm} ddlm} ddlm	} |d | _
d|v| _| j
dv r:|j|d	d
| _| | _n9||| _d|v rod|d v rdt|tdk rd|d d}td| d |di |d | _n| | _tj| _d S )Nr   )__version__)JsonExporterConfig)QuantConfigParser)ConfigrT   export)r   r(   F)is_bias_quantizedmin_kv_scalez0.8zThe parameter `min_kv_scale=z` was found in the model config.json's `quantization_config.export` configuration, but this parameter is supported only for quark>=0.8. Ignoring this configuration parameter. Please update the `amd-quark` package.r=   )r   r   r)   r   quark.torch.export.config.configr  2quark.torch.export.main_export.quant_config_parserr  &quark.torch.quantization.config.configr  custom_modelegacyfrom_custom_configr   json_export_configrb   r   r   rY   r   warningr   r<   rT   )ro   r]   quark_versionr  r  r  r  r=   r=   r>   r   	  s(   



 
zQuarkConfig.__init__N)r*   r+   r,   r   r=   r=   r=   r>   r    s    r  )<ru   r  importlib.metadatar   rl   r   r   enumr   inspectr   r   typingr   r   r   r	   r
   r   	packagingr   utilsr   r   r   r   r   r   r   r   import_utilsr   r   
get_loggerr*   r   rL   r   r?   rN   rS   r   r   r   r   r   r&  rD  rK  rX  r^  rj  rk  r  r  r  r  r  r  r  r=   r=   r=   r>   <module>   st    (

p| g m 1;6((! 67 F>'