#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adapted from
https://github.com/huggingface/transformers/blob/52cb4034ada381fe1ffe8d428a1076e5411a8026/src/transformers/utils/quantization_config.py
"""
from __future__ import annotations

import copy
import importlib.metadata
import json
import os
import warnings
from dataclasses import dataclass
from enum import Enum
from typing import Any, Callable

from packaging import version

from ..utils import deprecate, is_torch_available, is_torchao_version, logging


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class QuantizationMethod(str, Enum):
    BITS_AND_BYTES = "bitsandbytes"
    GGUF = "gguf"
    TORCHAO = "torchao"
    QUANTO = "quanto"
    MODELOPT = "modelopt"


@dataclass
class QuantizationConfigMixin:
    """
    Mixin class for quantization config
    """

    quant_method: QuantizationMethod
    _exclude_attributes_at_init = []

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):
        """
        Instantiates a [`QuantizationConfigMixin`] from a Python dictionary of parameters.

        Args:
            config_dict (`dict[str, Any]`):
                Dictionary that will be used to instantiate the configuration object.
            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
                Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in
                `PreTrainedModel`.
            kwargs (`dict[str, Any]`):
                Additional parameters from which to initialize the configuration object.

        Returns:
            [`QuantizationConfigMixin`]: The configuration object instantiated from those parameters.
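
        Example (an illustrative sketch, not part of the original file; `BitsAndBytesConfig`, defined below, is
        used as a concrete subclass):

        ```python
        config, unused = BitsAndBytesConfig.from_dict(
            {"load_in_8bit": True}, return_unused_kwargs=True, not_an_attribute=1
        )
        # `unused` is `{"not_an_attribute": 1}`; recognized kwargs are set on `config`.
        ```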
"""
config = cls(**config_dict)
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
if return_unused_kwargs:
return config, kwargs
else:
return config
def to_json_file(self, json_file_path: str | os.PathLike):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default
`QuantizationConfig()` is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
config_dict = self.to_dict()
json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
writer.write(json_string)
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary. Returns:
`dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
return copy.deepcopy(self.__dict__)
def __iter__(self):
"""allows `dict(obj)` for situations where obj may be a dict or QuantizationConfigMixin"""
for attr, value in copy.deepcopy(self.__dict__).items():
yield attr, value
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON string.
Returns:
`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def update(self, **kwargs):
"""
Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,
returning all the unused kwargs.
Args:
kwargs (`dict[str, Any]`):
Dictionary of attributes to tentatively update this class.
Returns:
`dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
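
        Example (an illustrative sketch, not part of the original file, using the `BitsAndBytesConfig` subclass
        defined below):

        ```python
        config = BitsAndBytesConfig(load_in_8bit=True)
        unused = config.update(llm_int8_threshold=5.0, not_an_attribute=1)
        # `llm_int8_threshold` is updated in place; `unused` is `{"not_an_attribute": 1}`.
        ```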
"""
to_remove = []
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
to_remove.append(key)
# Remove all the attributes that were updated, without modifying the input dict
unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
return unused_kwargs
@dataclass
class BitsAndBytesConfig(QuantizationConfigMixin):
    """
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using `bitsandbytes`.

    This replaces `load_in_8bit` or `load_in_4bit`, therefore both options are mutually exclusive.

    Currently only supports `LLM.int8()`, `FP4`, and `NF4` quantization. If more methods are added to `bitsandbytes`,
    then more arguments will be added to this class.

    Args:
        load_in_8bit (`bool`, *optional*, defaults to `False`):
            This flag is used to enable 8-bit quantization with LLM.int8().
        load_in_4bit (`bool`, *optional*, defaults to `False`):
            This flag is used to enable 4-bit quantization by replacing the Linear layers with FP4/NF4 layers from
            `bitsandbytes`.
        llm_int8_threshold (`float`, *optional*, defaults to 6.0):
            This corresponds to the outlier threshold for outlier detection as described in the `LLM.int8(): 8-bit
            Matrix Multiplication for Transformers at Scale` paper (https://huggingface.co/papers/2208.07339). Any
            hidden-state value above this threshold is considered an outlier, and the operation on those values is
            done in fp16. Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5],
            but there are some exceptional systematic outliers that are very differently distributed for large models.
            These outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of
            magnitude ~5, but beyond that, there is a significant performance penalty. A good default threshold is 6,
            but a lower threshold might be needed for more unstable models (small models, fine-tuning).
        llm_int8_skip_modules (`list[str]`, *optional*):
            An explicit list of the modules that we do not want to convert to 8-bit. This is useful for models such as
            Jukebox that have several heads in different places and not necessarily at the last position. For example,
            for `CausalLM` models, the last `lm_head` is typically kept in its original `dtype`.
        llm_int8_enable_fp32_cpu_offload (`bool`, *optional*, defaults to `False`):
            This flag is used for advanced use cases and users that are aware of this feature. If you want to split
            your model in different parts and run some parts in int8 on GPU and some parts in fp32 on CPU, you can use
            this flag. This is useful for offloading large models such as `google/flan-t5-xxl`. Note that the int8
            operations will not be run on CPU.
        llm_int8_has_fp16_weight (`bool`, *optional*, defaults to `False`):
            This flag runs LLM.int8() with 16-bit main weights. This is useful for fine-tuning as the weights do not
            have to be converted back and forth for the backward pass.
        bnb_4bit_compute_dtype (`torch.dtype` or `str`, *optional*, defaults to `torch.float32`):
            This sets the computational type which might be different than the input type. For example, inputs might
            be fp32, but computation can be set to bf16 for speedups.
        bnb_4bit_quant_type (`str`, *optional*, defaults to `"fp4"`):
            This sets the quantization data type in the `bnb.nn.Linear4Bit` layers. Options are the FP4 and NF4 data
            types, specified by `fp4` or `nf4`.
        bnb_4bit_use_double_quant (`bool`, *optional*, defaults to `False`):
            This flag is used for nested quantization where the quantization constants from the first quantization are
            quantized again.
        bnb_4bit_quant_storage (`torch.dtype` or `str`, *optional*, defaults to `torch.uint8`):
            This sets the storage type used to pack the quantized 4-bit params.
        kwargs (`dict[str, Any]`, *optional*):
            Additional parameters from which to initialize the configuration object.
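
    Example (a minimal usage sketch, not part of the original file; the checkpoint below is illustrative):

    ```python
    import torch

    from diffusers import BitsAndBytesConfig, FluxTransformer2DModel

    quantization_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    )
    transformer = FluxTransformer2DModel.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        subfolder="transformer",
        quantization_config=quantization_config,
    )
    ```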
"""
_exclude_attributes_at_init = ["_load_in_4bit", "_load_in_8bit", "quant_method"]
def __init__(
self,
load_in_8bit=False,
load_in_4bit=False,
llm_int8_threshold=6.0,
llm_int8_skip_modules=None,
llm_int8_enable_fp32_cpu_offload=False,
llm_int8_has_fp16_weight=False,
bnb_4bit_compute_dtype=None,
bnb_4bit_quant_type="fp4",
bnb_4bit_use_double_quant=False,
bnb_4bit_quant_storage=None,
**kwargs,
):
self.quant_method = QuantizationMethod.BITS_AND_BYTES
if load_in_4bit and load_in_8bit:
raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time")
self._load_in_8bit = load_in_8bit
self._load_in_4bit = load_in_4bit
self.llm_int8_threshold = llm_int8_threshold
self.llm_int8_skip_modules = llm_int8_skip_modules
self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
self.bnb_4bit_quant_type = bnb_4bit_quant_type
self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
if bnb_4bit_compute_dtype is None:
self.bnb_4bit_compute_dtype = torch.float32
elif isinstance(bnb_4bit_compute_dtype, str):
self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
else:
raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
if bnb_4bit_quant_storage is None:
self.bnb_4bit_quant_storage = torch.uint8
elif isinstance(bnb_4bit_quant_storage, str):
if bnb_4bit_quant_storage not in [
"float16",
"float32",
"int8",
"uint8",
"float64",
"bfloat16",
]:
raise ValueError(
"`bnb_4bit_quant_storage` must be a valid string (one of 'float16', 'float32', 'int8', 'uint8', 'float64', 'bfloat16') "
)
self.bnb_4bit_quant_storage = getattr(torch, bnb_4bit_quant_storage)
elif isinstance(bnb_4bit_quant_storage, torch.dtype):
self.bnb_4bit_quant_storage = bnb_4bit_quant_storage
else:
raise ValueError("bnb_4bit_quant_storage must be a string or a torch.dtype")
if kwargs and not all(k in self._exclude_attributes_at_init for k in kwargs):
logger.warning(f"Unused kwargs: {list(kwargs.keys())}. These kwargs are not used in {self.__class__}.")
self.post_init()
    @property
    def load_in_4bit(self):
        return self._load_in_4bit

    @load_in_4bit.setter
    def load_in_4bit(self, value: bool):
        if not isinstance(value, bool):
            raise TypeError("load_in_4bit must be a boolean")

        if self.load_in_8bit and value:
            raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time")
        self._load_in_4bit = value

    @property
    def load_in_8bit(self):
        return self._load_in_8bit

    @load_in_8bit.setter
    def load_in_8bit(self, value: bool):
        if not isinstance(value, bool):
            raise TypeError("load_in_8bit must be a boolean")

        if self.load_in_4bit and value:
            raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time")
        self._load_in_8bit = value

    def post_init(self):
        r"""
        Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
        """
        if not isinstance(self.load_in_4bit, bool):
            raise TypeError("load_in_4bit must be a boolean")

        if not isinstance(self.load_in_8bit, bool):
            raise TypeError("load_in_8bit must be a boolean")

        if not isinstance(self.llm_int8_threshold, float):
            raise TypeError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise TypeError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise TypeError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise TypeError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise TypeError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise TypeError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise TypeError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        r"""
        Returns `True` if the model is quantizable, `False` otherwise.
        """
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        r"""
        This method returns the quantization method used for the model. If the model is not quantizable, it returns
        `None`.
        """
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    def to_dict(self) -> dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        output["bnb_4bit_quant_storage"] = str(output["bnb_4bit_quant_storage"]).split(".")[1]
        output["load_in_4bit"] = self.load_in_4bit
        output["load_in_8bit"] = self.load_in_8bit

        return output

    def __repr__(self):
        config_dict = self.to_dict()
        return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}\n"
    def to_diff_dict(self) -> dict[str, Any]:
        """
        Removes all attributes from config which correspond to the default config attributes for better readability
        and serializes to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
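
        Example (illustrative, not part of the original file):

        ```python
        config = BitsAndBytesConfig(load_in_4bit=True)
        diff = config.to_diff_dict()  # contains only the entries that differ from a default `BitsAndBytesConfig()`
        ```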
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = BitsAndBytesConfig().to_dict()
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
serializable_config_dict[key] = value
return serializable_config_dict
@dataclass
class GGUFQuantizationConfig(QuantizationConfigMixin):
    """This is a config class for GGUF Quantization techniques.

    Args:
        compute_dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
            This sets the computational type which might be different than the input type. For example, inputs might
            be fp32, but computation can be set to bf16 for speedups.
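
    Example (a minimal sketch, not part of the original file; the GGUF path below is a placeholder for a
    pre-quantized checkpoint):

    ```python
    import torch

    from diffusers import FluxTransformer2DModel, GGUFQuantizationConfig

    transformer = FluxTransformer2DModel.from_single_file(
        "path/to/flux1-dev-Q4_K_M.gguf",  # placeholder path to a pre-quantized GGUF file
        quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    )
    ```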
"""
def __init__(self, compute_dtype: "torch.dtype" | None = None):
self.quant_method = QuantizationMethod.GGUF
self.compute_dtype = compute_dtype
self.pre_quantized = True
# TODO: (Dhruv) Add this as an init argument when we can support loading unquantized checkpoints.
self.modules_to_not_convert = None
if self.compute_dtype is None:
self.compute_dtype = torch.float32
@dataclass
class TorchAoConfig(QuantizationConfigMixin):
    """This is a config class for torchao quantization/sparsity techniques.

    Args:
        quant_type (`AOBaseConfig`):
            An `AOBaseConfig` subclass instance specifying the quantization type. See the [torchao
            documentation](https://docs.pytorch.org/ao/main/api_ref_quantization.html#inference-apis-for-quantize) for
            available config classes (e.g. `Int4WeightOnlyConfig`, `Int8WeightOnlyConfig`, `Float8WeightOnlyConfig`,
            `Float8DynamicActivationFloat8WeightConfig`, etc.).
        modules_to_not_convert (`list[str]`, *optional*, defaults to `None`):
            The list of modules to not quantize, useful for quantizing models that explicitly require some modules to
            be left in their original precision.

    Example:
        ```python
        import torch

        from diffusers import FluxTransformer2DModel, TorchAoConfig
        from torchao.quantization import Int8WeightOnlyConfig

        quantization_config = TorchAoConfig(Int8WeightOnlyConfig())
        transformer = FluxTransformer2DModel.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            subfolder="transformer",
            quantization_config=quantization_config,
            torch_dtype=torch.bfloat16,
        )
        ```
    """
    def __init__(
        self,
        quant_type: "AOBaseConfig",  # noqa: F821
        modules_to_not_convert: list[str] | None = None,
        **kwargs,
    ) -> None:
        self.quant_method = QuantizationMethod.TORCHAO
        self.quant_type = quant_type
        self.modules_to_not_convert = modules_to_not_convert
        self.post_init()

    def post_init(self):
        if is_torchao_version("<", "0.15.0"):
            raise ValueError("TorchAoConfig requires torchao >= 0.15.0. Please upgrade with `pip install -U torchao`.")

        from torchao.quantization.quant_api import AOBaseConfig

        if not isinstance(self.quant_type, AOBaseConfig):
            raise TypeError(f"quant_type must be an AOBaseConfig instance, got {type(self.quant_type).__name__}")

    def to_dict(self):
        """Convert configuration to a dictionary."""
        d = super().to_dict()

        # Handle AOBaseConfig serialization
        from torchao.core.config import config_to_dict

        # For now we assume there is 1 config per Transformer, however in the future
        # we may want to support a config per fqn.
        # See: https://docs.pytorch.org/ao/stable/api_reference/generated/torchao.quantization.quantize_.html
        d["quant_type"] = {"default": config_to_dict(self.quant_type)}

        return d

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):
        """Create configuration from a dictionary."""
        if not is_torchao_version(">=", "0.15.0"):
            raise NotImplementedError("TorchAoConfig requires torchao >= 0.15.0 for construction from dict")

        config_dict = config_dict.copy()
        quant_type = config_dict.pop("quant_type")

        # Check if we only have one key which is "default"
        # In the future we may update this
        assert len(quant_type) == 1 and "default" in quant_type, (
            "Expected only one key 'default' in quant_type dictionary"
        )
        quant_type = quant_type["default"]

        # Deserialize quant_type if needed
        from torchao.core.config import config_from_dict

        quant_type = config_from_dict(quant_type)

        return cls(quant_type=quant_type, **config_dict)

    def get_apply_tensor_subclass(self):
        """Create the appropriate quantization method based on configuration."""
        return self.quant_type

    def __repr__(self):
        config_dict = self.to_dict()
        return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}\n"


@dataclass
class QuantoConfig(QuantizationConfigMixin):
    """
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using `quanto`.

    Args:
        weights_dtype (`str`, *optional*, defaults to `"int8"`):
            The target dtype for the weights after quantization. Supported values are ("float8", "int8", "int4",
            "int2").
        modules_to_not_convert (`list`, *optional*, defaults to `None`):
            The list of modules to not quantize, useful for quantizing models that explicitly require some modules to
            be left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers).
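
    Example (a minimal sketch, not part of the original file; note that `QuantoConfig` is deprecated, see the
    deprecation notice in `__init__` below, and the checkpoint is illustrative):

    ```python
    from diffusers import FluxTransformer2DModel, QuantoConfig

    quantization_config = QuantoConfig(weights_dtype="int8")
    transformer = FluxTransformer2DModel.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        subfolder="transformer",
        quantization_config=quantization_config,
    )
    ```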
"""
def __init__(
self,
weights_dtype: str = "int8",
modules_to_not_convert: list[str] | None = None,
**kwargs,
):
deprecation_message = "`QuantoConfig` is deprecated and will be removed in version 1.0.0."
deprecate("QuantoConfig", "1.0.0", deprecation_message)
self.quant_method = QuantizationMethod.QUANTO
self.weights_dtype = weights_dtype
self.modules_to_not_convert = modules_to_not_convert
self.post_init()
def post_init(self):
r"""
Safety checker that arguments are correct
"""
accepted_weights = ["float8", "int8", "int4", "int2"]
if self.weights_dtype not in accepted_weights:
raise ValueError(f"Only support weights in {accepted_weights} but found {self.weights_dtype}")
@dataclass
class NVIDIAModelOptConfig(QuantizationConfigMixin):
    """This is a config class to use nvidia modelopt for quantization.

    Args:
        quant_type (`str`):
            The type of quantization to use, written as `<weight-quant>_<activation-quant>`. For example, `FP8_FP8`
            uses FP8 for both weight and activation quantization, while a single component such as `FP8` omits the
            activation component. The supported types for each component are:
            - FP8
            - INT8
            - INT4
            - NF4
            - NVFP4
        modules_to_not_convert (`list[str]`, *optional*, defaults to `None`):
            The list of modules to not quantize, useful for quantizing models that explicitly require some modules to
            be left in their original precision.
        weight_only (`bool`, *optional*, defaults to `True`):
            If set to `True`, the quantization will be applied only to the weights of the model.
        channel_quantize (`int`, *optional*, defaults to `None`):
            The channel quantization axis, useful for quantizing models across different axes.
        block_quantize (`int`, *optional*, defaults to `None`):
            The block size, useful to further quantize each channel/axis into blocks.
        scale_channel_quantize (`int`, *optional*, defaults to `None`):
            The scale channel quantization axis, useful for quantizing the calculated scale across different axes.
        scale_block_quantize (`int`, *optional*, defaults to `None`):
            The scale block size, useful for quantizing each scale channel/axis into blocks.
        algorithm (`str`, *optional*, defaults to `"max"`):
            The algorithm to use for quantization, currently only supports `"max"`.
        forward_loop (`Callable`, *optional*, defaults to `None`):
            The forward loop function to use for calibration during quantization.
        modelopt_config (`dict`, *optional*, defaults to `None`):
            The modelopt config, useful for passing custom configs to modelopt.
        disable_conv_quantization (`bool`, *optional*, defaults to `False`):
            If set to `True`, quantization will be disabled for convolutional layers.
        kwargs (`dict[str, Any]`, *optional*):
            Additional parameters which are to be used for calibration.
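
    Example (a minimal sketch, not part of the original file; it assumes `NVIDIAModelOptConfig` is exported from
    `diffusers` like the other configs here, that `nvidia-modelopt` is installed, and the checkpoint is illustrative):

    ```python
    from diffusers import FluxTransformer2DModel, NVIDIAModelOptConfig

    quantization_config = NVIDIAModelOptConfig(quant_type="FP8", weight_only=True)
    transformer = FluxTransformer2DModel.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        subfolder="transformer",
        quantization_config=quantization_config,
    )
    ```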
"""
quanttype_to_numbits = {
"FP8": (4, 3),
"INT8": 8,
"INT4": 4,
"NF4": 4,
"NVFP4": (2, 1),
}
quanttype_to_scalingbits = {
"NF4": 8,
"NVFP4": (4, 3),
}
def __init__(
self,
quant_type: str,
modules_to_not_convert: list[str] | None = None,
weight_only: bool = True,
channel_quantize: int | None = None,
block_quantize: int | None = None,
scale_channel_quantize: int | None = None,
scale_block_quantize: int | None = None,
algorithm: str = "max",
forward_loop: Callable | None = None,
modelopt_config: dict | None = None,
disable_conv_quantization: bool = False,
**kwargs,
) -> None:
self.quant_method = QuantizationMethod.MODELOPT
self._normalize_quant_type(quant_type)
self.modules_to_not_convert = modules_to_not_convert
self.weight_only = weight_only
self.channel_quantize = channel_quantize
self.block_quantize = block_quantize
self.calib_cfg = {
"method": algorithm,
# add more options here if needed
}
self.forward_loop = forward_loop
self.scale_channel_quantize = scale_channel_quantize
self.scale_block_quantize = scale_block_quantize
self.modelopt_config = self.get_config_from_quant_type() if not modelopt_config else modelopt_config
self.disable_conv_quantization = disable_conv_quantization
    def check_model_patching(self, operation: str = "loading"):
        # ModelOpt imports diffusers internally. This is here to prevent circular imports
        from modelopt.torch.opt.plugins.huggingface import _PATCHED_CLASSES

        if len(_PATCHED_CLASSES) == 0:
            warning_msg = (
                f"Not {operation} weights in modelopt format. This might cause unreliable behavior. "
                "Please make sure to run the following code before loading/saving model weights:\n\n"
                "    from modelopt.torch.opt import enable_huggingface_checkpointing\n"
                "    enable_huggingface_checkpointing()\n"
            )
            warnings.warn(warning_msg)
    def _normalize_quant_type(self, quant_type: str) -> None:
        """
        Validates and normalizes the quantization type string.

        Splits the quant_type into weight and activation components, verifies them against supported types, and
        replaces unsupported values with safe defaults. The result, a valid quantization type string (e.g.,
        'FP8_INT8' or 'FP8'), is stored on `self.quant_type` rather than returned.

        Args:
            quant_type (str): The input quantization type string (e.g., 'FP8_INT8').
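
        Example (illustrative): `"FP8_INT8"` is kept as-is; an unsupported weight type such as `"XX8_INT8"` is
        normalized to `"FP8_INT8"` with a warning; a string with more than two components falls back to `"FP8"`.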
"""
parts = quant_type.split("_")
w_type = parts[0]
act_type = parts[1] if len(parts) > 1 else None
if len(parts) > 2:
logger.warning(f"Quantization type {quant_type} is not supported. Picking FP8_INT8 as default")
w_type = "FP8"
act_type = None
else:
if w_type not in NVIDIAModelOptConfig.quanttype_to_numbits:
logger.warning(f"Weight Quantization type {w_type} is not supported. Picking FP8 as default")
w_type = "FP8"
if act_type is not None and act_type not in NVIDIAModelOptConfig.quanttype_to_numbits:
logger.warning(f"Activation Quantization type {act_type} is not supported. Picking INT8 as default")
act_type = None
self.quant_type = w_type + ("_" + act_type if act_type is not None else "")
    def get_config_from_quant_type(self) -> dict[str, Any]:
        """
        Get the config from the quantization type.
        """
        import modelopt.torch.quantization as mtq

        BASE_CONFIG = {
            "quant_cfg": {
                "*weight_quantizer": {"fake_quant": False},
                "*input_quantizer": {},
                "*output_quantizer": {"enable": False},
                "*q_bmm_quantizer": {},
                "*k_bmm_quantizer": {},
                "*v_bmm_quantizer": {},
                "*softmax_quantizer": {},
                **mtq.config._default_disabled_quantizer_cfg,
            },
            "algorithm": self.calib_cfg,
        }

        quant_cfg = BASE_CONFIG["quant_cfg"]
        if self.weight_only:
            # Disable every still-unconfigured quantizer except the weight quantizer
            for k in quant_cfg:
                if "*weight_quantizer" not in k and not quant_cfg[k]:
                    quant_cfg[k]["enable"] = False

        parts = self.quant_type.split("_")
        w_type = parts[0]
        act_type = parts[1].replace("A", "") if len(parts) > 1 else None

        for k in quant_cfg:
            if k not in mtq.config._default_disabled_quantizer_cfg and "enable" not in quant_cfg[k]:
                if k == "*input_quantizer":
                    if act_type is not None:
                        quant_cfg[k]["num_bits"] = NVIDIAModelOptConfig.quanttype_to_numbits[act_type]
                    continue
                quant_cfg[k]["num_bits"] = NVIDIAModelOptConfig.quanttype_to_numbits[w_type]

        if self.block_quantize is not None and self.channel_quantize is not None:
            quant_cfg["*weight_quantizer"]["block_sizes"] = {self.channel_quantize: self.block_quantize}
            quant_cfg["*input_quantizer"]["block_sizes"] = {
                self.channel_quantize: self.block_quantize,
                "type": "dynamic",
            }
        elif self.channel_quantize is not None:
            quant_cfg["*weight_quantizer"]["axis"] = self.channel_quantize
            quant_cfg["*input_quantizer"]["axis"] = self.channel_quantize
            quant_cfg["*input_quantizer"]["type"] = "dynamic"

        # Only fixed scaling sizes are supported for now in modelopt
        if self.scale_channel_quantize is not None and self.scale_block_quantize is not None:
            if w_type in NVIDIAModelOptConfig.quanttype_to_scalingbits:
                quant_cfg["*weight_quantizer"]["block_sizes"].update(
                    {
                        "scale_bits": NVIDIAModelOptConfig.quanttype_to_scalingbits[w_type],
                        "scale_block_sizes": {self.scale_channel_quantize: self.scale_block_quantize},
                    }
                )
            if act_type and act_type in NVIDIAModelOptConfig.quanttype_to_scalingbits:
                quant_cfg["*input_quantizer"]["block_sizes"].update(
                    {
                        "scale_bits": NVIDIAModelOptConfig.quanttype_to_scalingbits[act_type],
                        "scale_block_sizes": {self.scale_channel_quantize: self.scale_block_quantize},
                    }
                )

        return BASE_CONFIG