Commit dc8785a

Zafar Takhirov authored and facebook-github-bot committed

Refactoring names for consistency

Summary: Pull Request resolved: #27670
Test Plan: Imported from OSS
Differential Revision: D17846269
Pulled By: z-a-f
fbshipit-source-id: ed3c7441c185bf11b2e62879aa3ecbc654aa2d4e

1 parent 9540f6c commit dc8785a
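
In short, the commit renames the module torch.quantization.QConfig to torch.quantization.qconfig, renames the Observer base class to ObserverBase, and drops the redundant "quant" from two qconfig presets. A minimal before/after sketch, assuming a PyTorch build that includes this commit:

# Before this commit:
#   from torch.quantization.QConfig import default_qat_qconfig
#   class MyObserver(torch.quantization.Observer): ...

# After this commit:
from torch.quantization.qconfig import default_qat_qconfig
from torch.quantization.observer import ObserverBase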

File tree

10 files changed: +21 −21 lines


docs/source/quantization.rst

Lines changed: 4 additions & 4 deletions
@@ -190,10 +190,10 @@ Layers for the quantization-aware training
 * :class:`~torch.quantization.Observer` — Abstract base class for observers
 * Quantization configurations
 * :class:`~torch.quantization.QConfig` — Quantization configuration class
-* ``default_qconfig`` — Same as ``QConfig(activation=default_observer, weight=default_weight_observer)`` (See :class:`~torch.quantization.QConfig.QConfig`)
-* :attr:`~torch.quantization.default_qat_qconfig` — Same as ``QConfig(activation=default_fake_quant, weight=default_weight_fake_quant)`` (See :class:`~torch.quantization.QConfig.QConfig`)
-* :attr:`~torch.quantization.default_dynamic_qconfig` — Same as ``QConfigDynamic(weight=default_weight_observer)`` (See :class:`~torch.quantization.QConfig.QConfigDynamic`)
-* :attr:`~torch.quantization.float16_dynamic_qconfig` — Same as ``QConfigDynamic(weight=NoopObserver.with_args(dtype=torch.float16))`` (See :class:`~torch.quantization.QConfig.QConfigDynamic`)
+* :attr:`~torch.quantization.default_qconfig` — Same as ``QConfig(activation=default_observer, weight=default_weight_observer)`` (See :class:`~torch.quantization.qconfig.QConfig`)
+* :attr:`~torch.quantization.default_qat_qconfig` — Same as ``QConfig(activation=default_fake_quant, weight=default_weight_fake_quant)`` (See :class:`~torch.quantization.qconfig.QConfig`)
+* :attr:`~torch.quantization.default_dynamic_qconfig` — Same as ``QConfigDynamic(weight=default_weight_observer)`` (See :class:`~torch.quantization.qconfig.QConfigDynamic`)
+* :attr:`~torch.quantization.float16_dynamic_qconfig` — Same as ``QConfigDynamic(weight=NoopObserver.with_args(dtype=torch.float16))`` (See :class:`~torch.quantization.qconfig.QConfigDynamic`)
 * Stubs
 * :class:`~torch.quantization.DeQuantStub` - placeholder module for dequantize() operation in float-valued models
 * :class:`~torch.quantization.QuantStub` - placeholder module for quantize() operation in float-valued models
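
For orientation, the presets this doc section lists are plain namedtuple-style objects holding observer factories. A minimal sketch of inspecting one, assuming a build with this commit:

import torch.quantization as tq

# default_qconfig pairs the default activation and weight observer factories.
qconfig = tq.default_qconfig
print(qconfig.activation)  # factory for the activation observer, not an instance
print(qconfig.weight)      # factory for the weight observer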

test/test_qat.py

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@
 import torch
 from torch.nn import Conv2d, BatchNorm2d, ReLU
 from torch.nn.intrinsic.qat import ConvBn2d, ConvBnReLU2d
-from torch.quantization.QConfig import default_qat_qconfig
+from torch.quantization.qconfig import default_qat_qconfig
 import torch.backends.mkldnn
 from common_utils import TestCase, run_tests
 from hypothesis import given
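
As a usage reminder (a minimal sketch, not from the commit), the import this test now uses feeds the standard QAT preparation flow:

import torch
from torch.quantization.qconfig import default_qat_qconfig

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())
model.qconfig = default_qat_qconfig             # attach the QAT qconfig
model = torch.quantization.prepare_qat(model)   # swap in fake-quant modules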

test/test_quantized_models.py

Lines changed: 2 additions & 2 deletions
@@ -102,8 +102,8 @@ def test_weight_only_activation_only_fakequant(self):
         torch.manual_seed(67)
         calib_data = torch.rand(2048, 3, 15, 15, dtype=torch.float32)
         eval_data = torch.rand(10, 3, 15, 15, dtype=torch.float32)
-        qconfigset = set([torch.quantization.default_weight_only_quant_qconfig,
-                          torch.quantization.default_activation_only_quant_qconfig])
+        qconfigset = set([torch.quantization.default_weight_only_qconfig,
+                          torch.quantization.default_activation_only_qconfig])
         SQNRTarget = [35, 45]
         for idx, qconfig in enumerate(qconfigset):
             my_model = ModelMultipleOpsNoAvgPool().to(torch.float32)
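
The two renamed presets fake-quantize only one side of each op; the other side passes through torch.nn.Identity. A minimal sketch of reaching them at the package top level, assuming this commit:

import torch

weight_only = torch.quantization.default_weight_only_qconfig          # weights fake-quantized, activations Identity
activation_only = torch.quantization.default_activation_only_qconfig  # activations fake-quantized, weights Identity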

torch/nn/quantized/dynamic/modules/linear.py

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ def from_float(cls, mod):
         # We have the circular import issues if we import the qconfig in the beginning of this file:
         # https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the
         # import until we need it.
-        from torch.quantization.QConfig import default_dynamic_qconfig
+        from torch.quantization.qconfig import default_dynamic_qconfig
         weight_observer = default_dynamic_qconfig.weight()
         assert weight_observer.dtype == torch.qint8, 'Weight observer must have dtype torch.qint8'
         weight_observer(mod.weight)

torch/nn/quantized/dynamic/modules/rnn.py

Lines changed: 1 addition & 1 deletion
@@ -227,7 +227,7 @@ def from_float(cls, mod):
         # We have the circular import issues if we import the qconfig in the beginning of this file:
         # https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the
         # import until we need it.
-        from torch.quantization.QConfig import default_dynamic_qconfig
+        from torch.quantization.qconfig import default_dynamic_qconfig
         weight_observer = default_dynamic_qconfig.weight()

         dtype = weight_observer.dtype
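
Both of these from_float hooks (linear.py and rnn.py) are reached through dynamic quantization, which is where the late import of default_dynamic_qconfig fires. A minimal sketch, assuming this commit:

import torch

# quantize_dynamic swaps supported modules via their from_float hooks,
# using default_dynamic_qconfig for the weights by default.
fp32 = torch.nn.Sequential(torch.nn.Linear(16, 8))
int8 = torch.quantization.quantize_dynamic(fp32)
print(int8[0])  # expected: a dynamically quantized Linear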

torch/quantization/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -1,7 +1,7 @@
 from __future__ import absolute_import, division, print_function, unicode_literals
 from .quantize import * # noqa: F401
 from .observer import * # noqa: F401
-from .QConfig import * # noqa: F401
+from .qconfig import * # noqa: F401
 from .fake_quantize import * # noqa: F401
 from .fuse_modules import fuse_modules # noqa: F401
 from .stubs import * # noqa: F401
@@ -24,7 +24,7 @@ def default_eval_fn(model, calib_data):
     'propagate_qconfig_', 'add_quant_dequant', 'add_observer_', 'swap_module',
     'default_eval_fn', 'get_observer_dict',
     # Observers
-    'Observer', 'WeightObserver', 'observer', 'default_observer',
+    'ObserverBase', 'WeightObserver', 'observer', 'default_observer',
     'default_weight_observer',
     # QConfig
     'QConfig', 'default_qconfig', 'default_dynamic_qconfig', 'float16_dynamic_qconfig',
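
Because the package re-exports both modules with wildcards and lists the new names in __all__, the renamed objects stay reachable at the top level. A quick check, assuming this commit:

import torch.quantization as tq

assert hasattr(tq, 'ObserverBase')     # renamed observer base class
assert hasattr(tq, 'default_qconfig')  # re-exported from the renamed qconfig module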

torch/quantization/_quantize_script.py

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 from __future__ import absolute_import, division, print_function, unicode_literals

 import torch
-from .QConfig import QConfig
+from .qconfig import QConfig

 class PackedParams(torch.nn.Module):
     def __init__(self):

torch/quantization/observer.py

Lines changed: 4 additions & 4 deletions
@@ -39,7 +39,7 @@ def _with_args(cls_or_self, **kwargs):
 ABC = ABCMeta(str("ABC"), (object,), {}) # compatible with Python 2 *and* 3:


-class Observer(ABC, nn.Module):
+class ObserverBase(ABC, nn.Module):
     r"""
     Observer base Module. Any observer implementation should derive from this class.

@@ -49,7 +49,7 @@ class Observer(ABC, nn.Module):
     the collected statistics.
     """
     def __init__(self, dtype):
-        super(Observer, self).__init__()
+        super(ObserverBase, self).__init__()
         self.dtype = dtype

     @abstractmethod
@@ -63,7 +63,7 @@ def calculate_qparams(self, **kwargs):
     with_args = classmethod(_with_args)


-class _ObserverBase(Observer):
+class _ObserverBase(ObserverBase):
     r"""
     Common base for all qint/quint8 observers
     """
@@ -636,7 +636,7 @@ def get_tensor_value(self):
         return self.tensor_val


-class NoopObserver(Observer):
+class NoopObserver(ObserverBase):
     r"""
     Observer that doesn't do anything and just passes its configuration to the
     quantized module's ``.from_float()``.
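
To illustrate the renamed base class, here is a minimal hypothetical observer; the fixed qparams are illustrative only, in the spirit of NoopObserver, and nothing below is part of the commit:

import torch
from torch.quantization.observer import ObserverBase

class PassthroughObserver(ObserverBase):
    """Hypothetical observer: records nothing, returns fixed qparams."""
    def __init__(self):
        super(PassthroughObserver, self).__init__(dtype=torch.quint8)

    def forward(self, x):
        return x  # collect no statistics

    def calculate_qparams(self):
        return torch.tensor([1.0]), torch.tensor([0])  # fixed scale, zero_point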
torch/quantization/QConfig.py → torch/quantization/qconfig.py

Lines changed: 4 additions & 4 deletions
@@ -67,10 +67,10 @@ def __new__(cls, weight):
 default_qat_qconfig = QConfig(activation=default_fake_quant,
                               weight=default_weight_fake_quant)

-default_weight_only_quant_qconfig = QConfig(activation=torch.nn.Identity,
-                                            weight=default_weight_fake_quant)
-default_activation_only_quant_qconfig = QConfig(activation=default_fake_quant,
-                                                weight=torch.nn.Identity)
+default_weight_only_qconfig = QConfig(activation=torch.nn.Identity,
+                                      weight=default_weight_fake_quant)
+default_activation_only_qconfig = QConfig(activation=default_fake_quant,
+                                          weight=torch.nn.Identity)

 def get_default_qconfig(backend='fbgemm'):
     if backend == 'fbgemm':
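
QConfig stores factories rather than observer instances, which is why torch.nn.Identity can stand in for either side above. A minimal sketch of building a custom one, assuming this commit:

import torch
from torch.quantization import QConfig, default_observer, default_weight_observer

# A QConfig holds factories; observers are instantiated per module later.
my_qconfig = QConfig(activation=default_observer, weight=default_weight_observer)
act_observer = my_qconfig.activation()  # instantiated when preparing a model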

torch/quantization/quantize.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
                      DEFAULT_QAT_MODULE_MAPPING,
                      DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST)
 from .stubs import DeQuantStub, QuantWrapper
-from .QConfig import default_dynamic_qconfig, float16_dynamic_qconfig
+from .qconfig import default_dynamic_qconfig, float16_dynamic_qconfig

 def _propagate_qconfig_helper(module, qconfig_dict, white_list=None,
                               qconfig_parent=None, prefix=''):
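
The helper shown here backs the public propagate_qconfig_. A minimal sketch of the behavior, assuming this commit: children inherit the parent's qconfig unless they override it.

import torch
from torch.quantization import propagate_qconfig_, default_qconfig

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
model.qconfig = default_qconfig
propagate_qconfig_(model)                    # pushes qconfig down the module tree
print(model[0].qconfig is default_qconfig)   # expected: True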
