From 90de5a4834c0b6fabe2b8382a3705731d5e16198 Mon Sep 17 00:00:00 2001
From: Kyunggeun Lee
Date: Tue, 17 Dec 2024 23:24:25 -0800
Subject: [PATCH] Suppress pylint cyclic-import false alarm (#3661)

Signed-off-by: Kyunggeun Lee
---
 .../torch/src/python/aimet_torch/_base/quantsim.py          | 3 ++-
 .../aimet_torch/experimental/v2/quantsim/export_utils.py    | 3 ++-
 .../torch/src/python/aimet_torch/layer_output_utils.py      | 2 +-
 .../torch/src/python/aimet_torch/meta/connectedgraph.py     | 3 ++-
 .../torch/src/python/aimet_torch/quantsim_config/builder.py | 1 +
 .../python/aimet_torch/quantsim_config/quantsim_config.py   | 2 +-
 TrainingExtensions/torch/src/python/aimet_torch/utils.py    | 6 +++---
 .../python/aimet_torch/v1/quantsim_straight_through_grad.py | 3 ++-
 .../torch/src/python/aimet_torch/v1/tensor_quantizer.py     | 1 +
 .../torch/src/python/aimet_torch/v2/_builder.py             | 1 +
 .../python/aimet_torch/v2/quantization/base/quantizer.py    | 1 +
 11 files changed, 17 insertions(+), 9 deletions(-)

diff --git a/TrainingExtensions/torch/src/python/aimet_torch/_base/quantsim.py b/TrainingExtensions/torch/src/python/aimet_torch/_base/quantsim.py
index caf28887c20..af3e2edb2a3 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/_base/quantsim.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/_base/quantsim.py
@@ -82,6 +82,7 @@ from aimet_torch.experimental.v2.quantsim.export_utils import _export_to_1_0_0
 
 if TYPE_CHECKING:
+    # pylint: disable=cyclic-import
     from aimet_torch.v2.quantization.base.encoding import EncodingBase
 
 
@@ -522,7 +523,7 @@ def _apply_exception_rules(self):
         """
         Apply exception rules to specific op. For example, a rule can override high bitwidth to Embedding module
         """
-        # pylint: disable=import-outside-toplevel
+        # pylint: disable=import-outside-toplevel, cyclic-import
         from aimet_torch.v2.nn import BaseQuantizationMixin
 
         for wrapper in self.qmodules():
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/experimental/v2/quantsim/export_utils.py b/TrainingExtensions/torch/src/python/aimet_torch/experimental/v2/quantsim/export_utils.py
index 4922e997ecc..761977b6af0 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/experimental/v2/quantsim/export_utils.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/experimental/v2/quantsim/export_utils.py
@@ -112,7 +112,8 @@ def _get_activation_encodings(tensor_to_activation_encodings: Dict[str, List], t
 
 
 def _get_param_encodings(tensor_to_param_encodings: Dict[str, List], tensor_to_quantizer_map: Dict):
-    from aimet_torch.v2.quantization.affine import AffineQuantizerBase # pylint: disable=import-outside-toplevel
+    # pylint: disable=import-outside-toplevel, cyclic-import
+    from aimet_torch.v2.quantization.affine import AffineQuantizerBase
 
     param_encodings = []
     for tensor, encodings in tensor_to_param_encodings.items():
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/layer_output_utils.py b/TrainingExtensions/torch/src/python/aimet_torch/layer_output_utils.py
index 00628c82af5..62ff1b1db80 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/layer_output_utils.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/layer_output_utils.py
@@ -276,7 +276,7 @@ def get_layer_name_to_layer_output_name_map(model, naming_scheme: NamingScheme,
     :param dir_path: directory to temporarily save the constructed onnx/torchscrip model
     :return: dictionary of layer-name to layer-output name
     """
-    # pylint: disable=import-outside-toplevel
+    # pylint: disable=import-outside-toplevel, cyclic-import
     if any(isinstance(module, BaseQuantizationMixin) for module in model.modules()):
         from aimet_torch.v2.quantsim import QuantizationSimModel
     else:
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/meta/connectedgraph.py b/TrainingExtensions/torch/src/python/aimet_torch/meta/connectedgraph.py
index 7f6f340e503..8f0eb72e164 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/meta/connectedgraph.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/meta/connectedgraph.py
@@ -1338,7 +1338,8 @@ def _is_recursive_parsing_needed(self, module: torch.nn.Module,
         :param trace: torch.jit trace of the module
         :return: Boolean whether recursive parsing needed or not. If needed returns True, False otherwise.
         """
-        from aimet_torch.v2.nn import BaseQuantizationMixin # pylint: disable=import-outside-toplevel
+        # pylint: disable=import-outside-toplevel, cyclic-import
+        from aimet_torch.v2.nn import BaseQuantizationMixin
 
         if isinstance(module, BaseQuantizationMixin):
             return self._is_recursive_parsing_needed(module.get_original_module(), trace)
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/quantsim_config/builder.py b/TrainingExtensions/torch/src/python/aimet_torch/quantsim_config/builder.py
index b5f66b8a788..7c6e69dbfcb 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/quantsim_config/builder.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/quantsim_config/builder.py
@@ -91,6 +91,7 @@ def __init__(self, module_to_wrap: torch.nn.Module, weight_bw: int, activation_b
 
         # Create quantizer for each parameter and compute encodings
         self.param_quantizers = {}
+        # pylint: disable=import-outside-toplevel, cyclic-import
        from aimet_torch.v2.nn import BaseQuantizationMixin
         if isinstance(module_to_wrap, BaseQuantizationMixin):
             # NOTE: AIMET v2 qmodule always only quantizes the paramters that it directly owns
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/quantsim_config/quantsim_config.py b/TrainingExtensions/torch/src/python/aimet_torch/quantsim_config/quantsim_config.py
index 0f4d5f4463f..d7d76db6477 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/quantsim_config/quantsim_config.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/quantsim_config/quantsim_config.py
@@ -735,7 +735,7 @@ def _create_module_to_quantsim_wrapper_dict(model: torch.nn.Module) -> Dict[torc
     :param model: Pytorch model with quantsim wrappers in place
     :return: Dictionary mapping modules in the model to corresponding quantsim wrappers
     """
-    # pylint: disable=import-outside-toplevel
+    # pylint: disable=import-outside-toplevel, cyclic-import
     from aimet_torch._base.quantsim import _QuantizedModuleProtocol
     from aimet_torch.v2.nn import BaseQuantizationMixin
     module_to_quantsim_wrapper_dict = {}
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/utils.py b/TrainingExtensions/torch/src/python/aimet_torch/utils.py
index 219b80dd29f..3e9d72d7f48 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/utils.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/utils.py
@@ -925,7 +925,6 @@ def get_all_quantizers(model: torch.nn.Module):
     :param model: Root module
     :returns: List of parameter, input, and output quantizers
     """
-    # pylint:disable = cyclic-import
     param_quantizers = []
     input_quantizers = []
     output_quantizers = []
@@ -959,8 +958,9 @@ def disable_all_quantizers(model: torch.nn.Module):
     :param model: Root module
     :returns: Handle that enable all quantizers in the model upon handle.remove().
     """
-    from aimet_torch.v2.nn.base import BaseQuantizationMixin # pylint: disable=import-outside-toplevel, cyclic-import
-    import aimet_torch.v2.utils as v2_utils # pylint: disable=import-outside-toplevel
+    # pylint: disable=import-outside-toplevel, cyclic-import
+    from aimet_torch.v2.nn.base import BaseQuantizationMixin
+    import aimet_torch.v2.utils as v2_utils
 
     if any(isinstance(m, BaseQuantizationMixin) for m in model.modules()):
         return v2_utils.remove_all_quantizers(model)
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/v1/quantsim_straight_through_grad.py b/TrainingExtensions/torch/src/python/aimet_torch/v1/quantsim_straight_through_grad.py
index febff112cee..883d279568b 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/v1/quantsim_straight_through_grad.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/v1/quantsim_straight_through_grad.py
@@ -44,7 +44,8 @@ from aimet_torch.v1.tensor_factory_utils import constant_like
 
 if TYPE_CHECKING:
-    from aimet_torch.v1.tensor_quantizer import LearnedGridTensorQuantizer # pylint:disable = cyclic-import
+    # pylint: disable=cyclic-import
+    from aimet_torch.v1.tensor_quantizer import LearnedGridTensorQuantizer
 
 
 @dataclass
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/v1/tensor_quantizer.py b/TrainingExtensions/torch/src/python/aimet_torch/v1/tensor_quantizer.py
index f42cd3c7595..3ad831ebfa5 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/v1/tensor_quantizer.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/v1/tensor_quantizer.py
@@ -54,6 +54,7 @@ from aimet_torch.fp_quantization import fp8_quantizer, INIT_MAP
 from aimet_torch.v1.tensor_factory_utils import constant_like
 
 if TYPE_CHECKING:
+    # pylint: disable=cyclic-import
     from aimet_torch.v2.quantization.base import EncodingBase
 
 _logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Quant)
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/v2/_builder.py b/TrainingExtensions/torch/src/python/aimet_torch/v2/_builder.py
index fa2739a39c5..13832f5a1fa 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/v2/_builder.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/v2/_builder.py
@@ -134,6 +134,7 @@ def realize(self):
 
         :return: v2 quant wrapper with specified properties
         """
+        # pylint: disable=import-outside-toplevel, cyclic-import
         from aimet_torch.v2.nn import QuantizationMixin
         from aimet_torch.v2.nn.fake_quant import _legacy_impl
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/v2/quantization/base/quantizer.py b/TrainingExtensions/torch/src/python/aimet_torch/v2/quantization/base/quantizer.py
index ca21166a7fc..d5b8f20406e 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/v2/quantization/base/quantizer.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/v2/quantization/base/quantizer.py
@@ -54,6 +54,7 @@ from aimet_torch.utils import deprecated
 
 if TYPE_CHECKING:
+    # pylint: disable=cyclic-import
     from aimet_torch.v2.quantization.tensor import QuantizedTensorBase