Suppress pylint cyclic-import false alarm (#3661)
Signed-off-by: Kyunggeun Lee <[email protected]>
quic-kyunggeu authored Dec 18, 2024
1 parent: 8a9aef0 · commit: 90de5a4
Showing 11 changed files with 17 additions and 9 deletions.
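
All of the changes below apply the same idiom: imports that are deferred into a function body or guarded by typing.TYPE_CHECKING cannot produce a circular import at runtime, but pylint's cyclic-import checker may still report them, so the warning is suppressed locally at each such import. Below is a minimal sketch of the idiom; the pkg.a / pkg.b module names and the Helper class are hypothetical and only illustrate the shape of the suppressions, they are not code from this repository.

# pkg/a.py -- one half of a hypothetical two-module cycle with pkg/b.py
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated only by static type checkers, never at runtime,
    # so it cannot create a real import cycle.
    # pylint: disable=cyclic-import
    from pkg.b import Helper


def make_helper() -> "Helper":
    # Deferred import: resolved when the function is first called, after both
    # modules have finished loading, so the a<->b cycle is harmless at runtime.
    # pylint: disable=import-outside-toplevel, cyclic-import
    from pkg.b import Helper
    return Helper()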
@@ -82,6 +82,7 @@
 from aimet_torch.experimental.v2.quantsim.export_utils import _export_to_1_0_0

 if TYPE_CHECKING:
+    # pylint: disable=cyclic-import
     from aimet_torch.v2.quantization.base.encoding import EncodingBase


@@ -522,7 +523,7 @@ def _apply_exception_rules(self):
         """
         Apply exception rules to specific op. For example, a rule can override high bitwidth to Embedding module
         """
-        # pylint: disable=import-outside-toplevel
+        # pylint: disable=import-outside-toplevel, cyclic-import
         from aimet_torch.v2.nn import BaseQuantizationMixin

         for wrapper in self.qmodules():

@@ -112,7 +112,8 @@ def _get_activation_encodings(tensor_to_activation_encodings: Dict[str, List], t


 def _get_param_encodings(tensor_to_param_encodings: Dict[str, List], tensor_to_quantizer_map: Dict):
-    from aimet_torch.v2.quantization.affine import AffineQuantizerBase # pylint: disable=import-outside-toplevel
+    # pylint: disable=import-outside-toplevel, cyclic-import
+    from aimet_torch.v2.quantization.affine import AffineQuantizerBase

     param_encodings = []
     for tensor, encodings in tensor_to_param_encodings.items():

@@ -276,7 +276,7 @@ def get_layer_name_to_layer_output_name_map(model, naming_scheme: NamingScheme,
     :param dir_path: directory to temporarily save the constructed onnx/torchscrip model
     :return: dictionary of layer-name to layer-output name
     """
-    # pylint: disable=import-outside-toplevel
+    # pylint: disable=import-outside-toplevel, cyclic-import
     if any(isinstance(module, BaseQuantizationMixin) for module in model.modules()):
         from aimet_torch.v2.quantsim import QuantizationSimModel
     else:

@@ -1338,7 +1338,8 @@ def _is_recursive_parsing_needed(self, module: torch.nn.Module,
         :param trace: torch.jit trace of the module
         :return: Boolean whether recursive parsing needed or not. If needed returns True, False otherwise.
         """
-        from aimet_torch.v2.nn import BaseQuantizationMixin # pylint: disable=import-outside-toplevel
+        # pylint: disable=import-outside-toplevel, cyclic-import
+        from aimet_torch.v2.nn import BaseQuantizationMixin
         if isinstance(module, BaseQuantizationMixin):
             return self._is_recursive_parsing_needed(module.get_original_module(), trace)

@@ -91,6 +91,7 @@ def __init__(self, module_to_wrap: torch.nn.Module, weight_bw: int, activation_b
         # Create quantizer for each parameter and compute encodings
         self.param_quantizers = {}

+        # pylint: disable=import-outside-toplevel, cyclic-import
         from aimet_torch.v2.nn import BaseQuantizationMixin
         if isinstance(module_to_wrap, BaseQuantizationMixin):
             # NOTE: AIMET v2 qmodule always only quantizes the paramters that it directly owns

@@ -735,7 +735,7 @@ def _create_module_to_quantsim_wrapper_dict(model: torch.nn.Module) -> Dict[torc
     :param model: Pytorch model with quantsim wrappers in place
     :return: Dictionary mapping modules in the model to corresponding quantsim wrappers
     """
-    # pylint: disable=import-outside-toplevel
+    # pylint: disable=import-outside-toplevel, cyclic-import
     from aimet_torch._base.quantsim import _QuantizedModuleProtocol
     from aimet_torch.v2.nn import BaseQuantizationMixin
     module_to_quantsim_wrapper_dict = {}

TrainingExtensions/torch/src/python/aimet_torch/utils.py (6 changes: 3 additions & 3 deletions)
@@ -925,7 +925,6 @@ def get_all_quantizers(model: torch.nn.Module):
     :param model: Root module
     :returns: List of parameter, input, and output quantizers
     """
-    # pylint:disable = cyclic-import
     param_quantizers = []
     input_quantizers = []
     output_quantizers = []
@@ -959,8 +958,9 @@ def disable_all_quantizers(model: torch.nn.Module):
     :param model: Root module
     :returns: Handle that enable all quantizers in the model upon handle.remove().
     """
-    from aimet_torch.v2.nn.base import BaseQuantizationMixin # pylint: disable=import-outside-toplevel, cyclic-import
-    import aimet_torch.v2.utils as v2_utils # pylint: disable=import-outside-toplevel
+    # pylint: disable=import-outside-toplevel, cyclic-import
+    from aimet_torch.v2.nn.base import BaseQuantizationMixin
+    import aimet_torch.v2.utils as v2_utils

     if any(isinstance(m, BaseQuantizationMixin) for m in model.modules()):
         return v2_utils.remove_all_quantizers(model)

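For reference, the disable_all_quantizers docstring in the hunk above describes a handle-based API: the call disables every quantizer in the model and returns a handle whose remove() restores them. A short usage sketch, assuming only what that docstring states; the model construction and input shape are placeholders.

import torch
from aimet_torch.utils import disable_all_quantizers

model = ...  # placeholder: a torch.nn.Module containing AIMET quantizers

handle = disable_all_quantizers(model)  # quantizers are bypassed from here on
try:
    with torch.no_grad():
        _ = model(torch.randn(1, 3, 224, 224))  # placeholder input; runs without quantization
finally:
    handle.remove()  # re-enables all quantizers, per the docstring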
@@ -44,7 +44,8 @@
 from aimet_torch.v1.tensor_factory_utils import constant_like

 if TYPE_CHECKING:
-    from aimet_torch.v1.tensor_quantizer import LearnedGridTensorQuantizer # pylint:disable = cyclic-import
+    # pylint: disable=cyclic-import
+    from aimet_torch.v1.tensor_quantizer import LearnedGridTensorQuantizer


 @dataclass

@@ -54,6 +54,7 @@
 from aimet_torch.fp_quantization import fp8_quantizer, INIT_MAP
 from aimet_torch.v1.tensor_factory_utils import constant_like
 if TYPE_CHECKING:
+    # pylint: disable=cyclic-import
     from aimet_torch.v2.quantization.base import EncodingBase

 _logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Quant)

@@ -134,6 +134,7 @@ def realize(self):
         :return: v2 quant wrapper with specified properties
         """
+        # pylint: disable=import-outside-toplevel, cyclic-import
         from aimet_torch.v2.nn import QuantizationMixin
         from aimet_torch.v2.nn.fake_quant import _legacy_impl

@@ -54,6 +54,7 @@
 from aimet_torch.utils import deprecated

 if TYPE_CHECKING:
+    # pylint: disable=cyclic-import
     from aimet_torch.v2.quantization.tensor import QuantizedTensorBase

