Commit ef829a9

Fix pylint error (#3473)
Signed-off-by: Kyunggeun Lee <quic_kyunggeu@quicinc.com>
quic-kyunggeu authored Nov 12, 2024
1 parent b4b5d33 commit ef829a9
Showing 3 changed files with 14 additions and 12 deletions.
@@ -109,7 +109,7 @@ def set_precision(self, arg: Union[torch.nn.Module, Type[torch.nn.Module]],
         Examples: TODO
         """
 
+        # pylint: disable=too-many-branches
         if activation:
             if isinstance(activation, List):
                 for act in activation:
@@ -37,9 +37,11 @@
 # =============================================================================
 """Utilities to achieve mixed precision"""
 
-from dataclasses import dataclass, field
+# pylint: disable=logging-fstring-interpolation
+
+from dataclasses import dataclass
 from enum import Enum
-from typing import Dict, Type, List, TypeAlias, Literal, Tuple, Optional, Union, Generator
+from typing import Dict, Type, List, TypeAlias, Literal, Optional, Union, Generator
 import functools
 
 import torch
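
For context on the module-level disable added above: pylint's logging-fstring-interpolation (W1203) flags f-strings inside logging calls because they are formatted eagerly, even when the record is filtered out, while %-style arguments are interpolated lazily. This commit opts the module out of the check rather than rewriting the calls. A minimal sketch (not from this repository) of what the warning targets:

import logging

logger = logging.getLogger(__name__)
value = 42

# Flagged by W1203: the f-string is built eagerly, even when DEBUG
# records are filtered out before being emitted.
logger.debug(f"computed value = {value}")

# What pylint prefers: lazy %-style interpolation, deferred until
# the record is actually handled.
logger.debug("computed value = %s", value)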
@@ -66,10 +68,9 @@ class Precision:
     def __lt__(self, other):
         if self == other:
             return False
-        elif self.bitwidth != other.bitwidth:
+        if self.bitwidth != other.bitwidth:
             return self.bitwidth < other.bitwidth
-        else:
-            return self.data_type == QuantizationDataType.int and other.data_type != QuantizationDataType.int
+        return self.data_type == QuantizationDataType.int and other.data_type != QuantizationDataType.int
 
 
 TranslateUserDtypes = {
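
The __lt__ change above is the standard fix for pylint's no-else-return (R1705): once a branch returns, a trailing elif/else is unnecessary. A minimal sketch of the same refactor on a hypothetical function:

def sign_before(x: int) -> int:
    # Triggers R1705: 'elif' and 'else' are unnecessary after 'return'.
    if x == 0:
        return 0
    elif x > 0:
        return 1
    else:
        return -1

def sign_after(x: int) -> int:
    # Equivalent control flow, flattened the same way the diff flattens __lt__.
    if x == 0:
        return 0
    if x > 0:
        return 1
    return -1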
@@ -226,6 +227,7 @@ def create_mp_request(torch_module: BaseQuantizationMixin, module_name: str, idx
             raise RuntimeError(f"Unsupported request type {user_request.request_type} encountered")
         return mp_requests
 
+    # pylint: disable=unused-argument, no-self-use
     def _apply_backend_awareness(self, mp_requests: Dict, config: str = "", strict: bool = True) -> Dict:
         """
         Apply backend awareness to the requests from the user
@@ -287,7 +289,7 @@ def _get_module_from_cg_op(self, cg_op: CG_Op) -> Optional[torch.nn.Module]:
         if module is None:
             return None
 
-        fully_qualified_name = self._sim.connected_graph._module_to_name[module]
+        fully_qualified_name = self._sim.connected_graph._module_to_name[module] # pylint: disable=protected-access
         _, name = fully_qualified_name.split('.', maxsplit=1)
         quant_module = _rgetattr(self._sim.model, name)
         return quant_module
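
The inline suppression above targets protected-access (W0212), which fires whenever code reaches into an underscore-prefixed attribute of another object. A minimal sketch of the pattern, with hypothetical names:

class Graph:
    def __init__(self):
        self._module_to_name = {}  # underscore-prefixed: internal by convention

def lookup(graph: Graph, module) -> str:
    # Accessing another object's protected member trips W0212; the inline
    # disable marks the access as deliberate rather than accidental.
    return graph._module_to_name[module]  # pylint: disable=protected-access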
@@ -451,7 +453,7 @@ def _propagate_request_upstream_helper(module):
             parent_module = self._get_parent_module_at_input_idx(module, in_idx)
             if parent_module is None:
                 logger.warning(f"Warning: unable to propagate request at {module} upward. "
-                               f"Parent module could not be found.")
+                               "Parent module could not be found.")
                 continue
 
             # TODO: remove this once ops with multiple outputs are supported
@@ -547,7 +549,7 @@ def _apply_requests_to_sim(self, mp_requests: Dict):
                                                request.output_candidates[idx])
 
     def apply(self, user_requests: Dict[int, UserRequest], config: str = "", strict: bool = True,
-              log_file: str = './mmp_log.txt'):
+              log_file: str = './mmp_log.txt'): # pylint: disable=unused-argument
        """
         Apply the mp settings specified through the set_precision/set_model_input_precision/set_model_output_precision
         calls to the QuantSim object
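
The unused-argument (W0613) suppressions here and in the _apply_backend_awareness hunk above keep parameters such as log_file in the public signature even though the current body ignores them. A minimal sketch of the pattern, with hypothetical names:

class Applier:
    # pylint: disable=unused-argument
    def apply(self, requests: dict, log_file: str = './mmp_log.txt') -> None:
        # 'log_file' is accepted but not consumed yet; the disable keeps
        # the signature stable without a W0613 warning on every lint run.
        for name, request in requests.items():
            print(name, request)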
@@ -108,9 +108,9 @@ def _validate_arguments(tensor: torch.Tensor, scale: torch.Tensor,
         msg = f"Scale of shape {scale.shape} cannot be expanded like input tensor of shape {tensor.shape}. "
         # Additional message if the tensor is empty
         if tensor.numel() == 0:
-            msg += (f"Detected that the tensor is empty, which may be caused by the following reasons: "
-                    f"1. The input tensor is incorrect. "
-                    f"2. Improper use of model inference without initializing DeepSpeed after offloading parameters.")
+            msg += ("Detected that the tensor is empty, which may be caused by the following reasons: "
+                    "1. The input tensor is incorrect. "
+                    "2. Improper use of model inference without initializing DeepSpeed after offloading parameters.")
         raise RuntimeError(msg)
 
     if qmin is not None and qmax is not None:
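
The last hunk drops f-prefixes from string literals that contain no placeholders, which pylint reports as f-string-without-interpolation (W1309). Only a literal that actually interpolates something needs to stay an f-string; the rest concatenate implicitly inside the parentheses. A minimal sketch, with hypothetical message text:

shape = (0, 3)

# W1309: these literals have no {placeholders}, so the f-prefix is noise.
msg_before = (f"1. The input tensor is incorrect. "
              f"2. DeepSpeed was not initialized.")

# Plain literals concatenate implicitly; only the interpolating first
# literal keeps its f-prefix.
msg_after = (f"Tensor of shape {shape} is empty. "
             "1. The input tensor is incorrect. "
             "2. DeepSpeed was not initialized.")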
