diff --git a/examples/qualcomm/custom_op/custom_ops_1.py b/examples/qualcomm/custom_op/custom_ops_1.py
index 31b3b6ff3ec..ed99eabc9c8 100644
--- a/examples/qualcomm/custom_op/custom_ops_1.py
+++ b/examples/qualcomm/custom_op/custom_ops_1.py
@@ -70,11 +70,10 @@ def annotate_custom(gm: torch.fx.GraphModule) -> None:
     This function is specific for custom op.
     The source_fn of the rewritten nn module turns out to be "my_ops.mul3.default"
     """
-    from executorch.backends.qualcomm.quantizer.annotators import _is_annotated
-
     from executorch.backends.qualcomm.quantizer.qconfig import (
         get_ptq_per_channel_quant_config,
     )
+    from executorch.backends.qualcomm.quantizer.rules import _is_annotated
     from torch.fx import Node
     from torchao.quantization.pt2e.quantizer import QuantizationAnnotation
     from torchao.quantization.pt2e.quantizer.quantizer import Q_ANNOTATION_KEY
diff --git a/examples/qualcomm/oss_scripts/fastvit.py b/examples/qualcomm/oss_scripts/fastvit.py
index 3e620ab0300..87d90bb61b7 100644
--- a/examples/qualcomm/oss_scripts/fastvit.py
+++ b/examples/qualcomm/oss_scripts/fastvit.py
@@ -12,16 +12,13 @@

 import numpy as np
 import torch
-from executorch.backends.qualcomm.quantizer.annotators import (
-    QuantizationConfig,
-    QuantizationSpec,
-)
 from executorch.backends.qualcomm.quantizer.observers.per_channel_param_observer import (
     PerChannelParamObserver,
 )
 from executorch.backends.qualcomm.quantizer.qconfig import (
     _derived_bias_quant_spec,
     MovingAverageMinMaxObserver,
+    QuantizationConfig,
 )
 from executorch.backends.qualcomm.quantizer.quantizer import QuantDtype

@@ -40,6 +37,7 @@
     SimpleADB,
     topk_accuracy,
 )
+from torchao.quantization.pt2e.quantizer import QuantizationSpec


 def get_instance(repo_path: str, checkpoint_path: str):