From 7ab91c52d180049343072987a44b759303a39304 Mon Sep 17 00:00:00 2001
From: jiqing-feng
Date: Tue, 5 Nov 2024 05:47:08 -0500
Subject: [PATCH] rm autocast

---
 optimum/exporters/ipex/modeling_utils.py | 1 +
 optimum/intel/ipex/modeling_base.py      | 6 +-----
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/optimum/exporters/ipex/modeling_utils.py b/optimum/exporters/ipex/modeling_utils.py
index 9fdfef3125..b8e92be63e 100755
--- a/optimum/exporters/ipex/modeling_utils.py
+++ b/optimum/exporters/ipex/modeling_utils.py
@@ -354,6 +354,7 @@ def _falcon_model_forward(
         attentions=all_self_attentions,
     )
 
+
 def _gpt2_model_forward(
     self,
     input_ids: Optional[torch.LongTensor] = None,
diff --git a/optimum/intel/ipex/modeling_base.py b/optimum/intel/ipex/modeling_base.py
index 02cde6471f..d34b4f3c42 100644
--- a/optimum/intel/ipex/modeling_base.py
+++ b/optimum/intel/ipex/modeling_base.py
@@ -305,11 +305,7 @@ def can_generate(self):
         return isinstance(self, GenerationMixin)
 
     def _call_model(self, *args, **kwargs):
-        try:
-            with torch.autocast(self.device.type, self.dtype), torch.no_grad():
-                out = self.model(*args, **kwargs)
-        except RuntimeError:
-            out = self.model(*args, **kwargs)
+        out = self.model(*args, **kwargs)
         return out
 
     def _init_warmup(self):
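
A minimal usage sketch, not part of the patch: with the internal autocast/no_grad wrapper removed from _call_model, a caller that still wants bf16 mixed precision can wrap the call site itself. The model id, dtype, and generation arguments below are illustrative assumptions, and the exact from_pretrained arguments may differ across optimum-intel versions.

    import torch
    from transformers import AutoTokenizer
    from optimum.intel import IPEXModelForCausalLM

    # Hypothetical example: apply autocast and no_grad at the call site
    # instead of inside _call_model.
    model_id = "gpt2"  # illustrative checkpoint, not taken from the patch
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = IPEXModelForCausalLM.from_pretrained(model_id)
    inputs = tokenizer("Hello, world", return_tensors="pt")

    with torch.no_grad(), torch.autocast(device_type="cpu", dtype=torch.bfloat16):
        outputs = model.generate(**inputs, max_new_tokens=8)

    print(tokenizer.decode(outputs[0], skip_special_tokens=True))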