Commit 7db0be1

JRosenkranz authored and flaviabeo committed
removed HF_HOME as we are now matching huggingface cache implementation in fms
Signed-off-by: Joshua Rosenkranz <jmrosenk@us.ibm.com>
1 parent cc180c2 commit 7db0be1
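
Background for reviewers: once fms resolves model files through the same cache logic as huggingface_hub, the tests can rely on the standard resolution instead of pinning HF_HOME themselves. A minimal sketch of that resolution, assuming the default huggingface_hub cache layout (paths below are illustrative, not part of this commit):

import os

# HF_HOME, when set, overrides the cache root; otherwise the
# huggingface_hub default of ~/.cache/huggingface is used.
hf_home = os.environ.get("HF_HOME", os.path.expanduser("~/.cache/huggingface"))
hub_cache = os.path.join(hf_home, "hub")  # default hub cache layout
print(hub_cache)  # e.g. /home/user/.cache/huggingface/hub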

File tree

3 files changed: +1 -21 lines changed


tests/models/test_decoders.py

Lines changed: 1 addition & 9 deletions
@@ -34,8 +34,7 @@
 except ImportError:
     GPTQ_ENABLED = False

-ORIGINAL_HF_HOME = os.environ.get("HF_HOME", None)
-MICRO_MODELS_HOME = os.environ.get("FMS_TEST_SHAPES_MICRO_MODELS_HOME", "/mnt/home")
+MICRO_MODELS_HOME = os.environ.get("FMS_TEST_SHAPES_MICRO_MODELS_HOME", "/mnt/home/models/tiny-models")

 # Add models to test here
 LLAMA_3p1_8B_INSTRUCT = "meta-llama/Llama-3.1-8B-Instruct"
@@ -175,10 +174,6 @@ def reset_compiler():
     torch.compiler.reset()
     torch._dynamo.reset()
     os.environ.pop("COMPILATION_MODE", None)
-    if ORIGINAL_HF_HOME is None:
-        os.environ.pop("HF_HOME", None)
-    else:
-        os.environ["HF_HOME"] = ORIGINAL_HF_HOME


 # TODO: Currently, gptq does not have the same level of support as non-gptq models for get_model. This method provides the extra requirements for gptq for get_model,
@@ -320,9 +315,6 @@ def test_common_shapes(model_path, batch_size, seq_length, max_new_tokens):
     torch.manual_seed(42)
     os.environ["COMPILATION_MODE"] = "offline_decoder"

-    if "HF_HOME" not in os.environ:
-        os.environ["HF_HOME"] = "/tmp/models/hf_cache"
-
     dprint(
         f"testing model={model_path}, batch_size={batch_size}, seq_length={seq_length}, max_new_tokens={max_new_tokens}, micro_model={USE_MICRO_MODELS}"
     )
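
With the HF_HOME save/restore gone, the teardown fixture in these test modules is reduced to compiler-state cleanup. A minimal sketch of the resulting pattern, reconstructed from the context lines above (the autouse flag and the yield placement are assumptions, not shown in the diff):

import os

import pytest
import torch

@pytest.fixture(autouse=True)  # autouse is assumed, not shown in the diff
def reset_compiler():
    yield  # run the test first, then clean up
    torch.compiler.reset()      # clear torch.compile state
    torch._dynamo.reset()       # clear dynamo caches
    os.environ.pop("COMPILATION_MODE", None)  # no HF_HOME restore needed anymore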

tests/models/test_encoders.py

Lines changed: 0 additions & 9 deletions
@@ -10,8 +10,6 @@
 import os
 import numpy as np

-ORIGINAL_HF_HOME = os.environ.get("HF_HOME", None)
-
 # Add models to test here
 ROBERTA_SQUAD_V2 = "deepset/roberta-base-squad2"

@@ -81,17 +79,10 @@ def reset_compiler():
     torch.compiler.reset()
     torch._dynamo.reset()
     os.environ.pop('COMPILATION_MODE', None)
-    if ORIGINAL_HF_HOME is None:
-        os.environ.pop('HF_HOME', None)
-    else:
-        os.environ['HF_HOME'] = ORIGINAL_HF_HOME

 @pytest.mark.parametrize("model_path,batch_size,seq_length", common_shapes)
 def test_common_shapes(model_path, batch_size, seq_length):
     os.environ["COMPILATION_MODE"] = "offline"
-
-    if "HF_HOME" not in os.environ:
-        os.environ["HF_HOME"] = "/tmp/models/hf_cache"

     dprint(f"testing model={model_path}, batch_size={batch_size}, seq_length={seq_length}")


tests/models/test_model_expectations.py

Lines changed: 0 additions & 3 deletions
@@ -13,9 +13,6 @@

 os.environ["COMPILATION_MODE"] = "offline"

-if "HF_HOME" not in os.environ:
-    os.environ["HF_HOME"] = "/tmp/models/hf_cache"
-
 model_dir = os.environ.get("FMS_TESTING_MODEL_DIR", "/tmp/models")
 LLAMA_3p1_8B_INSTRUCT = "meta-llama/Llama-3.1-8B-Instruct"
 GRANITE_3p2_8B_INSTRUCT = "ibm-granite/granite-3.2-8b-instruct"

0 commit comments