Commit 318bd3d

clean up
Signed-off-by: Dan Huang <dahuang@redhat.com>
1 parent 264fdcb

File tree: 1 file changed (+12, -33 lines)


tests/e2e/vLLM/test_vllm.py

Lines changed: 12 additions & 33 deletions
@@ -16,27 +16,20 @@
 from tests.testing_utils import requires_gpu
 
 
-def is_quay_image(url: str) -> bool:
-    pattern = r"^quay\.io/[a-z0-9][a-z0-9-_]*/[a-z0-9][a-z0-9-_/]*:[\w][\w.-]*$"
-    return re.match(pattern, url) is not None
-
 HF_MODEL_HUB_NAME = "nm-testing"
 
 TEST_DATA_FILE = os.environ.get(
     "TEST_DATA_FILE", "tests/e2e/vLLM/configs/int8_dynamic_per_token.yaml"
 )
 SKIP_HF_UPLOAD = os.environ.get("SKIP_HF_UPLOAD", "")
-# vllm environment: same (default), the path of vllm virtualenv, image url, deployed runner name
+# vllm environment: same (default), the path of vllm virtualenv, deployed runner name
 VLLM_PYTHON_ENV = os.environ.get("VLLM_PYTHON_ENV", "same")
 IS_VLLM_IMAGE = False
-IS_VLLM_IMAGE_DEPLOYED=False
 RUN_SAVE_DIR=os.environ.get("RUN_SAVE_DIR", "none")
 # when using vllm image, needs to save the generated model
 if VLLM_PYTHON_ENV.lower() != "same" and (not Path(VLLM_PYTHON_ENV).exists()):
     IS_VLLM_IMAGE = True
-    if not is_quay_image(VLLM_PYTHON_ENV):
-        IS_VLLM_IMAGE_DEPLOYED = True
-    assert RUN_SAVE_DIR != "none", "To use vllm image, RUN_SAVE_DIR must be set!"
+    assert RUN_SAVE_DIR != "none", "To use vllm image, RUN_SAVE_DIR must be set!"
 
 TIMINGS_DIR = os.environ.get("TIMINGS_DIR", "timings/e2e-test_vllm")
 os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
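
For orientation, the VLLM_PYTHON_ENV handling that survives this commit boils down to the sketch below. resolve_vllm_env is a hypothetical helper written for illustration only; it mirrors the module-level logic in the hunk above and is not a function in the test suite.

    # Minimal sketch of the post-commit detection logic (illustrative only).
    from pathlib import Path

    def resolve_vllm_env(vllm_python_env: str, run_save_dir: str) -> str:
        if vllm_python_env.lower() == "same":
            return "run vllm in the same python environment"
        if Path(vllm_python_env).exists():
            return f"run vllm from the virtualenv at {vllm_python_env}"
        # Any other value is now treated as a deployed runner name; the
        # quay.io image-url branch (and the podman path behind it) is gone.
        assert run_save_dir != "none", "To use vllm image, RUN_SAVE_DIR must be set!"
        return f"run vllm via kubectl exec into runner {vllm_python_env}"
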
@@ -246,30 +239,16 @@ def _run_vllm(self, logger):
             """)
             os.chmod(self.vllm_bash, 0o755)
             logger.info(f"Wrote vllm cmd into {self.vllm_bash}:")
-            if IS_VLLM_IMAGE_DEPLOYED:
-                logger.info("vllm image is deployed. Run vllm cmd with kubectl.")
-                result = subprocess.Popen(
-                    [
-                        "kubectl", "exec", "-it",
-                        VLLM_PYTHON_ENV, "-n", "arc-runners",
-                        "--", "/bin/bash", self.vllm_bash,
-                    ],
-                    stdout=subprocess.PIPE,
-                    stderr=subprocess.PIPE,
-                    text=True)
-            else:
-                logger.info("vllm image is pulled locally. Run vllm cmd with podman.")
-                result = subprocess.Popen(
-                    [
-                        "podman", "run", "--rm",
-                        "--device", "nvidia.com/gpu=all", "--entrypoint",
-                        self.vllm_bash, "-v",
-                        f"{RUN_SAVE_DIR}:{RUN_SAVE_DIR}",
-                        VLLM_PYTHON_ENV,
-                    ],
-                    stdout=subprocess.PIPE,
-                    stderr=subprocess.PIPE,
-                    text=True)
+            logger.info("vllm image. Run vllm cmd with kubectl.")
+            result = subprocess.Popen(
+                [
+                    "kubectl", "exec", "-it",
+                    VLLM_PYTHON_ENV, "-n", "arc-runners",
+                    "--", "/bin/bash", self.vllm_bash,
+                ],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True)
         else:
             run_file_path = os.path.join(test_file_dir, "run_vllm.py")
             logger.info("Run vllm in subprocess.Popen using python env:")
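
The surviving kubectl branch follows the common Popen pattern below. This is a minimal standalone sketch, not the test's code verbatim: "my-runner-pod" and "/tmp/run_vllm.sh" stand in for the deployed runner name and the generated script path, and the communicate()/returncode handling is one typical way to drive such a call to completion.

    # Sketch of exec'ing a script inside a deployed runner pod via kubectl.
    import subprocess

    proc = subprocess.Popen(
        [
            "kubectl", "exec", "-it",
            "my-runner-pod", "-n", "arc-runners",    # placeholder pod name
            "--", "/bin/bash", "/tmp/run_vllm.sh",   # placeholder script path
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    stdout, stderr = proc.communicate()  # block until the in-pod script exits
    if proc.returncode != 0:
        raise RuntimeError(f"vllm run failed in pod: {stderr}")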
