Commit a827b35

amdfaa and zhewenli authored and committed
[CI/Build] Install uv for AMD MI300: Language Models Tests (Hybrid) %N (vllm-project#28142)
Signed-off-by: amdfaa <107946068+amdfaa@users.noreply.github.com>
Signed-off-by: zhewenli <zhewenli@meta.com>
Co-authored-by: zhewenli <zhewenli@meta.com>
1 parent 1422e28 commit a827b35

1 file changed: +27 -8 lines changed

docker/Dockerfile.rocm

Lines changed: 27 additions & 8 deletions
@@ -15,6 +15,20 @@ RUN apt-get update -q -y && apt-get install -q -y \
 # Remove sccache
 RUN python3 -m pip install --upgrade pip
 RUN apt-get purge -y sccache; python3 -m pip uninstall -y sccache; rm -f "$(which sccache)"
+
+# Install UV
+RUN curl -LsSf https://astral.sh/uv/install.sh | sh
+
+# Activate virtual environment and add uv to PATH
+ENV PATH="/root/.local/bin:$PATH"
+
+# This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out
+# Reference: https://github.com/astral-sh/uv/pull/1694
+ENV UV_HTTP_TIMEOUT=500
+ENV UV_INDEX_STRATEGY="unsafe-best-match"
+# Use copy mode to avoid hardlink failures with Docker cache mounts
+ENV UV_LINK_MODE=copy
+
 ARG COMMON_WORKDIR
 WORKDIR ${COMMON_WORKDIR}
 
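
The hunk above installs uv with its standalone installer in the base stage and configures it entirely through ENV instructions, so every later stage inherits the same settings. Below is a minimal sketch (not part of this commit) that reproduces the setup in isolation; the ubuntu:22.04 base and the apt packages are assumptions made only for this example.

# Hedged sketch, assuming an Ubuntu base with curl, ca-certificates and python3 available
FROM ubuntu:22.04
RUN apt-get update -q -y && apt-get install -q -y curl ca-certificates python3
# The standalone installer drops the uv binary into /root/.local/bin
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
ENV PATH="/root/.local/bin:$PATH"
# Longer HTTP timeout so large ROCm wheels do not time out (see astral-sh/uv#1694)
ENV UV_HTTP_TIMEOUT=500
# Allow resolving a package from whichever configured index has the best match
ENV UV_INDEX_STRATEGY="unsafe-best-match"
# Copy files instead of hardlinking so installs work across Docker cache mounts
ENV UV_LINK_MODE=copy
# Sanity check that uv is on PATH
RUN uv --version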

@@ -59,13 +73,15 @@ FROM base AS test
 
 RUN python3 -m pip install --upgrade pip && rm -rf /var/lib/apt/lists/*
 
-# Install vLLM
+# Install vLLM using uv (inherited from base stage)
+# Note: No -U flag to avoid upgrading PyTorch ROCm to CUDA version
 RUN --mount=type=bind,from=export_vllm,src=/,target=/install \
+    --mount=type=cache,target=/root/.cache/uv \
     cd /install \
-    && pip install -U -r requirements/rocm.txt \
-    && pip install -U -r requirements/rocm-test.txt \
+    && uv pip install --system -r requirements/rocm.txt \
+    && uv pip install --system -r requirements/rocm-test.txt \
     && pip uninstall -y vllm \
-    && pip install *.whl
+    && uv pip install --system *.whl
 
 WORKDIR /vllm-workspace
 ARG COMMON_WORKDIR
@@ -89,14 +105,17 @@ RUN case "$(which python3)" in \
         rm -rf /opt/conda/envs/py_3.9/lib/python3.9/site-packages/numpy-1.20.3.dist-info/;; \
     *) ;; esac
 
-RUN python3 -m pip install --upgrade huggingface-hub[cli]
+RUN --mount=type=cache,target=/root/.cache/uv \
+    uv pip install --system --upgrade huggingface-hub[cli]
 
-# Install vLLM
+# Install vLLM using uv (inherited from base stage)
+# Note: No -U flag to avoid upgrading PyTorch ROCm to CUDA version
 RUN --mount=type=bind,from=export_vllm,src=/,target=/install \
+    --mount=type=cache,target=/root/.cache/uv \
     cd /install \
-    && pip install -U -r requirements/rocm.txt \
+    && uv pip install --system -r requirements/rocm.txt \
     && pip uninstall -y vllm \
-    && pip install *.whl
+    && uv pip install --system *.whl
 
 ARG COMMON_WORKDIR
 
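
Both install stages now combine uv pip install --system with a --mount=type=cache,target=/root/.cache/uv cache mount, so downloaded wheels are reused across rebuilds; UV_LINK_MODE=copy from the base stage keeps those cache-mount installs from failing on hardlinks. Cache and bind mounts require BuildKit, so building the test stage defined above would look roughly like the sketch below; the image tag is illustrative, and build args such as COMMON_WORKDIR or the ROCm base image may still need to be supplied per the rest of the Dockerfile.

# Hedged sketch: build only the test stage with BuildKit enabled
DOCKER_BUILDKIT=1 docker build \
    -f docker/Dockerfile.rocm \
    --target test \
    -t vllm-rocm:test \
    .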
