Merged
8 changes: 0 additions & 8 deletions .github/workflows/build_cc.yml
@@ -20,8 +20,6 @@ jobs:
include:
- variant: cpu
dp_variant: cpu
- variant: cuda
dp_variant: cuda
- variant: cuda120
dp_variant: cuda
- variant: rocm
@@ -36,12 +34,6 @@ jobs:
- uses: lukka/get-cmake@latest
- run: python -m pip install uv
- run: source/install/uv_with_retry.sh pip install --system --group pin_tensorflow_cpu --group pin_pytorch_cpu --torch-backend cpu
- run: |
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.0-1_all.deb \
&& sudo dpkg -i cuda-keyring_1.0-1_all.deb \
&& sudo apt-get update \
&& sudo apt-get -y install cuda-cudart-dev-11-8 cuda-nvcc-11-8
if: matrix.variant == 'cuda'
- run: |
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.0-1_all.deb \
&& sudo dpkg -i cuda-keyring_1.0-1_all.deb \
18 changes: 1 addition & 17 deletions .github/workflows/build_wheel.yml
@@ -29,13 +29,7 @@ jobs:
python: 311
platform_id: manylinux_x86_64
dp_variant: cuda
cuda_version: 12.2
- os: ubuntu-latest
python: 311
platform_id: manylinux_x86_64
dp_variant: cuda
cuda_version: 11.8
dp_pkg_name: deepmd-kit-cu11
cuda_version: 12.8
# macos-x86-64
- os: macos-15-intel
python: 311
@@ -64,14 +58,6 @@ jobs:
- name: Install uv
run: curl --proto '=https' --tlsv1.2 -LsSf https://github.com/astral-sh/uv/releases/download/0.2.24/uv-installer.sh | sh
if: runner.os != 'Linux'
- uses: docker/setup-qemu-action@v3
name: Setup QEMU
if: matrix.platform_id == 'manylinux_aarch64' && matrix.os == 'ubuntu-latest'
# detect version in advance. See #3168
- run: |
echo "SETUPTOOLS_SCM_PRETEND_VERSION=$(pipx run uv tool run --from setuptools_scm python -m setuptools_scm)" >> $GITHUB_ENV
rm -rf .git
if: matrix.dp_pkg_name == 'deepmd-kit-cu11'
- name: Build wheels
uses: pypa/cibuildwheel@v3.3
env:
@@ -126,8 +112,6 @@ jobs:
include:
- variant: ""
cuda_version: "12"
- variant: "_cu11"
cuda_version: "11"
steps:
- name: Delete huge unnecessary tools folder
run: rm -rf /opt/hostedtoolcache
3 changes: 0 additions & 3 deletions .github/workflows/package_c.yml
@@ -24,9 +24,6 @@ jobs:
- tensorflow_build_version: "2.18"
tensorflow_version: ""
filename: libdeepmd_c.tar.gz
- tensorflow_build_version: "2.14"
tensorflow_version: ">=2.5.0,<2.15"
filename: libdeepmd_c_cu11.tar.gz
steps:
- uses: actions/checkout@v6
with:
3 changes: 0 additions & 3 deletions backend/find_pytorch.py
@@ -120,9 +120,6 @@ def get_pt_requirement(pt_version: str = "") -> dict:
cibw_requirement = read_dependencies_from_dependency_group(
"pin_pytorch_cpu"
)
elif cuda_version in SpecifierSet(">=11,<12"):
# CUDA 11.8, cudnn 8
pt_version = "2.3.1"
else:
raise RuntimeError("Unsupported CUDA version") from None
if pt_version == "":
8 changes: 0 additions & 8 deletions backend/find_tensorflow.py
@@ -99,14 +99,6 @@ def find_tensorflow() -> tuple[Optional[str], list[str]]:
requires.extend(
read_dependencies_from_dependency_group("pin_tensorflow_cpu")
)
elif cuda_version in SpecifierSet(">=11,<12"):
# CUDA 11.8, cudnn 8
requires.extend(
[
"tensorflow-cpu>=2.5.0,<2.15; platform_machine=='x86_64' and platform_system == 'Linux'",
]
)
tf_version = "2.14.1"
else:
raise RuntimeError("Unsupported CUDA version") from None
requires.extend(get_tf_requirement(tf_version)["cpu"])
2 changes: 0 additions & 2 deletions doc/install/easy-install-dev.md
@@ -12,8 +12,6 @@ The [`devel` tag](https://github.com/deepmodeling/deepmd-kit/pkgs/container/deep
docker pull ghcr.io/deepmodeling/deepmd-kit:devel
```

For CUDA 11.8 support, use the `devel_cu11` tag.

## Install with pip

Follow [the documentation for the stable version](easy-install.md#install-python-interface-with-pip), but add the `--pre` and `--extra-index-url` options as shown below:
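A minimal, hedged sketch of the command shape described above; `<devel-wheel-index-url>` is a placeholder, since the actual pre-release index URL appears in the part of the document collapsed by this diff.

```bash
# Hedged sketch only: <devel-wheel-index-url> is a placeholder for the
# pre-release wheel index named in the collapsed portion of the document.
pip install --pre deepmd-kit --extra-index-url "<devel-wheel-index-url>"
```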
29 changes: 0 additions & 29 deletions doc/install/easy-install.md
@@ -96,14 +96,6 @@ pip install deepmd-kit[gpu,cu12]

::::

::::{tab-item} CUDA 11

```bash
pip install deepmd-kit-cu11[gpu,cu11]
```

::::

::::{tab-item} CPU

```bash
@@ -128,15 +120,6 @@ pip install deepmd-kit[torch]

::::

::::{tab-item} CUDA 11.8

```bash
pip install torch --index-url https://download.pytorch.org/whl/cu118
pip install deepmd-kit-cu11
```

::::

::::{tab-item} CPU

```bash
@@ -194,18 +177,6 @@ pip install deepmd-kit

::::

::::{tab-item} CUDA 11.8

```bash
# release version
pip install paddlepaddle-gpu==3.1.1 -i https://www.paddlepaddle.org.cn/packages/stable/cu118/
# nightly-build version
# pip install --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu118/
pip install deepmd-kit
```

::::

::::{tab-item} CPU

```bash
2 changes: 1 addition & 1 deletion doc/install/install-from-c-library.md
@@ -12,7 +12,7 @@ wget https://github.com/deepmodeling/deepmd-kit/releases/latest/download/libdeep
tar xzf libdeepmd_c.tar.gz
```

The library is built in Linux (GLIBC 2.17) with CUDA 12.2 (`libdeepmd_c.tar.gz`) or 11.8 (`libdeepmd_c_cu11.tar.gz`). It's noted that this package does not contain CUDA Toolkit and cuDNN, so one needs to download them from the NVIDIA website.
The library is built on Linux (GLIBC 2.17) with CUDA 12.2 (`libdeepmd_c.tar.gz`). Note that this package does not contain the CUDA Toolkit and cuDNN, so one needs to download them from the NVIDIA website.

## Use Pre-compiled C Library to build the LAMMPS plugin, i-PI driver, and GROMACS patch

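For orientation, here is a brief, hedged sketch of how the extracted C library might be consumed by a downstream CMake build such as the LAMMPS plugin mentioned in the heading above; the `DEEPMD_C_ROOT` variable, the `<plugin-source-dir>` placeholder, and the `libdeepmd_c/lib` layout are assumptions for illustration, not taken from this diff.

```bash
# Hedged sketch: expose the extracted pre-compiled C library to a downstream
# CMake build. DEEPMD_C_ROOT, <plugin-source-dir>, and the libdeepmd_c/lib
# layout are illustrative assumptions, not confirmed by this diff.
tar xzf libdeepmd_c.tar.gz
export LD_LIBRARY_PATH=$PWD/libdeepmd_c/lib:$LD_LIBRARY_PATH
cmake -B build <plugin-source-dir> -D DEEPMD_C_ROOT=$PWD/libdeepmd_c
cmake --build build -j
```

Because the tarball does not bundle the CUDA Toolkit or cuDNN, those still need to be installed separately, as noted above.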
10 changes: 1 addition & 9 deletions doc/install/install-from-source.md
@@ -104,12 +104,6 @@ pip install paddlepaddle-gpu==3.1.1 -i https://www.paddlepaddle.org.cn/packages/
# nightly-build version
# pip install --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/

# cu118
# release version
pip install paddlepaddle-gpu==3.1.1 -i https://www.paddlepaddle.org.cn/packages/stable/cu118/
# nightly-build version
# pip install --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu118/

# cpu
# release version
pip install paddlepaddle==3.1.1 -i https://www.paddlepaddle.org.cn/packages/stable/cpu/
@@ -355,9 +349,7 @@ download the TensorFlow C library from [this page](https://www.tensorflow.org/in

If you want to use the C++ interface of Paddle, you need to compile the Paddle inference library (C++ interface) manually following [linux-compile-by-make](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/compile/linux-compile-by-make.html), then use the `.so` and `.a` files in `Paddle/build/paddle_inference_install_dir/`.

We also provide a weekly-build Paddle C++ inference library for Linux x86_64 with CUDA 11.8/12.3/CPU below:

CUDA 11.8: [Cuda118_cudnn860_Trt8531_D1/latest/paddle_inference.tgz](https://paddle-qa.bj.bcebos.com/paddle-pipeline/GITHUB_Docker_Compile_Test_Cuda118_cudnn860_Trt8531_D1/latest/paddle_inference.tgz)
We also provide a weekly-built Paddle C++ inference library for Linux x86_64 with CUDA 12.3/CPU below:

CUDA 12.3: [Cuda123_cudnn900_Trt8616_D1/latest/paddle_inference.tgz](https://paddle-qa.bj.bcebos.com/paddle-pipeline/GITHUB_Docker_Compile_Test_Cuda123_cudnn900_Trt8616_D1/latest/paddle_inference.tgz)

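For reference, a hedged sketch of fetching the weekly-built Paddle C++ inference library linked above and locating the `.so`/`.a` files mentioned in the paragraph on compiling Paddle manually; the extracted `paddle_inference` directory name and the `PADDLE_INFERENCE_DIR` variable are assumptions for illustration only.

```bash
# Hedged sketch: download the CUDA 12.3 weekly build linked above and record
# where it was unpacked. The paddle_inference directory name and the
# PADDLE_INFERENCE_DIR variable are assumptions, not taken from this diff.
wget https://paddle-qa.bj.bcebos.com/paddle-pipeline/GITHUB_Docker_Compile_Test_Cuda123_cudnn900_Trt8616_D1/latest/paddle_inference.tgz
tar xzf paddle_inference.tgz
export PADDLE_INFERENCE_DIR=$PWD/paddle_inference
ls "$PADDLE_INFERENCE_DIR"   # the .so and .a libraries should be under here
```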
20 changes: 1 addition & 19 deletions pyproject.toml
@@ -117,16 +117,6 @@ ipi = [
gui = [
"dpgui",
]
cu11 = [
"nvidia-cuda-runtime-cu11",
"nvidia-cublas-cu11",
"nvidia-cufft-cu11",
"nvidia-curand-cu11",
"nvidia-cusolver-cu11",
"nvidia-cusparse-cu11",
"nvidia-cudnn-cu11<9",
"nvidia-cuda-nvcc-cu11",
]
cu12 = [
"nvidia-cuda-runtime-cu12",
"nvidia-cublas-cu12",
@@ -255,9 +245,7 @@ test-command = [
test-extras = ["cpu", "test", "lmp", "ipi", "torch", "paddle"]
build = ["cp311-*"]
skip = ["*-win32", "*-manylinux_i686", "*-musllinux*"]
# TODO: uncomment to use the latest image when CUDA 11 is deprecated
# manylinux-x86_64-image = "manylinux_2_28"
manylinux-x86_64-image = "quay.io/pypa/manylinux_2_28_x86_64:2022-11-19-1b19e81"
manylinux-x86_64-image = "manylinux_2_28"
manylinux-aarch64-image = "manylinux_2_28"

[tool.cibuildwheel.macos]
@@ -289,15 +277,9 @@ environment-pass = [
]
before-all = [
"""if [ ! -z "${DP_PKG_NAME}" ]; then sed -i "s/name = \\"deepmd-kit\\"/name = \\"${DP_PKG_NAME}\\"/g" pyproject.toml; fi""",
# https://almalinux.org/blog/2023-12-20-almalinux-8-key-update/
"""rpm --import https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux""",
"""{ if [ "$(uname -m)" = "x86_64" ] ; then yum config-manager --add-repo http://developer.download.nvidia.com/compute/cuda/repos/rhel8/x86_64/cuda-rhel8.repo && yum install -y cuda-nvcc-${CUDA_VERSION/./-} cuda-cudart-devel-${CUDA_VERSION/./-}; fi }""",
# uv is not available in the old manylinux image
"""{ if [ "$(uname -m)" = "x86_64" ] ; then pipx install uv; fi }""",
]
before-build = [
# old build doesn't support uv
"""{ if [ "$(uname -m)" = "x86_64" ] ; then uv pip install --system -U build; fi }""",
]
[tool.cibuildwheel.linux.environment]
PIP_PREFER_BINARY = "1"
3 changes: 1 addition & 2 deletions source/install/docker/Dockerfile
@@ -8,8 +8,7 @@ ENV PATH="/opt/deepmd-kit/bin:$PATH"
ENV VIRTUAL_ENV="/opt/deepmd-kit"
# Install package
COPY dist /dist
RUN if [ "${CUDA_VERSION}" = 11 ]; then export UV_TORCH_BACKEND=cu118; fi \
&& uv pip install "$(ls /dist/deepmd_kit${VARIANT}-*manylinux*_x86_64.whl)[gpu,cu${CUDA_VERSION},lmp,ipi,torch]" \
RUN uv pip install "$(ls /dist/deepmd_kit${VARIANT}-*manylinux*_x86_64.whl)[gpu,cu${CUDA_VERSION},lmp,ipi,torch]" \
&& dp -h \
&& lmp -h \
&& dp_ipi \