diff --git a/.github/cloud_builder/run_command_on_active_checkout.yaml b/.github/cloud_builder/run_command_on_active_checkout.yaml index d0e407f02..4ffd6ed5c 100644 --- a/.github/cloud_builder/run_command_on_active_checkout.yaml +++ b/.github/cloud_builder/run_command_on_active_checkout.yaml @@ -3,7 +3,7 @@ substitutions: options: logging: CLOUD_LOGGING_ONLY steps: - - name: us-central1-docker.pkg.dev/external-snap-ci-github-gigl/gigl-base-images/gigl-builder:b34c863a2168c8df5a6da1f6385e5d374f0175d2.91.1 + - name: us-central1-docker.pkg.dev/external-snap-ci-github-gigl/gigl-base-images/gigl-builder:3738af6cca02750514278cb63c98c88d07c45f7b.99.1 entrypoint: /bin/bash args: - -c @@ -28,8 +28,9 @@ steps: docker buildx create --driver=docker-container --use docker run --rm --privileged multiarch/qemu-user-static --reset -p yes gcloud auth configure-docker us-central1-docker.pkg.dev - # Install GiGL - uv pip install -e . + # Install GiGL (non-editable). gigl-core (the C++ extension workspace member) + # is resolved transitively and built via scikit-build-core. + uv pip install . # The builder operates in its own user dir, usually /workspace, # so we need to copy the gigl tools dir to the current cloud_builder's user dir. # See: containers/Dockerfile.builder. 
diff --git a/.github/workflows/on-pr-comment.yml b/.github/workflows/on-pr-comment.yml index 870c415fe..53ab9b146 100644 --- a/.github/workflows/on-pr-comment.yml +++ b/.github/workflows/on-pr-comment.yml @@ -64,6 +64,23 @@ jobs: command: | make unit_test_py + unit-test-cpp: + if: ${{ github.event.issue.pull_request && (contains(github.event.comment.body, '/unit_test_cpp') || endsWith(github.event.comment.body, '/unit_test') || contains(github.event.comment.body, '/all_test')) }} + runs-on: ubuntu-latest + timeout-minutes: 20 + steps: + - name: Run C++ Unit Tests + uses: snapchat/gigl/.github/actions/run-command-on-pr@main + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + pr_number: ${{ github.event.issue.number }} + should_leave_progress_comments: "true" + descriptive_workflow_name: "C++ Unit Test" + use_cloud_run: "false" + command: | + bash requirements/install_cpp_deps.sh + make unit_test_cpp + unit-test-scala: if: ${{ github.event.issue.pull_request && (contains(github.event.comment.body, '/unit_test_scala') || endsWith(github.event.comment.body, '/unit_test') || contains(github.event.comment.body, '/all_test')) }} runs-on: ubuntu-latest diff --git a/.github/workflows/on-pr-merge.yml b/.github/workflows/on-pr-merge.yml index 0e1f9ddd0..68ce947f9 100644 --- a/.github/workflows/on-pr-merge.yml +++ b/.github/workflows/on-pr-merge.yml @@ -70,6 +70,16 @@ jobs: service_account: ${{ secrets.gcp_service_account_email }} project: ${{ vars.GCP_PROJECT_ID }} + ci-unit-test-cpp: + if: github.event_name == 'merge_group' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install C++ dependencies + run: bash requirements/install_cpp_deps.sh + - name: Run C++ Unit Tests + run: make unit_test_cpp + ci-integration-test: if: github.event_name == 'merge_group' runs-on: ubuntu-latest diff --git a/.github/workflows/release-documentation.yml b/.github/workflows/release-documentation.yml index 044492888..4b81630d1 100644 --- 
a/.github/workflows/release-documentation.yml +++ b/.github/workflows/release-documentation.yml @@ -41,11 +41,13 @@ jobs: gcp_project_id: ${{ vars.GCP_PROJECT_ID }} workload_identity_provider: ${{ secrets.WORKLOAD_IDENTITY_PROVIDER }} gcp_service_account_email: ${{ secrets.GCP_SERVICE_ACCOUNT_EMAIL }} - # We also make gigl available w/ editable install `-e` so that autodoc can find it. + # Install gigl so Sphinx autoapi can import and introspect it. Non-editable + # install; autodoc reads from site-packages. gigl-core (the C++ extension + # workspace member) is resolved transitively and built via scikit-build-core. - name: Install necessary doc dependencies run: | uv sync --group docs --inexact - uv pip install -e . + uv pip install . - name: Sphinx build run: | make build_docs diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 777e68183..9b6a292bc 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,5 +1,10 @@ name: Release GiGL +# TODO: Before the first release using this workflow, complete the following one-time GCP setup: +# 1. Create the `gigl-cu128` Artifact Registry Python repository in GCP project +# `external-snap-ci-github-gigl` (the CPU registry `gigl` already exists). +# 2. Grant the release service account write access to the new `gigl-cu128` repository. + on: # Triggers the workflow manually for now until we have full support for releasing: # - building and releasing docker images @@ -11,13 +16,21 @@ permissions: id-token: write # Needed for the auth w/ GCP to upload to Google Artifact Registry. 
jobs: - build: - name: Build and release pip whl - runs-on: ubuntu-latest + build-gigl-core: + name: Build and release gigl-core (${{ matrix.torch-variant }}) + runs-on: ${{ matrix.runner }} + strategy: + matrix: + include: + - runner: ubuntu-latest + torch-variant: cpu + index-name: gcp-release-registry-cpu + - runner: gigl-gpu-instances + torch-variant: cu128 + index-name: gcp-release-registry-cu128 env: PROJECT_ID: ${{ vars.GCP_PROJECT_ID }} environment: - # This CI environment contains relevant pip.conf and pyprci information to name: release steps: - name: Checkout @@ -31,29 +44,55 @@ jobs: gcp_project_id: ${{ vars.GCP_PROJECT_ID }} workload_identity_provider: ${{ secrets.WORKLOAD_IDENTITY_PROVIDER }} gcp_service_account_email: ${{ secrets.GCP_SERVICE_ACCOUNT_EMAIL }} - # We need build and twine to build the whl and upload it to Google Artifact Registry. - # keyrings.google-artifactregistry-auth is needed to authenticate with Google Artifact Registry. - # See: https://cloud.google.com/artifact-registry/docs/python/store-python - # And: https://cloud.google.com/artifact-registry/docs/python/authentication + - name: Setup environment for publishing Python package run: | - # Pre-install keyring and Artifact Registry plugin from the public PyPI uv tool install keyring --with keyrings.google-artifactregistry-auth==1.1.2 - - name: Build Whl Distribution - run: uv build + - name: Build gigl-core Whl Distribution + run: uv build --wheel --package gigl-core + + - name: Publish gigl-core Package šŸš€ + run: | + uv publish --index ${{ matrix.index-name }} --username oauth2accesstoken --keyring-provider subprocess + + build: + name: Build and release gigl (${{ matrix.torch-variant }}) + needs: build-gigl-core + runs-on: ${{ matrix.runner }} + strategy: + matrix: + include: + - runner: ubuntu-latest + torch-variant: cpu + index-name: gcp-release-registry-cpu + - runner: gigl-gpu-instances + torch-variant: cu128 + index-name: gcp-release-registry-cu128 + env: + PROJECT_ID: 
${{ vars.GCP_PROJECT_ID }} + environment: + name: release + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Python deps and gcloud + uses: ./.github/actions/setup-python-tools + with: + install_dev_deps: "true" + setup_gcloud: "true" + gcp_project_id: ${{ vars.GCP_PROJECT_ID }} + workload_identity_provider: ${{ secrets.WORKLOAD_IDENTITY_PROVIDER }} + gcp_service_account_email: ${{ secrets.GCP_SERVICE_ACCOUNT_EMAIL }} - - name: Publish Package šŸš€ - env: - PYPIRC_CONTENTS: ${{ secrets.PYPIRC_CONTENTS }} - PIP_CONF_CONTENTS: ${{ secrets.PIP_CONF_CONTENTS }} - # We upload the build whls to Google Artifact Registry. + - name: Setup environment for publishing Python package run: | - uv publish --index gcp-release-registry --username oauth2accesstoken --keyring-provider subprocess + uv tool install keyring --with keyrings.google-artifactregistry-auth==1.1.2 + + - name: Build gigl Whl Distribution + run: uv build --wheel --package gigl - - name: Post Publish Package - if: always() - # Clean up files created during Publish Package step. + - name: Publish gigl Package šŸš€ run: | - rm -rf ~/.pypirc - rm -rf ~/.pip/pip.conf + uv publish --index ${{ matrix.index-name }} --username oauth2accesstoken --keyring-provider subprocess diff --git a/.gitignore b/.gitignore index 26bb8b2e8..1c014471b 100644 --- a/.gitignore +++ b/.gitignore @@ -49,5 +49,22 @@ fossa*.zip # https://github.com/google-github-actions/auth/issues/497 gha-creds-*.json +# gigl-core build outputs. scikit-build-core writes the proper wheel build into +# gigl-core/build/{wheel_tag}/; the editable-install cmake invocation additionally +# drops cache files at gigl-core/ root (CMakeCache.txt, .cmake/, build.ninja, etc). 
+gigl-core/build/ +gigl-core/CMakeCache.txt +gigl-core/CMakeFiles/ +gigl-core/CMakeInit.txt +gigl-core/cmake_install.cmake +gigl-core/build.ninja +gigl-core/.cmake/ +gigl-core/.ninja_log +gigl-core/.ninja_deps +gigl-core/.skbuild-info.json +gigl-core/compile_commands.json +gigl-core/_core*.so +gigl-core/src/gigl_core/*.so + # Local-only scripts with hardcoded internal identifiers scripts/_local/ diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index ec343c7d5..000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -global-exclude tests/* -recursive-include gigl/deps *.jar diff --git a/Makefile b/Makefile index dc3accd94..208b7a2d3 100644 --- a/Makefile +++ b/Makefile @@ -22,6 +22,13 @@ DOCKER_IMAGE_MAIN_CPU_NAME_WITH_TAG?=${DOCKER_IMAGE_MAIN_CPU_NAME}:${DATE} DOCKER_IMAGE_DEV_WORKBENCH_NAME_WITH_TAG?=${DOCKER_IMAGE_DEV_WORKBENCH_NAME}:${DATE} PYTHON_DIRS:=.github/scripts examples gigl tests snapchat scripts +CPP_SOURCES:=$(shell find gigl-core/src gigl-core/tests \( -name "*.cpp" -o -name "*.cu" -o -name "*.h" -o -name "*.cuh" \) 2>/dev/null) +# clang-tidy 15 does not fully support CUDA syntax (e.g. <<<...>>>, __global__). +# Exclude .cu/.cuh files from tidy targets; clang-format and clangd handle them fine. +CPP_SOURCES_NO_CUDA:=$(filter-out %.cu %.cuh,$(CPP_SOURCES)) +# scikit-build-core writes compile_commands.json into gigl-core/build/{wheel_tag}/ +# during `uv sync`. Resolved lazily at recipe time so the wildcard sees the latest build. +CPP_COMPILE_COMMANDS_DIR=$(firstword $(wildcard gigl-core/build/*/)) PY_TEST_FILES?="*_test.py" # You can override GIGL_TEST_DEFAULT_RESOURCE_CONFIG by setting it in your environment i.e. # adding `export GIGL_TEST_DEFAULT_RESOURCE_CONFIG=your_resource_config` to your shell config (~/.bashrc, ~/.zshrc, etc.) 
@@ -48,7 +55,8 @@ check_if_valid_env:
 install_dev_deps: check_if_valid_env
 	gcloud auth configure-docker us-central1-docker.pkg.dev
 	bash ./requirements/install_py_deps.sh --dev
 	bash ./requirements/install_scala_deps.sh
+	bash ./requirements/install_cpp_deps.sh
 	uv pip install -e .
 	uv run pre-commit install --hook-type pre-commit --hook-type pre-push
 
@@ -86,6 +94,15 @@ unit_test_scala: clean_build_files_scala
 	( cd scala; sbt test )
 	( cd scala_spark35 ; sbt test )
 
+# Builds and runs gigl-core's C++ unit tests (GoogleTest + CTest). scikit-build-core
+# is not involved here — we invoke cmake directly against gigl-core/ so we can
+# flip -DGIGL_CORE_BUILD_TESTS=ON to wire in the tests subdirectory.
+unit_test_cpp:
+	cd gigl-core && uv run cmake -S . -B build/tests -DGIGL_CORE_BUILD_TESTS=ON \
+		-DCMAKE_PREFIX_PATH=$$(uv run python -c "import sysconfig; print(sysconfig.get_paths()['purelib'])")
+	cd gigl-core && uv run cmake --build build/tests --parallel
+	cd gigl-core && uv run ctest --test-dir build/tests --output-on-failure
+
 # Runs unit tests for Python and Scala
 # Asserts Python and Scala files are formatted correctly.
 # Asserts YAML configs can be parsed.
@@ -94,7 +111,8 @@ unit_test_scala: clean_build_files_scala
 # Eventually, we should look into splitting these up.
 # We run `make check_format` separately instead of as a dependent make rule so that it always runs after the actual testing.
 # We don't want to fail the tests due to non-conformant formatting during development.
-unit_test: precondition_tests unit_test_py unit_test_scala
+
+unit_test: precondition_tests unit_test_py unit_test_scala unit_test_cpp
 
 check_format_py:
 	uv run ruff check --config pyproject.toml ${PYTHON_DIRS}
@@ -108,7 +126,12 @@ check_format_md:
 	@echo "Checking markdown files..."
 	uv run mdformat --check ${MD_FILES}
 
-check_format: check_format_py check_format_scala check_format_md
+check_format_cpp:
+	clang-format-15 --dry-run --Werror --style=file $(CPP_SOURCES)
+
+# Checks formatting only (clang-format, black, scalafmt, mdformat). Does NOT run
+# clang-tidy static analysis — use `make check_lint_cpp` for that.
+check_format: check_format_py check_format_cpp check_format_scala check_format_md
 
 # Set PY_TEST_FILES= to test a specifc file.
 # Ex. `make integration_test PY_TEST_FILES="dataflow_test.py"`
@@ -141,14 +164,38 @@ format_md:
 	@echo "Formatting markdown files..."
 	uv run mdformat ${MD_FILES}
 
-format: format_py format_scala format_md
+format_cpp:
+	clang-format-15 -i --style=file $(CPP_SOURCES)
+
+format: format_py format_cpp format_scala format_md
 
 type_check:
 	uv run mypy ${PYTHON_DIRS} --check-untyped-defs
 
-lint_test: check_format assert_yaml_configs_parse
+# scikit-build-core writes compile_commands.json into gigl-core/build/{wheel_tag}/
+# during `uv sync` / `uv pip install -e ./gigl-core`. clang-tidy reads it directly.
+# If the path is empty (gigl-core has never been built), `uv sync` populates it.
+check_lint_cpp:
+	@test -n "$(CPP_COMPILE_COMMANDS_DIR)" || (echo "No gigl-core build dir found. Run 'uv sync' first."; exit 1)
+	clang-tidy-15 -p $(CPP_COMPILE_COMMANDS_DIR) $(CPP_SOURCES_NO_CUDA)
+
+# Not part of `make format`: clang-tidy --fix rewrites logic (renames identifiers,
+# changes expressions, adds/removes keywords), not just style. Run manually and
+# review the diff before committing. Note: --fix cannot auto-repair every check;
+# some violations require manual edits.
+fix_lint_cpp:
+	@test -n "$(CPP_COMPILE_COMMANDS_DIR)" || (echo "No gigl-core build dir found. Run 'uv sync' first."; exit 1)
+	clang-tidy-15 --fix -p $(CPP_COMPILE_COMMANDS_DIR) $(CPP_SOURCES_NO_CUDA)
+
+lint_test: check_format assert_yaml_configs_parse check_lint_cpp
 	@echo "Lint checks pass!"
 
+# Wipe gigl-core's build cache. Use this if cmake's cached state becomes inconsistent
+# after switching between branches with substantially different CMakeLists.txt structure.
+clean_cpp:
+	rm -rf gigl-core/build
+	rm -f gigl-core/src/gigl_core/*.so
+
 # compiles current working state of scala projects to local jars
 compile_jars:
 	@echo "Compiling jars..."
@@ -311,7 +358,10 @@ clean_build_files_scala:
 	( cd scala; sbt clean; find . -type d -name "target" -prune -exec rm -rf {} \; )
 	( cd scala_spark35; sbt clean; find . -type d -name "target" -prune -exec rm -rf {} \; )
 
-clean_build_files: clean_build_files_py clean_build_files_scala
+clean_build_files_cpp:
+	rm -rf gigl-core/build
+
+clean_build_files: clean_build_files_py clean_build_files_scala clean_build_files_cpp
 
 # Call to generate new proto definitions if any of the .proto files have been changed.
 # We intentionally rebuild *all* protos with one commmand as they should all be in sync.
diff --git a/containers/Dockerfile.dataflow.src b/containers/Dockerfile.dataflow.src
index b5d29c7f0..360971903 100644
--- a/containers/Dockerfile.dataflow.src
+++ b/containers/Dockerfile.dataflow.src
@@ -5,14 +5,17 @@ FROM $BASE_IMAGE
 # Copy the source
 WORKDIR /gigl
-COPY MANIFEST.in MANIFEST.in
-COPY pyproject.toml pyproject.toml
-COPY uv.lock uv.lock
+# Copy everything in one layer. The C++ extension lives in the separate `gigl-core`
+# workspace member; `uv pip install .` resolves it from the workspace and builds it
+# via scikit-build-core — no separate `make build_cpp_extensions` step needed.
+COPY pyproject.toml uv.lock Makefile README.md ./
+COPY gigl-core gigl-core
 COPY gigl/dep_vars.env gigl/dep_vars.env
 COPY deployment deployment
 COPY gigl gigl
 COPY snapchat snapchat
 COPY tests tests
-RUN uv pip install -e .
+
+RUN uv pip install .
WORKDIR / diff --git a/containers/Dockerfile.src b/containers/Dockerfile.src index b80295962..5e0f3b27e 100644 --- a/containers/Dockerfile.src +++ b/containers/Dockerfile.src @@ -1,17 +1,15 @@ ARG BASE_IMAGE FROM $BASE_IMAGE -# Copy the source -WORKDIR /gigl - - # Note: main package files must live in root of the repo for the python package to be built correctly for Dataflow workers. # See https://beam.apache.org/documentation/sdks/python-pipxeline-dependencies/#create-reproducible-environments. WORKDIR /gigl -COPY MANIFEST.in MANIFEST.in -COPY pyproject.toml pyproject.toml -COPY uv.lock uv.lock +# Copy everything in one layer. The C++ extension lives in the separate `gigl-core` +# workspace member; `uv pip install .` resolves it from the workspace and builds it +# via scikit-build-core — no separate `make build_cpp_extensions` step needed. +COPY pyproject.toml uv.lock Makefile README.md ./ +COPY gigl-core gigl-core COPY gigl/dep_vars.env gigl/dep_vars.env COPY deployment deployment COPY gigl gigl @@ -19,4 +17,4 @@ COPY snapchat snapchat COPY tests tests COPY examples examples -RUN uv pip install -e . +RUN uv pip install . diff --git a/docs/cpp_style_guide.md b/docs/cpp_style_guide.md new file mode 100644 index 000000000..1c9d0dc71 --- /dev/null +++ b/docs/cpp_style_guide.md @@ -0,0 +1,202 @@ +# C++ Style Guide + +GiGL enforces C++ style automatically via two tools: + +- **clang-format** (`.clang-format`) — code formatting +- **clang-tidy** (`.clang-tidy`) — static analysis and lint + +All clang-tidy warnings are treated as errors. + +## Running the Tools + +```bash +make format_cpp # Format all C++ files in-place (clang-format) +make check_format_cpp # Check formatting without modifying (clang-format only, not lint) +make check_lint_cpp # Run clang-tidy static analysis +``` + +> **Note — CUDA files (`.cu`) are excluded from clang-tidy.** clang-tidy 15 does not support CUDA syntax and will error +> on `.cu` files. 
The Makefile defines `CPP_SOURCES_NO_CUDA` (which filters out `.cu` files) and passes only that set to +> clang-tidy. If you add a new `.cu` file, it will not appear in lint output — this is expected. Lint coverage for CUDA +> files requires upgrading to a clang-tidy version with CUDA support. + +______________________________________________________________________ + +## Building + +All C++ lives in the `gigl-core/` workspace package. `uv sync` (invoked via `make install_dev_deps`) builds gigl-core +via scikit-build-core and installs the resulting `.so` into `gigl-core/src/gigl_core/`. To force a rebuild after editing +a C++ source, run `uv pip install -e ./gigl-core --force-reinstall`. + +- Release (`.github/workflows/release.yml`) builds and publishes `gigl-core` before `gigl`. Each torch variant (cpu, + cu128) goes to a matching GCP artifact registry. +- `make unit_test_cpp` configures `gigl-core/` with `-DGIGL_CORE_BUILD_TESTS=ON`, builds, and runs CTest. + +______________________________________________________________________ + +## Build Configuration + +All builds use `-O3 -g`: full optimization with debug symbols always enabled. Debug symbols add no runtime overhead and +ensure stack traces are always readable. + +______________________________________________________________________ + +## Formatting (`.clang-format`) + +The style is based on LLVM with the following notable deviations: + +### Line length + +``` +ColumnLimit: 120 +``` + +120 columns rather than the LLVM default of 80. ML and graph code tends to have longer identifiers and nested template +types; 120 gives enough room without forcing awkward wraps. 
+ +### Indentation and braces + +``` +IndentWidth: 4 +BreakBeforeBraces: Attach # K&R / "same-line" style +UseTab: Never +IndentCaseLabels: true # case labels indented inside switch +NamespaceIndentation: None # namespace bodies not indented +``` + +### Pointer and reference alignment + +``` +PointerAlignment: Left +``` + +Pointers bind to the type, not the name: `int* x`, not `int *x`. + +### Parameter and argument wrapping + +``` +BinPackArguments: false +BinPackParameters: false +``` + +When a function call or declaration doesn't fit on one line, every argument/parameter gets its own line. Mixed +"bin-packing" (some on one line, some wrapped) is not allowed. + +### Templates + +``` +AlwaysBreakTemplateDeclarations: true +``` + +`template <...>` always appears on its own line, keeping the declaration signature visually separate from the template +header. + +### Include ordering + +Includes are sorted and split into three priority groups: + +| Priority | Pattern | Group | +| -------- | ---------------------- | ------------------------------------------- | +| 1 | `.*` | Local project headers (first) | +| 2 | `^<(torch\|pybind11)/` | Torch and pybind11 headers | +| 3 | `^(<\|"gtest/)` | System and other third-party headers (last) | + +> When GLT (`graphlearn_torch`) headers are added, include `graphlearn_torch` in the Priority 2 pattern. + +### Raw string formatting + +Raw string literals with the `pb` delimiter (e.g. `R"pb(...)pb"`) are formatted as TextProto using Google style, +matching the protobuf idiom used throughout the codebase. + +______________________________________________________________________ + +## Static Analysis (`.clang-tidy`) + +### Check philosophy + +A broad set of check families is enabled to catch bugs, enforce modern C++ idioms, and maintain readability. All +warnings are errors — there is no "warning-only" category. 
+ +Enabled families: + +| Family | What it covers | +| --------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | +| `boost-use-to-string` | Prefer `std::to_string` over `boost::lexical_cast` for numeric conversions | +| `bugprone-*` | Common programming mistakes: dangling handles, suspicious string construction, assert side effects, etc. | +| `cert-*` | CERT secure coding rules for error handling (`err34-c`), floating-point loops (`flp30-c`), and RNG seeding (`msc32-c`, `msc50/51-cpp`) | +| `clang-diagnostic-*` | Compiler diagnostic warnings surfaced as lint checks (e.g. `-Wall`, `-Wextra` violations) | +| `cppcoreguidelines-*` | C++ Core Guidelines: no raw `malloc`, no union member access, no object slicing, safe downcasts | +| `google-*` | Google C++ style: explicit constructors, no global names in headers, safe `memset` usage | +| `hicpp-exception-baseclass` | All thrown exceptions must derive from `std::exception` | +| `misc-*` | Miscellaneous: header-only definitions, suspicious enum usage, throw-by-value/catch-by-reference, etc. | +| `modernize-*` | Modernize to C++11/14/17: `nullptr`, range-based for, `make_unique`, `using` aliases, etc. 
| +| `performance-*` | Unnecessary copies, inefficient string ops, missed `emplace`, type promotions in math functions | +| `readability-*` | Naming conventions, braces around statements, boolean simplification, function size limits | + +### Disabled checks + +Some checks in the above families are disabled where they produce excessive noise or conflict with common patterns in +this codebase: + +| Disabled check | Reason | +| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `bugprone-easily-swappable-parameters` | Tensor and sampler APIs legitimately have many adjacent same-typed parameters | +| `bugprone-implicit-widening-of-multiplication-result` | Crashes clang-tidy 15 on a construct in `ATen/core/dynamic_type.h` (upstream LLVM bug). Re-enable when upgrading past clang-tidy 15. | +| `bugprone-narrowing-conversions` | Too noisy in ML code mixing `int`/`int64_t`/`size_t` for tensor dimensions | +| `misc-confusable-identifiers` | Performs an O(n²) comparison of all identifiers in scope to detect Unicode homoglyphs. PyTorch headers introduce thousands of identifiers, making this check account for ~70% of total lint time. All identifiers in this codebase are standard ASCII. | +| `misc-const-correctness` | Produces false positives with pybind11 types whose mutation happens through `operator[]` (which is non-const). The check incorrectly suggests `const` on variables that are mutated. | +| `misc-no-recursion` | Recursive graph algorithms are intentional | +| `modernize-avoid-c-arrays` | C arrays are needed for pybind11 and C-interop code | +| `modernize-use-trailing-return-type` | Trailing return types (`auto f() -> T`) are only useful when the return type depends on template params. 
Requiring them everywhere is non-standard and reduces readability. | +| `readability-avoid-const-params-in-decls` | Incorrectly fires on `const T&` parameters in multi-line declarations (clang-tidy 15 bug). The check is meant for top-level const on by-value params, which is a separate, valid concern. | +| `readability-container-contains` | `.contains()` requires C++20; the codebase builds with C++17 | +| `readability-identifier-length` | Short loop variables (`i`, `j`, `k`) are idiomatic | +| `readability-function-cognitive-complexity` | Algorithmic code often requires nesting that is inherent to the problem structure. Enforcing an arbitrary complexity ceiling discourages clarity and encourages artificial decomposition. | +| `readability-magic-numbers` | Literal constants are common in ML code (e.g. feature dimensions) | + +### Naming conventions + +Enforced via `readability-identifier-naming`: + +| Identifier kind | Convention | Example | +| --------------------------------------------------------- | ---------------------------- | ----------------- | +| Classes, enums, unions | `PascalCase` | `DistDataset` | +| Type template parameters | `PascalCase` | `NodeType` | +| Functions, methods | `camelCase` | `sampleNeighbors` | +| Variables, parameters, members | `camelCase` | `numNodes` | +| Private/protected members | `camelCase` with `_` prefix | `_nodeFeatures` | +| Constants (`constexpr`, `const` globals, class constants) | `PascalCase` with `k` prefix | `kMaxBatchSize` | + +> **Note — clang-tidy option names:** `PascalCase` maps to clang-tidy's `CamelCase` enum value; `camelCase` maps to +> `camelBack`. 
+ +### Key option tuning + +| Option | Value | Effect | +| ---------------------------------------------------------- | ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `WarningsAsErrors` | `*` | Every check failure is a hard error in CI | +| `HeaderFilterRegex` | `.*/gigl-core/src/.*` | Scopes checks to our own headers. Using `.*` causes clang-tidy to report warnings from every PyTorch/pybind11 header it parses, flooding output with thousands of third-party issues. | +| `FormatStyle` | `none` | clang-tidy does not auto-reformat; use clang-format separately | +| `bugprone-string-constructor.LargeLengthThreshold` | `8388608` (8 MB) | Strings larger than 8 MB from a length argument are flagged | +| `modernize-loop-convert.NamingStyle` | `camelBack` | Auto-generated loop variable names use camelBack, matching `readability-identifier-naming.VariableCase` | +| `readability-function-size.LineThreshold` | `1000` | Functions over 1000 lines are flagged | +| `readability-braces-around-statements.ShortStatementLines` | `0` | Braces required for all control-flow bodies, even single-line | + +______________________________________________________________________ + +## pybind11 Extension Modules + +Extension modules live under `gigl-core/src/`. 
+ +### Naming convention + +| File | Purpose | +| -------------------------- | ---------------------------------------------------------------- | +| `python_.cpp` | pybind11 bindings — contains the `PYBIND11_MODULE` definition | +| `.cpp` / `.cu` | Implementation — function and class definitions | +| `.h` | Declarations (function signatures, class definitions, constants) | + +Example: to add a new function, create `gigl-core/src/my_op.h`, `gigl-core/src/my_op.cpp`, and +`gigl-core/src/python_my_op.cpp`, then list the sources in `pybind11_add_module(...)` inside `gigl-core/CMakeLists.txt`. +The compiled extension is bundled into the `_core` module and importable as `gigl_core._core.my_op` (re-exported from +`gigl_core/__init__.py`). diff --git a/docs/user_guide/getting_started/installation.md b/docs/user_guide/getting_started/installation.md index 4f3d40cdf..ef940db79 100644 --- a/docs/user_guide/getting_started/installation.md +++ b/docs/user_guide/getting_started/installation.md @@ -10,8 +10,10 @@ These are the current environments supported by GiGL ## Available Versions -You can see the available wheels for GiGL -[here](https://console.cloud.google.com/artifacts/python/external-snap-ci-github-gigl/us-central1/gigl/gigl?project=external-snap-ci-github-gigl) +You can see the available wheels for GiGL: + +- [CPU wheels](https://console.cloud.google.com/artifacts/python/external-snap-ci-github-gigl/us-central1/gigl/gigl?project=external-snap-ci-github-gigl) +- [CUDA wheels](https://console.cloud.google.com/artifacts/python/external-snap-ci-github-gigl/us-central1/gigl/gigl-cu128?project=external-snap-ci-github-gigl) ## Install Prerequisites - setting up your dev machine @@ -107,7 +109,7 @@ Below we provide two ways to bootstrap an environment for using and/or developin ```bash pip install "gigl[pyg27-torch28-cu128, transform]==0.1.0" \ ---extra-index-url=https://us-central1-python.pkg.dev/external-snap-ci-github-gigl/gigl/simple/ \ 
+--extra-index-url=https://us-central1-python.pkg.dev/external-snap-ci-github-gigl/gigl-cu128/simple/ \ --extra-index-url=https://download.pytorch.org/whl/cu128 \ --extra-index-url=https://data.pyg.org/whl/torch-2.8.0+cu128.html ``` diff --git a/gigl-core/.clang-format b/gigl-core/.clang-format new file mode 100644 index 000000000..fccba9f02 --- /dev/null +++ b/gigl-core/.clang-format @@ -0,0 +1,87 @@ +--- +# BasedOnStyle: LLVM +AccessModifierOffset: -4 +AlignAfterOpenBracket: Align +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +AlignEscapedNewlinesLeft: false +AlignOperands: true +AlignTrailingComments: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: false +AlwaysBreakTemplateDeclarations: true +BinPackArguments: false +BinPackParameters: false +BreakAfterJavaFieldAnnotations: false +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Attach +BreakBeforeTernaryOperators: false +BreakConstructorInitializersBeforeComma: false +BreakStringLiterals: true +ColumnLimit: 120 +CommentPragmas: '^ IWYU pragma:' +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +FixNamespaceComments: true +ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] +# Lower Priority numbers sort first (Priority 1 appears before Priority 2, etc.). +# This is the Google/PyTorch convention — the opposite of the LLVM default. +# Result: project-local headers (1) → torch/pybind11 (2) → system/third-party (3). +# Add graphlearn_torch to the Priority 2 regex when GLT headers appear in the codebase. 
+IncludeCategories: + - Regex: '.*' + Priority: 1 + - Regex: '^<(torch|pybind11)/' + Priority: 2 + - Regex: '^(<|"gtest/)' + Priority: 3 +IncludeIsMainRegex: '^$' +IndentCaseLabels: true +IndentWidth: 4 +IndentWrappedFunctionNames: false +KeepEmptyLinesAtTheStartOfBlocks: false +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBlockIndentWidth: 4 +ObjCSpaceAfterProperty: true +ObjCSpaceBeforeProtocolList: true +PenaltyBreakBeforeFirstCallParameter: 19 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 10000000 +PointerAlignment: Left +RawStringFormats: + - Delimiters: [pb] + Language: TextProto + BasedOnStyle: google +ReflowComments: true +SortIncludes: true +SpaceAfterCStyleCast: false +SpaceAfterTemplateKeyword: false +SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: ControlStatements +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 1 +SpacesInAngles: false +SpacesInContainerLiterals: false +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: c++17 +TabWidth: 4 +UseTab: Never +... diff --git a/gigl-core/.clang-tidy b/gigl-core/.clang-tidy new file mode 100644 index 000000000..153fbe541 --- /dev/null +++ b/gigl-core/.clang-tidy @@ -0,0 +1,236 @@ +--- +# -bugprone-implicit-widening-of-multiplication-result is disabled because it +# crashes clang-tidy 15 on a construct in ATen/core/dynamic_type.h (upstream +# LLVM bug). Re-enable when upgrading past clang-tidy 15. +# All other disabled checks are documented in docs/cpp_style_guide.md. 
+Checks: > + boost-use-to-string, + bugprone-*, + -bugprone-easily-swappable-parameters, + -bugprone-implicit-widening-of-multiplication-result, + -bugprone-narrowing-conversions, + cert-err34-c, + cert-flp30-c, + cert-msc32-c, + cert-msc50-cpp, + cert-msc51-cpp, + clang-diagnostic-*, + cppcoreguidelines-interfaces-global-init, + cppcoreguidelines-no-malloc, + cppcoreguidelines-pro-type-static-cast-downcast, + cppcoreguidelines-pro-type-union-access, + cppcoreguidelines-slicing, + google-build-namespaces, + google-explicit-constructor, + google-global-names-in-headers, + google-readability-casting, + google-runtime-member-string-references, + google-runtime-memset, + hicpp-exception-baseclass, + misc-*, + -misc-confusable-identifiers, + -misc-const-correctness, + -misc-no-recursion, + modernize-*, + -modernize-avoid-c-arrays, + -modernize-use-trailing-return-type, + performance-*, + readability-*, + -readability-avoid-const-params-in-decls, + -readability-function-cognitive-complexity, + -readability-container-contains, + -readability-identifier-length, + -readability-magic-numbers, + +# WarningsAsErrors and HeaderFilterRegex work together: +# HeaderFilterRegex scopes which headers clang-tidy reports diagnostics for. +# Warnings from headers outside the regex (PyTorch, pybind11, etc.) are suppressed +# entirely and never reach WarningsAsErrors — so the large warning counts printed +# by clang-tidy ("N warnings generated") are third-party noise that is silently +# dropped. Only diagnostics in our own headers (.*/gigl-core/src/.*) are reported, +# and those are treated as hard errors. +WarningsAsErrors: '*' +HeaderFilterRegex: '.*/gigl-core/src/.*' +FormatStyle: none +# CheckOptions: per-check tuning parameters. Each entry configures a specific +# option for an individual check, using the form: +# key: . +# value: +# These let you adjust thresholds, naming patterns, and behavior without +# enabling or disabling the check entirely. 
+CheckOptions: + - key: bugprone-argument-comment.StrictMode + value: '0' + - key: bugprone-assert-side-effect.AssertMacros + value: 'assert,SC_ASSERT' + - key: bugprone-assert-side-effect.CheckFunctionCalls + value: '0' + - key: bugprone-dangling-handle.HandleClasses + value: 'std::basic_string_view;std::experimental::basic_string_view' + - key: bugprone-string-constructor.LargeLengthThreshold + value: '8388608' + - key: bugprone-string-constructor.WarnOnLargeLength + value: '1' + - key: cppcoreguidelines-pro-type-member-init.IgnoreArrays + value: '1' + - key: google-global-names-in-headers.HeaderFileExtensions + value: ',h,hh,hpp,hxx' + - key: google-readability-function-size.StatementThreshold + value: '800' + - key: google-readability-namespace-comments.ShortNamespaceLines + value: '10' + - key: google-readability-namespace-comments.SpacesBeforeComments + value: '2' + - key: misc-definitions-in-headers.HeaderFileExtensions + value: ',h,hh,hpp,hxx' + - key: misc-definitions-in-headers.UseHeaderFileExtension + value: '1' + - key: misc-misplaced-widening-cast.CheckImplicitCasts + value: '0' + - key: misc-sizeof-expression.WarnOnSizeOfCompareToConstant + value: '1' + - key: misc-sizeof-expression.WarnOnSizeOfConstant + value: '1' + - key: misc-sizeof-expression.WarnOnSizeOfThis + value: '1' + - key: misc-suspicious-enum-usage.StrictMode + value: '0' + - key: misc-suspicious-missing-comma.MaxConcatenatedTokens + value: '5' + - key: misc-suspicious-missing-comma.RatioThreshold + value: '0.200000' + - key: misc-suspicious-missing-comma.SizeThreshold + value: '5' + - key: misc-suspicious-string-compare.StringCompareLikeFunctions + value: '' + - key: misc-suspicious-string-compare.WarnOnImplicitComparison + value: '1' + - key: misc-suspicious-string-compare.WarnOnLogicalNotComparison + value: '0' + - key: misc-throw-by-value-catch-by-reference.CheckThrowTemporaries + value: '1' + - key: modernize-loop-convert.MaxCopySize + value: '16' + - key: 
modernize-loop-convert.MinConfidence + value: reasonable + - key: modernize-loop-convert.NamingStyle + value: camelBack + - key: modernize-loop-convert.UseCxx20ReverseRanges + value: '0' + - key: modernize-make-unique.IgnoreMacros + value: '1' + - key: modernize-make-unique.IncludeStyle + value: 'llvm' + - key: modernize-make-unique.MakeSmartPtrFunction + value: 'std::make_unique' + - key: modernize-make-unique.MakeSmartPtrFunctionHeader + value: memory + - key: modernize-pass-by-value.IncludeStyle + value: llvm + - key: modernize-replace-auto-ptr.IncludeStyle + value: llvm + - key: modernize-use-emplace.ContainersWithPushBack + value: '::std::vector;::std::list;::std::deque' + - key: modernize-use-emplace.SmartPointers + value: '::std::shared_ptr;::std::unique_ptr;::std::auto_ptr;::std::weak_ptr' + - key: modernize-use-emplace.TupleMakeFunctions + value: '::std::make_pair;::std::make_tuple' + - key: modernize-use-emplace.TupleTypes + value: '::std::pair;::std::tuple' + - key: modernize-use-noexcept.ReplacementString + value: '' + - key: modernize-use-noexcept.UseNoexceptFalse + value: '1' + - key: modernize-use-nullptr.NullMacros + value: 'NULL' + - key: modernize-use-transparent-functors.SafeMode + value: '0' + - key: performance-faster-string-find.StringLikeClasses + value: 'std::basic_string' + - key: performance-for-range-copy.WarnOnAllAutoCopies + value: '0' + - key: performance-inefficient-string-concatenation.StrictMode + value: '0' + - key: performance-inefficient-vector-operation.VectorLikeClasses + value: '::std::vector' + - key: performance-move-const-arg.CheckTriviallyCopyableMove + value: '1' + - key: performance-move-constructor-init.IncludeStyle + value: llvm + - key: performance-type-promotion-in-math-fn.IncludeStyle + value: llvm + - key: readability-braces-around-statements.ShortStatementLines + value: '0' + # BranchThreshold, NestingThreshold, and ParameterThreshold are set to UINT32_MAX + # to effectively disable these sub-checks. 
GNN/ML kernels legitimately have deep + # nesting (loops over nodes, edges, and features) and many parameters (model configs, + # hyperparameters), so enforcing these limits would generate noise on valid code. + - key: readability-function-size.BranchThreshold + value: '4294967295' + - key: readability-function-size.LineThreshold + value: '1000' + - key: readability-function-size.NestingThreshold + value: '4294967295' + - key: readability-function-size.ParameterThreshold + value: '4294967295' + - key: readability-function-size.StatementThreshold + value: '800' + - key: readability-identifier-naming.ClassCase + value: CamelCase + - key: readability-identifier-naming.ClassConstantPrefix + value: k + - key: readability-identifier-naming.ClassConstantCase + value: CamelCase + - key: readability-identifier-naming.ClassMemberCase + value: camelBack + - key: readability-identifier-naming.ConstexprVariableCase + value: CamelCase + - key: readability-identifier-naming.ConstexprVariablePrefix + value: k + - key: readability-identifier-naming.EnumCase + value: CamelCase + - key: readability-identifier-naming.EnumConstantCase + value: CamelCase + - key: readability-identifier-naming.FunctionCase + value: camelBack + - key: readability-identifier-naming.GlobalConstantPrefix + value: k + - key: readability-identifier-naming.GlobalConstantCase + value: CamelCase + - key: readability-identifier-naming.IgnoreFailedSplit + value: '0' + - key: readability-identifier-naming.LocalConstantCase + value: camelBack + - key: readability-identifier-naming.MemberCase + value: camelBack + - key: readability-identifier-naming.MethodCase + value: camelBack + - key: readability-identifier-naming.ParameterCase + value: camelBack + - key: readability-identifier-naming.PrivateMemberCase + value: camelBack + - key: readability-identifier-naming.PrivateMemberPrefix + value: _ + - key: readability-identifier-naming.ProtectedMemberCase + value: camelBack + - key: 
readability-identifier-naming.ProtectedMemberPrefix + value: _ + - key: readability-identifier-naming.PublicMemberCase + value: camelBack + - key: readability-identifier-naming.TemplateParameterCase + value: camelBack + - key: readability-identifier-naming.TypeTemplateParameterCase + value: CamelCase + - key: readability-identifier-naming.UnionCase + value: CamelCase + - key: readability-identifier-naming.VariableCase + value: camelBack + - key: readability-implicit-bool-conversion.AllowPointerConditions + value: '1' + - key: readability-simplify-boolean-expr.ChainedConditionalAssignment + value: '0' + - key: readability-simplify-boolean-expr.ChainedConditionalReturn + value: '0' + - key: readability-static-accessed-through-instance.NameSpecifierNestingThreshold + value: '3' +... diff --git a/gigl-core/CMakeLists.txt b/gigl-core/CMakeLists.txt new file mode 100644 index 000000000..a043af8e0 --- /dev/null +++ b/gigl-core/CMakeLists.txt @@ -0,0 +1,78 @@ +cmake_minimum_required(VERSION 3.18) +project(gigl_core CXX) + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +# CMP0104: CMake 3.18+ warns when enable_language(CUDA) is called without +# CMAKE_CUDA_ARCHITECTURES being set. Set it to OFF — Torch provides a +# comprehensive arch list via TORCH_LIBRARIES. +cmake_policy(SET CMP0104 NEW) +set(CMAKE_CUDA_ARCHITECTURES OFF) + +# Enable CUDA only when the toolkit is present; allows the same CMakeLists.txt +# to build on CPU-only machines without requiring nvcc. find_package(Torch) +# internally calls enable_language(CUDA) when torch was built with CUDA support, +# which requires CMAKE_CUDA_COMPILER to be set — hence the nvcc hint below. 
+include(CheckLanguage) +check_language(CUDA) +if(NOT CMAKE_CUDA_COMPILER) + find_program(CMAKE_CUDA_COMPILER nvcc HINTS /usr/local/cuda/bin) +endif() +if(CMAKE_CUDA_COMPILER) + enable_language(CUDA) + set(CMAKE_CUDA_STANDARD 17) + set(CMAKE_CUDA_STANDARD_REQUIRED ON) +endif() + +find_package(Python COMPONENTS Interpreter Development.Module REQUIRED) + +# Locate pybind11 and torch cmake configs by scanning CMAKE_PREFIX_PATH, which +# scikit-build-core sets to the active environment's site-packages directory. +foreach(_prefix IN LISTS CMAKE_PREFIX_PATH) + if(NOT pybind11_DIR AND EXISTS "${_prefix}/pybind11/share/cmake/pybind11") + set(pybind11_DIR "${_prefix}/pybind11/share/cmake/pybind11") + endif() + if(NOT TORCH_CMAKE_PREFIX AND EXISTS "${_prefix}/torch/share/cmake") + set(TORCH_CMAKE_PREFIX "${_prefix}/torch/share/cmake") + endif() +endforeach() + +if(NOT pybind11_DIR) + message(FATAL_ERROR "Cannot find pybind11 cmake config in CMAKE_PREFIX_PATH: ${CMAKE_PREFIX_PATH}") +endif() +if(NOT TORCH_CMAKE_PREFIX) + message(FATAL_ERROR "Cannot find torch cmake config in CMAKE_PREFIX_PATH: ${CMAKE_PREFIX_PATH}") +endif() + +find_package(pybind11 CONFIG REQUIRED) +find_package(Torch REQUIRED PATHS "${TORCH_CMAKE_PREFIX}") + +# torch_python provides the pybind11 type casters for at::Tensor. It is not +# included in TORCH_LIBRARIES but is required for extensions that pass tensors +# across the Python/C++ boundary. +find_library(TORCH_PYTHON_LIBRARY torch_python PATHS "${TORCH_INSTALL_PREFIX}/lib" REQUIRED) + +set(GIGL_COMPILE_FLAGS -O3 -g -Wall -Wextra -Wno-unused-parameter) +# nvcc does not accept bare -Wall/-Wextra; wrap with -Xcompiler for CUDA sources. 
+set(GIGL_COMPILE_FLAGS_CUDA -O3 -Xcompiler=-g,-Wall,-Wextra,-Wno-unused-parameter) + +pybind11_add_module(_core + src/add_one.cpp + src/python_add_one.cpp +) +target_link_libraries(_core PRIVATE "${TORCH_LIBRARIES}" "${TORCH_PYTHON_LIBRARY}") +target_compile_options(_core PRIVATE + $<$:${GIGL_COMPILE_FLAGS}> + $<$:${GIGL_COMPILE_FLAGS_CUDA}> +) +# TORCH_EXTENSION_NAME is used in PYBIND11_MODULE() to name the module. +# PyTorch's own build system sets this; we must define it explicitly here. +target_compile_definitions(_core PRIVATE TORCH_EXTENSION_NAME=_core) +install(TARGETS _core DESTINATION gigl_core) + +option(GIGL_CORE_BUILD_TESTS "Build C++ unit tests" OFF) +if(GIGL_CORE_BUILD_TESTS) + enable_testing() + add_subdirectory(tests) +endif() diff --git a/gigl-core/README.md b/gigl-core/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/gigl-core/pyproject.toml b/gigl-core/pyproject.toml new file mode 100644 index 000000000..5c0243566 --- /dev/null +++ b/gigl-core/pyproject.toml @@ -0,0 +1,39 @@ +[project] +name = "gigl-core" +description = "GiGL C++/CUDA kernels (pybind11 extensions)" +readme = "README.md" +version = "0.2.0" +requires-python = "==3.11.*" +# Torch is resolved from the ambient environment. gigl-core wheels are ABI-bound +# to the torch variant they were built against (cpu or cu128). The parent `gigl` +# extras (pyg27-torch28-cpu / pyg27-torch28-cu128) already pin torch==2.8, and +# pip picks the matching gigl-core wheel from whichever GCP registry the user +# configured for gigl. Same pattern as PyG's pyg_lib. 
+dependencies = [] + +[build-system] +requires = ["scikit-build-core>=0.10", "pybind11>=2.12"] +build-backend = "scikit_build_core.build" + +[tool.scikit-build] +cmake.version = ">=3.18" +build-dir = "build/{wheel_tag}" +# Default editable mode is `redirect`: scikit-build-core writes the compiled .so +# into build/{wheel_tag}/ and installs an import-hook shim into site-packages +# that resolves `gigl_core._core` to that path. Plays correctly with src layouts +# where `inplace` mode would mis-place the .so at the project root. `editable.rebuild` +# stays off so imports never block on CMake — explicit `uv sync` or `uv pip install +# -e .` drives rebuilds. +editable.rebuild = false +# Always emit compile_commands.json into the scikit-build-core build dir so +# clang-tidy reads it directly — no custom wrapper script required. +cmake.define.CMAKE_EXPORT_COMPILE_COMMANDS = "ON" + +[tool.uv] +# Invalidate the uv build cache when any of these inputs change. Without this, +# editing a .cpp file in src/ would not trigger a rebuild on `uv sync`. 
+cache-keys = [ + { file = "pyproject.toml" }, + { file = "CMakeLists.txt" }, + { file = "src/**/*.{h,cpp,cu,cuh}" }, +] diff --git a/gigl-core/src/add_one.cpp b/gigl-core/src/add_one.cpp new file mode 100644 index 000000000..d46ba139f --- /dev/null +++ b/gigl-core/src/add_one.cpp @@ -0,0 +1,8 @@ +#include "add_one.h" + +#include + +at::Tensor addOne(const at::Tensor& input) { + TORCH_CHECK(input.device().is_cpu(), "add_one requires a CPU tensor"); + return input + 1; +} diff --git a/gigl-core/src/add_one.h b/gigl-core/src/add_one.h new file mode 100644 index 000000000..1f4ee902e --- /dev/null +++ b/gigl-core/src/add_one.h @@ -0,0 +1,5 @@ +#pragma once + +#include + +at::Tensor addOne(const at::Tensor& input); diff --git a/gigl-core/src/gigl_core/__init__.py b/gigl-core/src/gigl_core/__init__.py new file mode 100644 index 000000000..558413fcd --- /dev/null +++ b/gigl-core/src/gigl_core/__init__.py @@ -0,0 +1,3 @@ +from gigl_core._core import add_one + +__all__ = ["add_one"] diff --git a/gigl-core/src/gigl_core/_core.pyi b/gigl-core/src/gigl_core/_core.pyi new file mode 100644 index 000000000..14c54ee39 --- /dev/null +++ b/gigl-core/src/gigl_core/_core.pyi @@ -0,0 +1,3 @@ +import torch + +def add_one(tensor: torch.Tensor) -> torch.Tensor: ... diff --git a/gigl-core/src/python_add_one.cpp b/gigl-core/src/python_add_one.cpp new file mode 100644 index 000000000..c91b826a8 --- /dev/null +++ b/gigl-core/src/python_add_one.cpp @@ -0,0 +1,12 @@ +#include "add_one.h" + +#include + +#include + +PYBIND11_MODULE(_core, m) { + m.doc() = "GiGL core pybind11 extension module."; + m.def("add_one", + &addOne, + "Return a new tensor equal to the input with one added to each element. Requires a CPU tensor."); +} diff --git a/gigl-core/tests/CMakeLists.txt b/gigl-core/tests/CMakeLists.txt new file mode 100644 index 000000000..15f5edc33 --- /dev/null +++ b/gigl-core/tests/CMakeLists.txt @@ -0,0 +1,33 @@ +# GoogleTest via FetchContent. Pinned to v1.14.0 (stable, C++17 compatible). 
+include(FetchContent) +FetchContent_Declare( + googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG v1.14.0 + GIT_SHALLOW TRUE +) +# Prevent GoogleTest from overriding the compiler's runtime on Windows +# (no-op on Linux/Mac, but required for portable CMake config). +set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) +FetchContent_MakeAvailable(googletest) + +# Any file named *_test.cpp (or *_test.cu when CUDA is enabled) in this +# directory is compiled into its own CTest binary. To add a new test suite, +# drop a *_test.cpp file here — no changes to this file required. +if(CMAKE_CUDA_COMPILER) + file(GLOB_RECURSE TEST_SOURCES "*_test.cpp" "*_test.cu") +else() + file(GLOB_RECURSE TEST_SOURCES "*_test.cpp") +endif() + +foreach(test_source ${TEST_SOURCES}) + # Derive a unique binary name from the relative path, e.g.: + # add_one_test.cpp → add_one_test + # sampling/ppr_forward_push_test.cpp → sampling_ppr_forward_push_test + file(RELATIVE_PATH _rel "${CMAKE_CURRENT_SOURCE_DIR}" "${test_source}") + string(REPLACE "/" "_" test_name "${_rel}") + string(REGEX REPLACE "\\.[^.]+$" "" test_name "${test_name}") + add_executable(${test_name} ${test_source}) + target_link_libraries(${test_name} GTest::gtest_main) + add_test(NAME ${test_name} COMMAND ${test_name}) +endforeach() diff --git a/gigl-core/tests/infrastructure_test.cpp b/gigl-core/tests/infrastructure_test.cpp new file mode 100644 index 000000000..a329be257 --- /dev/null +++ b/gigl-core/tests/infrastructure_test.cpp @@ -0,0 +1,10 @@ +// Placeholder C++ unit test. +// +// This file exists to verify that the GoogleTest infrastructure compiles and +// runs end-to-end. 
+#include <gtest/gtest.h>
-# Find out if there is 'setup.py' or 'pyproject.toml' in the current directory, if so install it -RUN if [ -f "setup.py" ] || [ -f "pyproject.toml" ]; then pip install -e .; fi +# Find out if there is 'setup.py' or 'pyproject.toml' in the current directory, if so install it. +# The customer's [build-system].requires handles any build-time dependencies via PEP 517 build +# isolation — we no longer need to force-install scikit-build-core/pybind11 globally. +RUN if [ -f "setup.py" ] || [ -f "pyproject.toml" ]; then uv pip install .; fi diff --git a/hello b/hello new file mode 100644 index 000000000..e69de29bb diff --git a/marco b/marco new file mode 100644 index 000000000..e69de29bb diff --git a/pyproject.toml b/pyproject.toml index b0d0345f2..5a958440a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,6 +13,9 @@ requires-python = "==3.11.*" # Limited by tfx-bsl not having wheels available fo dependencies = [ "argo-workflows", "chardet", + # gigl-core hosts all C++ / CUDA / pybind11 extensions. Separate wheel per torch + # variant (see [tool.uv.sources] below). Same pattern PyG uses for pyg_lib. + "gigl-core", "google-cloud-aiplatform", "google-cloud-dataproc", "google-cloud-logging", @@ -80,7 +83,6 @@ pyg27-torch28-cu128 = [ "torch_sparse; sys_platform != 'darwin'", "torch_spline_conv; sys_platform != 'darwin'", "torchrec ; sys_platform != 'darwin'", - ] experimental = [ "hydra-core==1.3.2", @@ -99,6 +101,7 @@ required-environments = [ "sys_platform == 'linux' and platform_machine == 'x86_64'", "sys_platform == 'darwin' and platform_machine == 'arm64'", ] +no-build-isolation-package = ["gigl-core"] [dependency-groups] dev = [ @@ -177,12 +180,26 @@ explicit = true format = "flat" # =============================== Google Artifact Registry Index ======================== +# Two separate indexes — one per torch variant — so pip always fetches the correct wheel. 
+# If both variants lived in the same index, pip would pick between them arbitrarily since +# both wheels share the same platform tag (linux_x86_64). Separate indexes remove the +# ambiguity and are essential once any extension contains CUDA kernels. +# +# The CPU registry reuses the original `gigl` repo for backwards compatibility. +# The CUDA registry is a new `gigl-cu128` repo that must be created in GCP project +# `external-snap-ci-github-gigl` before the release workflow will succeed. [[tool.uv.index]] -name = "gcp-release-registry" +name = "gcp-release-registry-cpu" url = "https://us-central1-python.pkg.dev/external-snap-ci-github-gigl/gigl/simple/" publish-url = "https://us-central1-python.pkg.dev/external-snap-ci-github-gigl/gigl" explicit = true +[[tool.uv.index]] +name = "gcp-release-registry-cu128" +url = "https://us-central1-python.pkg.dev/external-snap-ci-github-gigl/gigl-cu128/simple/" +publish-url = "https://us-central1-python.pkg.dev/external-snap-ci-github-gigl/gigl-cu128" +explicit = true + # ========== Mapping individual packages to their appropriate index ============= [tool.uv.sources] # ============= PyTorch hosted Package Index Mappings @@ -221,18 +238,33 @@ torch_spline_conv = [ { extra = "pyg27-torch28-cpu", index = "pyg-torch28-cpu" }, { extra = "pyg27-torch28-cu128", index = "pyg-torch28-cu128" }, ] +# gigl-core resolves to the local workspace member in dev — uv sync builds it +# from source via scikit-build-core. At release install time, the published +# gigl wheel has `Requires-Dist: gigl-core`; consumers resolve gigl-core from +# whichever GCP index they already configured for installing gigl itself +# (both variants are published to gigl-cu128 and gigl CPU registries). +gigl-core = { workspace = true } +[tool.uv.workspace] +members = [ + "gigl-core", +] # ===================== Build/Project Configurations =========================== +# gigl is a pure-Python package. 
All C++/CUDA kernels live in the gigl-core +# wheel (see gigl-core/pyproject.toml). Release builds two variants per package: +# cpu and cu128. gigl's build stays fast because no CMake runs here. [build-system] -requires = ["setuptools>=61.0.0", "wheel"] +requires = ["setuptools>=68"] build-backend = "setuptools.build_meta" - [tool.setuptools.packages.find] -where = ["."] # list of folders that contain the packages -include = ["gigl*", "snapchat*"] # Include only packages that match the specified patterns +# Include all sub-packages under gigl/ and snapchat/. gigl-core is a separate +# workspace member with its own pyproject.toml and is excluded here. +include = ["gigl*", "snapchat*"] +exclude = ["gigl-core*", "tests*"] +namespaces = true [project.urls] Homepage = "https://github.com/snapchat/gigl" @@ -240,12 +272,6 @@ Homepage = "https://github.com/snapchat/gigl" [project.scripts] gigl-post-install = "gigl.scripts.post_install:main" -[tool.setuptools.package-data] -# Include dep_vars.env from the root directory -"gigl" = ["dep_vars.env", "**/*.yaml"] -"gigl.scripts" = ["*.sh"] - - [tool.ruff] # Skip generated proto files. exclude = ["*_pb2.py", "*_pb2.pyi"] diff --git a/requirements/install_cpp_deps.sh b/requirements/install_cpp_deps.sh new file mode 100644 index 000000000..d3e0a4353 --- /dev/null +++ b/requirements/install_cpp_deps.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# Install C++ development tools: clang-format, clang-tidy, cmake. +# +# Usage: +# bash requirements/install_cpp_deps.sh +# +# Called by `make install_dev_deps` alongside install_py_deps.sh and +# install_scala_deps.sh. +# +# NOTE: macOS is not supported. C++ tooling requires GLT, which does not run on macOS. + +set -e +set -x + +if [ "$(uname)" == "Darwin" ]; then + echo "ERROR: macOS is not supported for C++ tooling (GLT does not run on macOS)." >&2 + exit 1 +fi + +# On Linux, apt-get installs versioned binaries (e.g. clang-format-15) directly +# into /usr/bin. 
No PATH changes are needed since /usr/bin is already on PATH. +# Callers use the versioned names (clang-format-15, clang-tidy-15, clangd-15) +# directly so the version is explicit and greppable across the codebase. +# clang++-15 requires libstdc++-12-dev: on Ubuntu 22.04, clang++-15 looks for GCC 12 +# headers. Without this package clang++-15 cannot find standard headers like . +# clang++-15 itself is needed because generate_compile_commands.py rewrites +# compile_commands.json to use it so clangd natively understands the commands. +sudo apt-get update -y +sudo apt-get install -y clang-format-15 clang-tidy-15 clangd-15 clang++-15 libstdc++-12-dev cmake + +# Verify cmake >= 3.18 (our CMakeLists.txt requires it; Ubuntu 20.04 apt provides 3.16). +cmake_version=$(cmake --version | awk 'NR==1{print $3}') +if ! printf '3.18\n%s\n' "$cmake_version" | sort -V -C 2>/dev/null; then + echo "ERROR: cmake >= 3.18 required, found $cmake_version. See https://cmake.org/download/" >&2 + exit 1 +fi + +echo "Finished installing C++ tooling" diff --git a/requirements/install_py_deps.sh b/requirements/install_py_deps.sh index 6a328165e..3e7ba6252 100644 --- a/requirements/install_py_deps.sh +++ b/requirements/install_py_deps.sh @@ -45,12 +45,10 @@ has_cuda_driver() { is_running_on_mac() { [ "$(uname)" == "Darwin" ] - return $? } is_running_on_m1_mac() { [ "$(uname)" == "Darwin" ] && [ $(uname -m) == 'arm64' ] - return $? } ### Installation Functions ### @@ -138,6 +136,11 @@ install_gigl_lib_deps() { flag_use_inexact_match="--inexact" fi + # --no-install-project: skip building and installing the gigl package itself here. + # The project is installed separately via `uv pip install -e .` + # (see Makefile install_dev_deps/install_deps targets and the *.src Dockerfiles). + # This avoids scikit-build-core requiring all source files (e.g. README.md, CMakeLists.txt) + # to be present in environments that only need the dependencies (e.g. base Docker images). 
if [[ $DEV -eq 1 ]] then # https://docs.astral.sh/uv/reference/cli/#uv-sync @@ -151,7 +154,10 @@ install_gigl_lib_deps() { if [[ "${SKIP_GLT_POST_INSTALL}" -eq 0 ]] then SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) - uv run python $SCRIPT_DIR/../gigl/scripts/post_install.py + # --no-project: prevents uv from trying to install the gigl project before running + # the script. We intentionally skipped installing it above (--no-install-project), + # and post_install.py only runs install_glt.sh — it does not need gigl installed. + uv run --no-project python $SCRIPT_DIR/../gigl/scripts/post_install.py fi } diff --git a/tests/unit/core/__init__.py b/tests/unit/core/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/core/add_one_test.py b/tests/unit/core/add_one_test.py new file mode 100644 index 000000000..723726acf --- /dev/null +++ b/tests/unit/core/add_one_test.py @@ -0,0 +1,22 @@ +import torch + +from gigl_core import add_one +from tests.test_assets.test_case import TestCase + + +class TestAddOne(TestCase): + def test_add_one_returns_elementwise_increment(self) -> None: + actual = add_one(torch.tensor([1, 2, 3])) + expected = torch.tensor([2, 3, 4]) + self.assertTrue(torch.equal(actual, expected)) + + def test_add_one_is_out_of_place(self) -> None: + original = torch.tensor([1.0, 2.0]) + _ = add_one(original) + self.assertTrue(torch.equal(original, torch.tensor([1.0, 2.0]))) + + def test_add_one_rejects_cuda_tensor(self) -> None: + if not torch.cuda.is_available(): + self.skipTest("CUDA not available") + with self.assertRaises(RuntimeError): + add_one(torch.tensor([1, 2, 3], device="cuda")) diff --git a/uv.lock b/uv.lock index 8e369b607..9c06d1faa 100644 --- a/uv.lock +++ b/uv.lock @@ -17,6 +17,12 @@ conflicts = [[ { package = "gigl", extra = "pyg27-torch28-cu128" }, ]] +[manifest] +members = [ + "gigl", + "gigl-core", +] + [[package]] name = "absl-py" version = "1.4.0" @@ -607,8 +613,8 @@ dependencies 
= [ { name = "numpy", marker = "sys_platform != 'darwin'" }, ] wheels = [ - { url = "https://download-r2.pytorch.org/whl/cpu/fbgemm_gpu-1.3.0%2Bcpu-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:4154e803ba762906a604a72aa41685fdd49459fce55cea79d42ac7c45c8770ca" }, - { url = "https://download-r2.pytorch.org/whl/cpu/fbgemm_gpu-1.3.0%2Bcpu-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:0267ec844b43028f4b9b8e14acd16276e82bb97f91b6b1078f732eb9225b20c6" }, + { url = "https://download-r2.pytorch.org/whl/cpu/fbgemm_gpu-1.3.0%2Bcpu-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:4154e803ba762906a604a72aa41685fdd49459fce55cea79d42ac7c45c8770ca", upload-time = "2025-08-22T18:49:45Z" }, + { url = "https://download-r2.pytorch.org/whl/cpu/fbgemm_gpu-1.3.0%2Bcpu-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:0267ec844b43028f4b9b8e14acd16276e82bb97f91b6b1078f732eb9225b20c6", upload-time = "2025-08-22T18:49:45Z" }, ] [[package]] @@ -619,7 +625,7 @@ dependencies = [ { name = "numpy" }, ] wheels = [ - { url = "https://download-r2.pytorch.org/whl/cu128/fbgemm_gpu-1.3.0%2Bcu128-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:365a2c7f89e89f6d8acf3af5101cbb1651cd1cc64057fd2902feae490814cee3" }, + { url = "https://download-r2.pytorch.org/whl/cu128/fbgemm_gpu-1.3.0%2Bcu128-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:365a2c7f89e89f6d8acf3af5101cbb1651cd1cc64057fd2902feae490814cee3", upload-time = "2025-08-22T18:50:02Z" }, ] [[package]] @@ -707,6 +713,7 @@ source = { editable = "." 
} dependencies = [ { name = "argo-workflows" }, { name = "chardet" }, + { name = "gigl-core" }, { name = "google-cloud-aiplatform" }, { name = "google-cloud-dataproc" }, { name = "google-cloud-logging" }, @@ -841,6 +848,7 @@ requires-dist = [ { name = "chardet" }, { name = "fbgemm-gpu", marker = "sys_platform != 'darwin' and extra == 'pyg27-torch28-cpu'", specifier = "~=1.3.0", index = "https://download.pytorch.org/whl/cpu", conflict = { package = "gigl", extra = "pyg27-torch28-cpu" } }, { name = "fbgemm-gpu", marker = "sys_platform != 'darwin' and extra == 'pyg27-torch28-cu128'", specifier = "~=1.3.0", index = "https://download.pytorch.org/whl/cu128", conflict = { package = "gigl", extra = "pyg27-torch28-cu128" } }, + { name = "gigl-core", editable = "gigl-core" }, { name = "google-cloud-aiplatform" }, { name = "google-cloud-dataproc" }, { name = "google-cloud-logging" }, @@ -953,6 +961,11 @@ typing-stubs = [ { name = "types-tqdm", specifier = "==4.67.0.20250513" }, ] +[[package]] +name = "gigl-core" +version = "0.2.0" +source = { editable = "gigl-core" } + [[package]] name = "google-api-core" version = "2.28.1" @@ -4119,7 +4132,7 @@ dependencies = [ { name = "typing-extensions", marker = "platform_machine == 'arm64' and sys_platform == 'darwin'" }, ] wheels = [ - { url = "https://download-r2.pytorch.org/whl/cpu/torch-2.8.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:3d05017d19bc99741288e458888283a44b0ee881d53f05f72f8b1cfea8998122" }, + { url = "https://download-r2.pytorch.org/whl/cpu/torch-2.8.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:3d05017d19bc99741288e458888283a44b0ee881d53f05f72f8b1cfea8998122", upload-time = "2025-10-01T23:35:48Z" }, ] [[package]] @@ -4139,11 +4152,11 @@ dependencies = [ { name = "typing-extensions", marker = "(platform_machine != 'arm64' and extra == 'extra-4-gigl-pyg27-torch28-cpu') or (sys_platform != 'darwin' and extra == 'extra-4-gigl-pyg27-torch28-cpu') or (extra == 'extra-4-gigl-pyg27-torch28-cpu' and extra == 
'extra-4-gigl-pyg27-torch28-cu128')" }, ] wheels = [ - { url = "https://download-r2.pytorch.org/whl/cpu/torch-2.8.0%2Bcpu-cp311-cp311-linux_s390x.whl", hash = "sha256:2bfc013dd6efdc8f8223a0241d3529af9f315dffefb53ffa3bf14d3f10127da6" }, - { url = "https://download-r2.pytorch.org/whl/cpu/torch-2.8.0%2Bcpu-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:680129efdeeec3db5da3f88ee5d28c1b1e103b774aef40f9d638e2cce8f8d8d8" }, - { url = "https://download-r2.pytorch.org/whl/cpu/torch-2.8.0%2Bcpu-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cb06175284673a581dd91fb1965662ae4ecaba6e5c357aa0ea7bb8b84b6b7eeb" }, - { url = "https://download-r2.pytorch.org/whl/cpu/torch-2.8.0%2Bcpu-cp311-cp311-win_amd64.whl", hash = "sha256:7631ef49fbd38d382909525b83696dc12a55d68492ade4ace3883c62b9fc140f" }, - { url = "https://download-r2.pytorch.org/whl/cpu/torch-2.8.0%2Bcpu-cp311-cp311-win_arm64.whl", hash = "sha256:41e6fc5ec0914fcdce44ccf338b1d19a441b55cafdd741fd0bf1af3f9e4cfd14" }, + { url = "https://download-r2.pytorch.org/whl/cpu/torch-2.8.0%2Bcpu-cp311-cp311-linux_s390x.whl", hash = "sha256:2bfc013dd6efdc8f8223a0241d3529af9f315dffefb53ffa3bf14d3f10127da6", upload-time = "2025-10-01T23:33:07Z" }, + { url = "https://download-r2.pytorch.org/whl/cpu/torch-2.8.0%2Bcpu-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:680129efdeeec3db5da3f88ee5d28c1b1e103b774aef40f9d638e2cce8f8d8d8", upload-time = "2025-10-01T23:33:11Z" }, + { url = "https://download-r2.pytorch.org/whl/cpu/torch-2.8.0%2Bcpu-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cb06175284673a581dd91fb1965662ae4ecaba6e5c357aa0ea7bb8b84b6b7eeb", upload-time = "2025-10-01T23:33:14Z" }, + { url = "https://download-r2.pytorch.org/whl/cpu/torch-2.8.0%2Bcpu-cp311-cp311-win_amd64.whl", hash = "sha256:7631ef49fbd38d382909525b83696dc12a55d68492ade4ace3883c62b9fc140f", upload-time = "2025-10-01T23:33:20Z" }, + { url = "https://download-r2.pytorch.org/whl/cpu/torch-2.8.0%2Bcpu-cp311-cp311-win_arm64.whl", hash = 
"sha256:41e6fc5ec0914fcdce44ccf338b1d19a441b55cafdd741fd0bf1af3f9e4cfd14", upload-time = "2025-10-01T23:33:36Z" }, ] [[package]] @@ -4174,8 +4187,8 @@ dependencies = [ { name = "typing-extensions", marker = "extra == 'extra-4-gigl-pyg27-torch28-cu128'" }, ] wheels = [ - { url = "https://download-r2.pytorch.org/whl/cu128/torch-2.8.0%2Bcu128-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:039b9dcdd6bdbaa10a8a5cd6be22c4cb3e3589a341e5f904cbb571ca28f55bed" }, - { url = "https://download-r2.pytorch.org/whl/cu128/torch-2.8.0%2Bcu128-cp311-cp311-win_amd64.whl", hash = "sha256:34c55443aafd31046a7963b63d30bc3b628ee4a704f826796c865fdfd05bb596" }, + { url = "https://download-r2.pytorch.org/whl/cu128/torch-2.8.0%2Bcu128-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:039b9dcdd6bdbaa10a8a5cd6be22c4cb3e3589a341e5f904cbb571ca28f55bed", upload-time = "2025-10-01T23:49:06Z" }, + { url = "https://download-r2.pytorch.org/whl/cu128/torch-2.8.0%2Bcu128-cp311-cp311-win_amd64.whl", hash = "sha256:34c55443aafd31046a7963b63d30bc3b628ee4a704f826796c865fdfd05bb596", upload-time = "2025-10-01T23:49:30Z" }, ] [[package]] @@ -4330,7 +4343,7 @@ dependencies = [ { name = "tqdm", marker = "sys_platform != 'darwin'" }, ] wheels = [ - { url = "https://download.pytorch.org/whl/cpu/torchrec-1.3.0%2Bcpu-py3-none-any.whl", hash = "sha256:be2b572625792feac1656afcac19e35448df5447d215575a4b8cb22d9220d2cf" }, + { url = "https://download.pytorch.org/whl/cpu/torchrec-1.3.0%2Bcpu-py3-none-any.whl", hash = "sha256:be2b572625792feac1656afcac19e35448df5447d215575a4b8cb22d9220d2cf", upload-time = "2025-09-17T07:14:32Z" }, ] [[package]] @@ -4346,7 +4359,7 @@ dependencies = [ { name = "tqdm" }, ] wheels = [ - { url = "https://download.pytorch.org/whl/cu128/torchrec-1.3.0%2Bcu128-py3-none-any.whl", hash = "sha256:6de7e4a70a6e95815a8f06b1dec4d982cea4d32fa7d86a10a8bb4c52b8a749b9" }, + { url = "https://download.pytorch.org/whl/cu128/torchrec-1.3.0%2Bcu128-py3-none-any.whl", hash = 
"sha256:6de7e4a70a6e95815a8f06b1dec4d982cea4d32fa7d86a10a8bb4c52b8a749b9", upload-time = "2025-09-17T07:14:37Z" }, ] [[package]]