From 94a21dafff8989a9793c1c0e127f14c03dd8d36a Mon Sep 17 00:00:00 2001 From: Jules Ivanic Date: Tue, 27 Jan 2026 18:21:25 +1100 Subject: [PATCH 1/5] Add SNAPSHOT publishing to Maven Central This adds automatic SNAPSHOT publishing to Maven Central's snapshots repository (https://central.sonatype.com/repository/maven-snapshots/) when commits are pushed to the main branch. Key insight: Unlike release publishing, SNAPSHOT publishing does NOT require GPG signatures or validation. This makes the implementation much simpler - we can use standard `mvn deploy:deploy-file` instead of the bundle-based Central Publisher API. Changes: - Add `scripts/jdbc_maven_deploy_snapshot.py` that uses Maven's deploy:deploy-file goal to upload pre-built JARs - Add `maven-snapshot-deploy` job to Java.yml workflow that runs after all platform builds complete on main branch The SNAPSHOT version is automatically calculated by incrementing the minor version from the last release tag (e.g., v1.4.x.x -> 1.5.0.0-SNAPSHOT). 
Closes #338 References: - https://central.sonatype.org/publish/publish-portal-snapshots/ - https://central.sonatype.org/news/20250114_snapshot_publishing_via_portal/ --- .github/workflows/Java.yml | 75 +++++++ scripts/jdbc_maven_deploy_snapshot.py | 292 ++++++++++++++++++++++++++ 2 files changed, 367 insertions(+) create mode 100755 scripts/jdbc_maven_deploy_snapshot.py diff --git a/.github/workflows/Java.yml b/.github/workflows/Java.yml index b27b9aee5..40a670c06 100644 --- a/.github/workflows/Java.yml +++ b/.github/workflows/Java.yml @@ -723,6 +723,81 @@ jobs: path: | jdbc-artifacts + # Publish SNAPSHOT builds to Maven Central Snapshots repository on main branch + # SNAPSHOTs don't require GPG signatures or validation + # See: https://central.sonatype.org/publish/publish-portal-snapshots/ + maven-snapshot-deploy: + if: ${{ github.repository == 'duckdb/duckdb-java' && github.ref == 'refs/heads/main' && github.event_name == 'push' }} + name: Maven SNAPSHOT Deploy + runs-on: ubuntu-latest + needs: + - java-linux-amd64 + - java-linux-amd64-tck + - java-linux-amd64-spark + - java-linux-aarch64 + - java-linux-amd64-musl + - java-linux-aarch64-musl + - java-windows-amd64 + - java-windows-aarch64 + - java-osx-universal + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up JDK + uses: actions/setup-java@v4 + with: + java-version: '11' + distribution: 'temurin' + + - shell: bash + run: mkdir jdbc-artifacts + + - uses: actions/download-artifact@v4 + with: + name: java-linux-amd64 + path: jdbc-artifacts/java-linux-amd64 + + - uses: actions/download-artifact@v4 + with: + name: java-linux-aarch64 + path: jdbc-artifacts/java-linux-aarch64 + + - uses: actions/download-artifact@v4 + with: + name: java-linux-amd64-musl + path: jdbc-artifacts/java-linux-amd64-musl + + - uses: actions/download-artifact@v4 + with: + name: java-linux-aarch64-musl + path: jdbc-artifacts/java-linux-aarch64-musl + + - uses: actions/download-artifact@v4 + with: + name: 
java-windows-amd64 + path: jdbc-artifacts/java-windows-amd64 + + - uses: actions/download-artifact@v4 + with: + name: java-windows-aarch64 + path: jdbc-artifacts/java-windows-aarch64 + + - uses: actions/download-artifact@v4 + with: + name: java-osx-universal + path: jdbc-artifacts/java-osx-universal + + - name: Deploy SNAPSHOT to Maven Central + shell: bash + env: + MAVEN_USERNAME: ${{ secrets.MAVEN_USERNAME }} + MAVEN_PASSWORD: ${{ secrets.MAVEN_PASSWORD }} + run: | + python ./scripts/jdbc_maven_deploy_snapshot.py jdbc-artifacts . + java-merge-vendoring-pr: name: Merge vendoring PR if: ${{ github.repository == 'duckdb/duckdb-java' && github.event_name == 'pull_request' && github.head_ref == format('vendoring-{0}', github.base_ref) }} diff --git a/scripts/jdbc_maven_deploy_snapshot.py b/scripts/jdbc_maven_deploy_snapshot.py new file mode 100755 index 000000000..94e7954bc --- /dev/null +++ b/scripts/jdbc_maven_deploy_snapshot.py @@ -0,0 +1,292 @@ +#!/usr/bin/env python3 +""" +Deploy SNAPSHOT builds to Maven Central Snapshots repository. 
+ +This script uses `mvn deploy:deploy-file` to upload pre-built JARs to +https://central.sonatype.com/repository/maven-snapshots/ + +Unlike release publishing, SNAPSHOT publishing: +- Does NOT require GPG signatures +- Does NOT require validation +- Uses standard Maven deploy mechanism +- Artifacts are cleaned up after 90 days + +Requirements: +- Maven installed and available in PATH +- MAVEN_USERNAME and MAVEN_PASSWORD environment variables set + (from https://central.sonatype.com/account) + +Usage: + python jdbc_maven_deploy_snapshot.py + +See: https://central.sonatype.org/publish/publish-portal-snapshots/ +""" + +import os +import pathlib +import subprocess +import sys +import tempfile +import re + +SNAPSHOT_REPO_URL = "https://central.sonatype.com/repository/maven-snapshots/" +GROUP_ID = "org.duckdb" +ARTIFACT_ID = "duckdb_jdbc" + +# Mapping of build directories to Maven classifiers +ARCH_BUILDS = { + 'java-linux-amd64': 'linux_amd64', + 'java-linux-aarch64': 'linux_arm64', + 'java-linux-amd64-musl': 'linux_amd64_musl', + 'java-linux-aarch64-musl': 'linux_arm64_musl', + 'java-osx-universal': 'macos_universal', + 'java-windows-amd64': 'windows_amd64', + 'java-windows-aarch64': 'windows_arm64', +} + +# Builds to combine into the main (fat) JAR +COMBINE_BUILDS = ['java-linux-amd64', 'java-osx-universal', 'java-windows-amd64', 'java-linux-aarch64'] + + +def exec(cmd, check=True): + """Execute a command and return output.""" + print(f"+ {cmd}") + result = subprocess.run(cmd, shell=True, capture_output=True, text=True) + if check and result.returncode != 0: + print(f"STDOUT: {result.stdout}") + print(f"STDERR: {result.stderr}") + raise RuntimeError(f"Command failed with code {result.returncode}") + return result.stdout.strip() + + +def get_snapshot_version(): + """Calculate SNAPSHOT version from the last release tag.""" + last_tag = exec('git tag --sort=-committerdate').split('\n')[0] + version_regex = re.compile(r'^v((\d+)\.(\d+)\.\d+\.\d+)$') + match = 
version_regex.search(last_tag)
+    if not match:
+        raise ValueError(f"Could not parse last tag: {last_tag}")
+    major = int(match.group(2))
+    minor = int(match.group(3))
+    # Increment minor version for SNAPSHOT
+    return f"{major}.{minor + 1}.0.0-SNAPSHOT"
+
+
+def create_settings_xml(settings_path):
+    """Create Maven settings.xml with Central Portal credentials."""
+    username = os.environ.get('MAVEN_USERNAME')
+    password = os.environ.get('MAVEN_PASSWORD')
+
+    if not username or not password:
+        raise RuntimeError("MAVEN_USERNAME and MAVEN_PASSWORD environment variables are required")
+
+    settings_content = f"""<?xml version="1.0" encoding="UTF-8"?>
+<settings>
+    <servers>
+        <server>
+            <id>central-snapshots</id>
+            <username>{username}</username>
+            <password>{password}</password>
+        </server>
+    </servers>
+</settings>
+"""
+    pathlib.Path(settings_path).write_text(settings_content)
+    os.chmod(settings_path, 0o600)  # Restrict permissions
+
+
+def deploy_file(settings_path, version, file_path, classifier=None, packaging='jar'):
+    """Deploy a single file to the SNAPSHOT repository."""
+    cmd = [
+        'mvn', 'deploy:deploy-file',
+        f'-DgroupId={GROUP_ID}',
+        f'-DartifactId={ARTIFACT_ID}',
+        f'-Dversion={version}',
+        f'-Dpackaging={packaging}',
+        f'-Dfile={file_path}',
+        f'-DrepositoryId=central-snapshots',
+        f'-Durl={SNAPSHOT_REPO_URL}',
+        f'-s', settings_path,
+        '-DgeneratePom=false',  # We provide our own POM
+    ]
+    if classifier:
+        cmd.append(f'-Dclassifier={classifier}')
+
+    exec(' '.join(cmd))
+
+
+def create_combined_jar(artifact_dir, staging_dir, version):
+    """Create a fat JAR combining native libraries from multiple platforms."""
+    import zipfile
+
+    combined_jar = os.path.join(staging_dir, f'duckdb_jdbc-{version}.jar')
+
+    # Start with linux-amd64 as base (without its native lib)
+    base_jar = os.path.join(artifact_dir, 'java-linux-amd64', 'duckdb_jdbc.jar')
+
+    with zipfile.ZipFile(base_jar) as src:
+        with zipfile.ZipFile(combined_jar, 'w') as dst:
+            for item in src.infolist():
+                if not item.filename.startswith('libduckdb_java.so'):
+                    dst.writestr(item, src.read(item.filename))
+
+    # Add native libraries from all
platforms
+    for build in COMBINE_BUILDS:
+        build_jar = os.path.join(artifact_dir, build, 'duckdb_jdbc.jar')
+        with zipfile.ZipFile(build_jar) as src:
+            for item in src.infolist():
+                if item.filename.startswith('libduckdb_java.so'):
+                    with zipfile.ZipFile(combined_jar, 'a') as dst:
+                        dst.writestr(item, src.read(item.filename))
+
+    return combined_jar
+
+
+def create_pom(staging_dir, version):
+    """Create POM file for the artifact."""
+    pom_content = f"""<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>{GROUP_ID}</groupId>
+    <artifactId>{ARTIFACT_ID}</artifactId>
+    <version>{version}</version>
+    <packaging>jar</packaging>
+    <name>DuckDB JDBC Driver</name>
+    <description>A JDBC-Compliant driver for the DuckDB data management system</description>
+    <url>https://www.duckdb.org</url>
+
+    <licenses>
+        <license>
+            <name>MIT License</name>
+            <url>https://raw.githubusercontent.com/duckdb/duckdb/main/LICENSE</url>
+            <distribution>repo</distribution>
+        </license>
+    </licenses>
+
+    <developers>
+        <developer>
+            <name>Mark Raasveldt</name>
+            <email>mark@duckdblabs.com</email>
+            <organization>DuckDB Labs</organization>
+            <organizationUrl>https://www.duckdblabs.com</organizationUrl>
+        </developer>
+        <developer>
+            <name>Hannes Muehleisen</name>
+            <email>hannes@duckdblabs.com</email>
+            <organization>DuckDB Labs</organization>
+            <organizationUrl>https://www.duckdblabs.com</organizationUrl>
+        </developer>
+    </developers>
+
+    <scm>
+        <connection>scm:git:git://github.com/duckdb/duckdb-java.git</connection>
+        <developerConnection>scm:git:ssh://github.com:duckdb/duckdb-java.git</developerConnection>
+        <url>https://github.com/duckdb/duckdb-java</url>
+    </scm>
+</project>
+"""
+    pom_path = os.path.join(staging_dir, f'duckdb_jdbc-{version}.pom')
+    pathlib.Path(pom_path).write_text(pom_content)
+    return pom_path
+
+
+def create_sources_jar(jdbc_root, staging_dir, version):
+    """Create sources JAR."""
+    sources_jar = os.path.join(staging_dir, f'duckdb_jdbc-{version}-sources.jar')
+    exec(f'jar -cvf {sources_jar} -C {jdbc_root}/src/main/java org')
+    return sources_jar
+
+
+def create_javadoc_jar(jdbc_root, staging_dir, version):
+    """Create javadoc JAR."""
+    javadoc_dir = tempfile.mkdtemp()
+    exec(f'javadoc -Xdoclint:-reference -d {javadoc_dir} -sourcepath {jdbc_root}/src/main/java org.duckdb')
+    javadoc_jar = os.path.join(staging_dir, f'duckdb_jdbc-{version}-javadoc.jar')
+    exec(f'jar -cvf {javadoc_jar} -C {javadoc_dir} .')
+    return javadoc_jar
+
+
+def main():
+    if len(sys.argv) < 2:
+        print("Usage: jdbc_maven_deploy_snapshot.py <artifact_dir> [jdbc_root_path]")
+        print("\nDeploys SNAPSHOT builds to Maven Central
Snapshots repository.") + sys.exit(1) + + artifact_dir = sys.argv[1] + jdbc_root = sys.argv[2] if len(sys.argv) > 2 else '.' + + if not os.path.isdir(artifact_dir): + print(f"Error: artifact_dir '{artifact_dir}' is not a directory") + sys.exit(1) + + version = get_snapshot_version() + print(f"Deploying SNAPSHOT version: {version}") + + staging_dir = tempfile.mkdtemp() + settings_path = os.path.join(staging_dir, 'settings.xml') + + # Create Maven settings with credentials + create_settings_xml(settings_path) + + # Create artifacts + print("\n=== Creating artifacts ===") + pom_path = create_pom(staging_dir, version) + combined_jar = create_combined_jar(artifact_dir, staging_dir, version) + sources_jar = create_sources_jar(jdbc_root, staging_dir, version) + javadoc_jar = create_javadoc_jar(jdbc_root, staging_dir, version) + + # Deploy POM first (required for other artifacts) + print("\n=== Deploying POM ===") + deploy_file(settings_path, version, pom_path, packaging='pom') + + # Deploy main JAR + print("\n=== Deploying main JAR ===") + deploy_file(settings_path, version, combined_jar) + + # Deploy sources and javadoc + print("\n=== Deploying sources JAR ===") + deploy_file(settings_path, version, sources_jar, classifier='sources') + + print("\n=== Deploying javadoc JAR ===") + deploy_file(settings_path, version, javadoc_jar, classifier='javadoc') + + # Deploy architecture-specific JARs + print("\n=== Deploying architecture-specific JARs ===") + for build_name, classifier in ARCH_BUILDS.items(): + jar_path = os.path.join(artifact_dir, build_name, 'duckdb_jdbc.jar') + if os.path.exists(jar_path): + print(f"Deploying {classifier}...") + deploy_file(settings_path, version, jar_path, classifier=classifier) + else: + print(f"Warning: {jar_path} not found, skipping") + + # Create nolib JAR (JAR without native libraries) + print("\n=== Deploying nolib JAR ===") + import zipfile + nolib_jar = os.path.join(staging_dir, f'duckdb_jdbc-{version}-nolib.jar') + base_jar = 
os.path.join(artifact_dir, 'java-linux-amd64', 'duckdb_jdbc.jar')
+    with zipfile.ZipFile(base_jar) as src:
+        with zipfile.ZipFile(nolib_jar, 'w') as dst:
+            for item in src.infolist():
+                if not item.filename.startswith('libduckdb_java.so'):
+                    dst.writestr(item, src.read(item.filename))
+    deploy_file(settings_path, version, nolib_jar, classifier='nolib')
+
+    print(f"\n=== SUCCESS ===")
+    print(f"SNAPSHOT {version} deployed to {SNAPSHOT_REPO_URL}")
+    print(f"\nTo use in Maven:")
+    print(f"  <dependency>")
+    print(f"    <groupId>{GROUP_ID}</groupId>")
+    print(f"    <artifactId>{ARTIFACT_ID}</artifactId>")
+    print(f"    <version>{version}</version>")
+    print(f"  </dependency>")
+    print(f"\nWith repository:")
+    print(f"  <repository>")
+    print(f"    <id>central-snapshots</id>")
+    print(f"    <url>{SNAPSHOT_REPO_URL}</url>")
+    print(f"    <snapshots><enabled>true</enabled></snapshots>")
+    print(f"  </repository>")
+
+
+if __name__ == '__main__':
+    main()

From 531b0f5def056cd8ed61993103d35e6e4b0146f5 Mon Sep 17 00:00:00 2001
From: Jules Ivanic
Date: Tue, 27 Jan 2026 18:53:24 +1100
Subject: [PATCH 2/5] Fix SNAPSHOT version calculation to increment patch number

Version v1.4.4.0 should produce 1.4.5.0-SNAPSHOT (not 1.5.0.0-SNAPSHOT)
---
 scripts/jdbc_maven_deploy_snapshot.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/scripts/jdbc_maven_deploy_snapshot.py b/scripts/jdbc_maven_deploy_snapshot.py
index 94e7954bc..1d02491d0 100755
--- a/scripts/jdbc_maven_deploy_snapshot.py
+++ b/scripts/jdbc_maven_deploy_snapshot.py
@@ -62,14 +62,15 @@ def exec(cmd, check=True):
 
 def get_snapshot_version():
     """Calculate SNAPSHOT version from the last release tag."""
     last_tag = exec('git tag --sort=-committerdate').split('\n')[0]
-    version_regex = re.compile(r'^v((\d+)\.(\d+)\.\d+\.\d+)$')
+    version_regex = re.compile(r'^v(\d+)\.(\d+)\.(\d+)\.(\d+)$')
     match = version_regex.search(last_tag)
     if not match:
         raise ValueError(f"Could not parse last tag: {last_tag}")
-    major = int(match.group(2))
-    minor = int(match.group(3))
-    # Increment minor version for SNAPSHOT
-    return f"{major}.{minor + 1}.0.0-SNAPSHOT"
+    major = int(match.group(1))
+    minor = 
int(match.group(2)) + patch = int(match.group(3)) + # Increment patch version for SNAPSHOT (e.g., v1.4.4.0 -> 1.4.5.0-SNAPSHOT) + return f"{major}.{minor}.{patch + 1}.0-SNAPSHOT" def create_settings_xml(settings_path): From b5d3d8345cb0a7fa7cbe3c4f048e8a7786c23c04 Mon Sep 17 00:00:00 2001 From: Jules Ivanic Date: Tue, 27 Jan 2026 19:12:26 +1100 Subject: [PATCH 3/5] Include commit hash in SNAPSHOT version for traceability e.g., v1.4.4.0 + commit abc1234 -> 1.4.5.0-abc1234-SNAPSHOT --- scripts/jdbc_maven_deploy_snapshot.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/scripts/jdbc_maven_deploy_snapshot.py b/scripts/jdbc_maven_deploy_snapshot.py index 1d02491d0..b0243b5c4 100755 --- a/scripts/jdbc_maven_deploy_snapshot.py +++ b/scripts/jdbc_maven_deploy_snapshot.py @@ -60,7 +60,7 @@ def exec(cmd, check=True): def get_snapshot_version(): - """Calculate SNAPSHOT version from the last release tag.""" + """Calculate SNAPSHOT version from the last release tag and current commit.""" last_tag = exec('git tag --sort=-committerdate').split('\n')[0] version_regex = re.compile(r'^v(\d+)\.(\d+)\.(\d+)\.(\d+)$') match = version_regex.search(last_tag) @@ -69,8 +69,11 @@ def get_snapshot_version(): major = int(match.group(1)) minor = int(match.group(2)) patch = int(match.group(3)) - # Increment patch version for SNAPSHOT (e.g., v1.4.4.0 -> 1.4.5.0-SNAPSHOT) - return f"{major}.{minor}.{patch + 1}.0-SNAPSHOT" + # Get short commit hash for traceability + commit_hash = exec('git rev-parse --short HEAD') + # Increment patch version and include commit hash + # e.g., v1.4.4.0 + commit abc1234 -> 1.4.5.0-abc1234-SNAPSHOT + return f"{major}.{minor}.{patch + 1}.0-{commit_hash}-SNAPSHOT" def create_settings_xml(settings_path): From 4e3a67eb78789a0bacfc726758a33a712dbb589a Mon Sep 17 00:00:00 2001 From: Jules Ivanic Date: Tue, 27 Jan 2026 19:37:06 +1100 Subject: [PATCH 4/5] Improve SNAPSHOT publishing script - Rename exec() to run_cmd() to avoid shadowing Python 
built-in - Move imports to top-level (zipfile, shutil) - Fix inefficient zipfile handling - single open instead of per-iteration - Add cleanup of temp directories (staging_dir, javadoc_dir) - Add timeout-minutes: 30 to workflow job - Add docstring explaining version component behavior - Extract create_nolib_jar to separate function --- .github/workflows/Java.yml | 1 + scripts/jdbc_maven_deploy_snapshot.py | 195 ++++++++++++++------------ 2 files changed, 108 insertions(+), 88 deletions(-) diff --git a/.github/workflows/Java.yml b/.github/workflows/Java.yml index 40a670c06..8f540f587 100644 --- a/.github/workflows/Java.yml +++ b/.github/workflows/Java.yml @@ -730,6 +730,7 @@ jobs: if: ${{ github.repository == 'duckdb/duckdb-java' && github.ref == 'refs/heads/main' && github.event_name == 'push' }} name: Maven SNAPSHOT Deploy runs-on: ubuntu-latest + timeout-minutes: 30 needs: - java-linux-amd64 - java-linux-amd64-tck diff --git a/scripts/jdbc_maven_deploy_snapshot.py b/scripts/jdbc_maven_deploy_snapshot.py index b0243b5c4..679801096 100755 --- a/scripts/jdbc_maven_deploy_snapshot.py +++ b/scripts/jdbc_maven_deploy_snapshot.py @@ -24,10 +24,12 @@ import os import pathlib +import re +import shutil import subprocess import sys import tempfile -import re +import zipfile SNAPSHOT_REPO_URL = "https://central.sonatype.com/repository/maven-snapshots/" GROUP_ID = "org.duckdb" @@ -48,7 +50,7 @@ COMBINE_BUILDS = ['java-linux-amd64', 'java-osx-universal', 'java-windows-amd64', 'java-linux-aarch64'] -def exec(cmd, check=True): +def run_cmd(cmd, check=True): """Execute a command and return output.""" print(f"+ {cmd}") result = subprocess.run(cmd, shell=True, capture_output=True, text=True) @@ -60,8 +62,14 @@ def exec(cmd, check=True): def get_snapshot_version(): - """Calculate SNAPSHOT version from the last release tag and current commit.""" - last_tag = exec('git tag --sort=-committerdate').split('\n')[0] + """ + Calculate SNAPSHOT version from the last release tag and current 
commit. + + DuckDB uses 4-part versioning (e.g., v1.4.4.0). We increment the third + component (patch) and reset the fourth to 0 for SNAPSHOTs. + Example: v1.4.4.0 + commit abc1234 -> 1.4.5.0-abc1234-SNAPSHOT + """ + last_tag = run_cmd('git tag --sort=-committerdate').split('\n')[0] version_regex = re.compile(r'^v(\d+)\.(\d+)\.(\d+)\.(\d+)$') match = version_regex.search(last_tag) if not match: @@ -69,10 +77,10 @@ def get_snapshot_version(): major = int(match.group(1)) minor = int(match.group(2)) patch = int(match.group(3)) + # Fourth component is intentionally reset to 0 for SNAPSHOT versions # Get short commit hash for traceability - commit_hash = exec('git rev-parse --short HEAD') + commit_hash = run_cmd('git rev-parse --short HEAD') # Increment patch version and include commit hash - # e.g., v1.4.4.0 + commit abc1234 -> 1.4.5.0-abc1234-SNAPSHOT return f"{major}.{minor}.{patch + 1}.0-{commit_hash}-SNAPSHOT" @@ -116,36 +124,46 @@ def deploy_file(settings_path, version, file_path, classifier=None, packaging='j if classifier: cmd.append(f'-Dclassifier={classifier}') - exec(' '.join(cmd)) + run_cmd(' '.join(cmd)) def create_combined_jar(artifact_dir, staging_dir, version): """Create a fat JAR combining native libraries from multiple platforms.""" - import zipfile - combined_jar = os.path.join(staging_dir, f'duckdb_jdbc-{version}.jar') - - # Start with linux-amd64 as base (without its native lib) base_jar = os.path.join(artifact_dir, 'java-linux-amd64', 'duckdb_jdbc.jar') - with zipfile.ZipFile(base_jar) as src: - with zipfile.ZipFile(combined_jar, 'w') as dst: + with zipfile.ZipFile(combined_jar, 'w') as dst: + # Copy base jar excluding native libs + with zipfile.ZipFile(base_jar) as src: for item in src.infolist(): if not item.filename.startswith('libduckdb_java.so'): dst.writestr(item, src.read(item.filename)) - # Add native libraries from all platforms - for build in COMBINE_BUILDS: - build_jar = os.path.join(artifact_dir, build, 'duckdb_jdbc.jar') - with 
zipfile.ZipFile(build_jar) as src: - for item in src.infolist(): - if item.filename.startswith('libduckdb_java.so'): - with zipfile.ZipFile(combined_jar, 'a') as dst: + # Add native libraries from all platforms + for build in COMBINE_BUILDS: + build_jar = os.path.join(artifact_dir, build, 'duckdb_jdbc.jar') + with zipfile.ZipFile(build_jar) as src: + for item in src.infolist(): + if item.filename.startswith('libduckdb_java.so'): dst.writestr(item, src.read(item.filename)) return combined_jar +def create_nolib_jar(artifact_dir, staging_dir, version): + """Create a JAR without native libraries.""" + nolib_jar = os.path.join(staging_dir, f'duckdb_jdbc-{version}-nolib.jar') + base_jar = os.path.join(artifact_dir, 'java-linux-amd64', 'duckdb_jdbc.jar') + + with zipfile.ZipFile(base_jar) as src: + with zipfile.ZipFile(nolib_jar, 'w') as dst: + for item in src.infolist(): + if not item.filename.startswith('libduckdb_java.so'): + dst.writestr(item, src.read(item.filename)) + + return nolib_jar + + def create_pom(staging_dir, version): """Create POM file for the artifact.""" pom_content = f""" @@ -197,17 +215,20 @@ def create_pom(staging_dir, version): def create_sources_jar(jdbc_root, staging_dir, version): """Create sources JAR.""" sources_jar = os.path.join(staging_dir, f'duckdb_jdbc-{version}-sources.jar') - exec(f'jar -cvf {sources_jar} -C {jdbc_root}/src/main/java org') + run_cmd(f'jar -cvf {sources_jar} -C {jdbc_root}/src/main/java org') return sources_jar def create_javadoc_jar(jdbc_root, staging_dir, version): """Create javadoc JAR.""" javadoc_dir = tempfile.mkdtemp() - exec(f'javadoc -Xdoclint:-reference -d {javadoc_dir} -sourcepath {jdbc_root}/src/main/java org.duckdb') - javadoc_jar = os.path.join(staging_dir, f'duckdb_jdbc-{version}-javadoc.jar') - exec(f'jar -cvf {javadoc_jar} -C {javadoc_dir} .') - return javadoc_jar + try: + run_cmd(f'javadoc -Xdoclint:-reference -d {javadoc_dir} -sourcepath {jdbc_root}/src/main/java org.duckdb') + javadoc_jar = 
os.path.join(staging_dir, f'duckdb_jdbc-{version}-javadoc.jar') + run_cmd(f'jar -cvf {javadoc_jar} -C {javadoc_dir} .') + return javadoc_jar + finally: + shutil.rmtree(javadoc_dir, ignore_errors=True) def main(): @@ -227,69 +248,67 @@ def main(): print(f"Deploying SNAPSHOT version: {version}") staging_dir = tempfile.mkdtemp() - settings_path = os.path.join(staging_dir, 'settings.xml') - - # Create Maven settings with credentials - create_settings_xml(settings_path) - - # Create artifacts - print("\n=== Creating artifacts ===") - pom_path = create_pom(staging_dir, version) - combined_jar = create_combined_jar(artifact_dir, staging_dir, version) - sources_jar = create_sources_jar(jdbc_root, staging_dir, version) - javadoc_jar = create_javadoc_jar(jdbc_root, staging_dir, version) - - # Deploy POM first (required for other artifacts) - print("\n=== Deploying POM ===") - deploy_file(settings_path, version, pom_path, packaging='pom') - - # Deploy main JAR - print("\n=== Deploying main JAR ===") - deploy_file(settings_path, version, combined_jar) - - # Deploy sources and javadoc - print("\n=== Deploying sources JAR ===") - deploy_file(settings_path, version, sources_jar, classifier='sources') - - print("\n=== Deploying javadoc JAR ===") - deploy_file(settings_path, version, javadoc_jar, classifier='javadoc') - - # Deploy architecture-specific JARs - print("\n=== Deploying architecture-specific JARs ===") - for build_name, classifier in ARCH_BUILDS.items(): - jar_path = os.path.join(artifact_dir, build_name, 'duckdb_jdbc.jar') - if os.path.exists(jar_path): - print(f"Deploying {classifier}...") - deploy_file(settings_path, version, jar_path, classifier=classifier) - else: - print(f"Warning: {jar_path} not found, skipping") - - # Create nolib JAR (JAR without native libraries) - print("\n=== Deploying nolib JAR ===") - import zipfile - nolib_jar = os.path.join(staging_dir, f'duckdb_jdbc-{version}-nolib.jar') - base_jar = os.path.join(artifact_dir, 'java-linux-amd64', 
'duckdb_jdbc.jar') - with zipfile.ZipFile(base_jar) as src: - with zipfile.ZipFile(nolib_jar, 'w') as dst: - for item in src.infolist(): - if not item.filename.startswith('libduckdb_java.so'): - dst.writestr(item, src.read(item.filename)) - deploy_file(settings_path, version, nolib_jar, classifier='nolib') - - print(f"\n=== SUCCESS ===") - print(f"SNAPSHOT {version} deployed to {SNAPSHOT_REPO_URL}") - print(f"\nTo use in Maven:") - print(f" ") - print(f" {GROUP_ID}") - print(f" {ARTIFACT_ID}") - print(f" {version}") - print(f" ") - print(f"\nWith repository:") - print(f" ") - print(f" central-snapshots") - print(f" {SNAPSHOT_REPO_URL}") - print(f" true") - print(f" ") + try: + settings_path = os.path.join(staging_dir, 'settings.xml') + + # Create Maven settings with credentials + create_settings_xml(settings_path) + + # Create artifacts + print("\n=== Creating artifacts ===") + pom_path = create_pom(staging_dir, version) + combined_jar = create_combined_jar(artifact_dir, staging_dir, version) + sources_jar = create_sources_jar(jdbc_root, staging_dir, version) + javadoc_jar = create_javadoc_jar(jdbc_root, staging_dir, version) + nolib_jar = create_nolib_jar(artifact_dir, staging_dir, version) + + # Deploy POM first (required for other artifacts) + print("\n=== Deploying POM ===") + deploy_file(settings_path, version, pom_path, packaging='pom') + + # Deploy main JAR + print("\n=== Deploying main JAR ===") + deploy_file(settings_path, version, combined_jar) + + # Deploy sources and javadoc + print("\n=== Deploying sources JAR ===") + deploy_file(settings_path, version, sources_jar, classifier='sources') + + print("\n=== Deploying javadoc JAR ===") + deploy_file(settings_path, version, javadoc_jar, classifier='javadoc') + + # Deploy nolib JAR + print("\n=== Deploying nolib JAR ===") + deploy_file(settings_path, version, nolib_jar, classifier='nolib') + + # Deploy architecture-specific JARs + print("\n=== Deploying architecture-specific JARs ===") + for build_name, 
classifier in ARCH_BUILDS.items():
+            jar_path = os.path.join(artifact_dir, build_name, 'duckdb_jdbc.jar')
+            if os.path.exists(jar_path):
+                print(f"Deploying {classifier}...")
+                deploy_file(settings_path, version, jar_path, classifier=classifier)
+            else:
+                print(f"Warning: {jar_path} not found, skipping")
+
+        print(f"\n=== SUCCESS ===")
+        print(f"SNAPSHOT {version} deployed to {SNAPSHOT_REPO_URL}")
+        print(f"\nTo use in Maven:")
+        print(f"  <dependency>")
+        print(f"    <groupId>{GROUP_ID}</groupId>")
+        print(f"    <artifactId>{ARTIFACT_ID}</artifactId>")
+        print(f"    <version>{version}</version>")
+        print(f"  </dependency>")
+        print(f"\nWith repository:")
+        print(f"  <repository>")
+        print(f"    <id>central-snapshots</id>")
+        print(f"    <url>{SNAPSHOT_REPO_URL}</url>")
+        print(f"    <snapshots><enabled>true</enabled></snapshots>")
+        print(f"  </repository>")
+
+    finally:
+        # Clean up staging directory
+        shutil.rmtree(staging_dir, ignore_errors=True)
 
 
 if __name__ == '__main__':

From bf86f05d6da88144428b274b2c326e7c2b7eff25 Mon Sep 17 00:00:00 2001
From: Jules Ivanic
Date: Tue, 27 Jan 2026 19:43:23 +1100
Subject: [PATCH 5/5] Add documentation comments for JAR classifiers

---
 scripts/jdbc_maven_deploy_snapshot.py | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/scripts/jdbc_maven_deploy_snapshot.py b/scripts/jdbc_maven_deploy_snapshot.py
index 679801096..1ad4b06ed 100755
--- a/scripts/jdbc_maven_deploy_snapshot.py
+++ b/scripts/jdbc_maven_deploy_snapshot.py
@@ -35,18 +35,21 @@ GROUP_ID = "org.duckdb"
 ARTIFACT_ID = "duckdb_jdbc"
 
-# Mapping of build directories to Maven classifiers
+# Mapping of build directories to Maven classifiers.
+# These architecture-specific JARs contain native libraries for a single platform only,
+# useful for reducing deployment size when the target platform is known.
ARCH_BUILDS = { 'java-linux-amd64': 'linux_amd64', 'java-linux-aarch64': 'linux_arm64', - 'java-linux-amd64-musl': 'linux_amd64_musl', - 'java-linux-aarch64-musl': 'linux_arm64_musl', - 'java-osx-universal': 'macos_universal', + 'java-linux-amd64-musl': 'linux_amd64_musl', # Alpine Linux + 'java-linux-aarch64-musl': 'linux_arm64_musl', # Alpine Linux ARM + 'java-osx-universal': 'macos_universal', # Intel + Apple Silicon 'java-windows-amd64': 'windows_amd64', 'java-windows-aarch64': 'windows_arm64', } -# Builds to combine into the main (fat) JAR +# Builds to combine into the main (fat) JAR. +# The main JAR includes natives for all major platforms for convenience. COMBINE_BUILDS = ['java-linux-amd64', 'java-osx-universal', 'java-windows-amd64', 'java-linux-aarch64'] @@ -151,7 +154,16 @@ def create_combined_jar(artifact_dir, staging_dir, version): def create_nolib_jar(artifact_dir, staging_dir, version): - """Create a JAR without native libraries.""" + """ + Create a JAR without native libraries (nolib classifier). + + This variant contains only Java classes without any bundled native libraries. + Useful for: + - Custom native library management (loading from system path or custom location) + - Platforms not covered by pre-built natives (users compile their own) + - Smaller artifact size when natives are managed separately + - Container/deployment scenarios where natives are provided at infrastructure level + """ nolib_jar = os.path.join(staging_dir, f'duckdb_jdbc-{version}-nolib.jar') base_jar = os.path.join(artifact_dir, 'java-linux-amd64', 'duckdb_jdbc.jar')