diff --git a/.github/workflows/sync-and-build.yml b/.github/workflows/sync-and-build.yml
index fdaa6e1f1..21a2a4e1a 100644
--- a/.github/workflows/sync-and-build.yml
+++ b/.github/workflows/sync-and-build.yml
@@ -1,282 +1,286 @@
-name: Sync and Build
-
-on:
- workflow_dispatch:
- workflow_call:
- inputs:
- docker_image:
- description: Docker image to use for the build
- required: false
- type: string
- default: kmake-image:ver.1.0
- build_matrix:
- description: Build matrix for multi target builds
- type: string
- required: true
-
-permissions:
- packages: read
-
-jobs:
- sync-and-build:
- runs-on:
- group: GHA-video-Prd-SelfHosted-RG
- labels: [self-hosted, video-prd-u2204-x64-large-od-ephem]
-
- steps:
- - name: Pull Docker image
- uses: qualcomm-linux/kernel-config/.github/actions/pull_docker_image@main
- with:
- image: ${{ inputs.docker_image }}
-
- - name: Sync codebase
- uses: qualcomm-linux/video-driver/.github/actions/sync@video.upstream(stage)
- with:
- event_name: ${{ github.event_name }}
- pr_ref: ${{ github.event.pull_request.head.ref }}
- pr_repo: ${{ github.event.pull_request.head.repo.full_name }}
- base_ref: ${{ github.ref_name }}
- caller_workflow: build
-
- - name: Build workspace
- uses: qualcomm-linux/video-driver/.github/actions/build@video.upstream(stage)
- with:
- docker_image: kmake-image:ver.1.0
- workspace_path: ${{ github.workspace }}
-
- - name: Download iris_test_app from the s3
- shell: bash
- run: |
- set -euo pipefail
- mkdir -p "${{github.workspace }}/v4l-video-test-app/build/"
- echo " syncing files from s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/iris_test_app/"
- aws s3 sync "s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/iris_test_app/" "${{ github.workspace }}/v4l-video-test-app/build/"
- echo " ✅ Download complete"
- ls ${{ github.workspace }}/v4l-video-test-app/build/
-
- - name: Download firmware file from S3
- shell: bash
- run: |
- set -euo pipefail
- mkdir -p "${{ github.workspace }}/downloads"
- echo "📥 Syncing files from S3 path: s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/"
- aws s3 sync "s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/" "${{ github.workspace }}/downloads"
- echo "✅ Download complete"
- [ -f "${{ github.workspace }}/downloads/vpu20_1v.mbn" ] || { echo "❌ Missing vpu20_1v.mbn"; exit 1; }
-
- - name: Download the video-contents for testing
- shell: bash
- run: |
- set -euo pipefail
- mkdir -p "${{ github.workspace }}/downloads"
- echo "Downloading the video-content files"
- wget -q https://github.com/qualcomm-linux/qcom-linux-testkit/releases/download/IRIS-Video-Files-v1.0/video_clips_iris.tar.gz \
- -O "${{ github.workspace }}/downloads/video_clips_iris.tar.gz"
- [ -f "${{ github.workspace }}/downloads/video_clips_iris.tar.gz" ] || { echo "❌ Failed to download video_clips_iris.tar.gz"; exit 1; }
-
- - name: Prepare /data/vendor/iris_test_app and list contents
- shell: bash
- run: |
- set -euo pipefail
- data_dir="${{ github.workspace }}/kobj/tar-install/data/vendor/iris_test_app"
- mkdir -p "$data_dir"
- cp "v4l-video-test-app/build/iris_v4l2_test" "$data_dir/"
- cp "${{ github.workspace }}/downloads/vpu20_1v.mbn" "$data_dir/"
- cp "${{ github.workspace }}/downloads/video_clips_iris.tar.gz" "$data_dir/"
- echo "📂 Contents of $data_dir:"
- ls -lh "$data_dir"
-
- - name: Create compressed kernel ramdisk archives
- shell: bash
- run: |
- set -euo pipefail
- cd "${{ github.workspace }}/kobj/tar-install"
- find lib/modules data | cpio -o -H newc --owner=0:0 | gzip -9 > "${{ github.workspace }}/local-kernel-ramdisk.cpio.gz"
- cd - > /dev/null
- ls -lh "${{ github.workspace }}/local-kernel-ramdisk.cpio.gz"
-
- - name: Download meta-qcom stable initramfs artifacts from S3
- shell: bash
- run: |
- set -euo pipefail
- mkdir -p "${{ github.workspace }}/downloads"
- echo "🔍 Fetching initramfs files from S3 bucket: s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/"
- aws s3 cp s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz "${{ github.workspace }}/downloads/"
- echo "Initramfs files downloaded to: ${{ github.workspace }}/downloads"
-
- - name: Decompress ramdisk files and rename .cpio.gz files
- shell: bash
- run: |
- set -euo pipefail
- cd "${{ github.workspace }}/downloads"
- echo " Decompressing and renaming .cpio.gz files..."
- gunzip -c initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz > kerneltest.cpio
-
- - name: Merge and repackage initramfs
- shell: bash
- run: |
- set -euo pipefail
- echo "🔧 Starting repackaging process"
-
- workspace="${{ github.workspace }}"
- mkdir -p "$workspace/combineramdisk"
- cp "$workspace/local-kernel-ramdisk.cpio.gz" "$workspace/combineramdisk/"
- cd "$workspace/combineramdisk"
-
- # Decompress local-kernel-ramdisk
- mv local-kernel-ramdisk.cpio.gz local-kernel-ramdisk.cpio.gz.bak
- gunzip -c local-kernel-ramdisk.cpio.gz.bak > local-kernel-ramdisk.cpio
-
- # Copy kerneltest from downloads
- cp "$workspace/downloads/kerneltest.cpio" .
-
- # Merge kerneltest and local-kernel-ramdisk
- cat kerneltest.cpio local-kernel-ramdisk.cpio > video-merged.cpio
- gzip -9 video-merged.cpio
-
- # Create temp workspace to clean up archive
- mkdir -p temp_merge
- cd temp_merge
- cpio -id --no-absolute-filenames < ../kerneltest.cpio
- cpio -id --no-absolute-filenames < ../local-kernel-ramdisk.cpio
- cd ..
-
- # Remove old merged archive
- rm -f video-merged.cpio.gz
-
- # Repackage clean archive
- cd temp_merge
- find . | cpio -o -H newc --owner=0:0 > ../video-merged.cpio
- cd ..
- gzip -9 video-merged.cpio
-
- # Cleanup
- rm -rf temp_merge kerneltest.cpio local-kernel-ramdisk.cpio
- echo "Final archive: $workspace/combineramdisk/video-merged.cpio.gz"
- ls -lh "$workspace/combineramdisk/video-merged.cpio.gz"
-
- - name: Validate build_matrix and jq
- shell: bash
- run: |
- set -euo pipefail
- machines_json='${{ inputs.build_matrix }}'
- if ! command -v jq >/dev/null 2>&1; then
- echo "❌ jq is not installed on this runner. Please install jq."
- exit 1
- fi
- echo "$machines_json" | jq -e . >/dev/null
- [ "$(echo "$machines_json" | jq length)" -gt 0 ] || { echo "❌ build_matrix is empty"; exit 1; }
- echo "✅ build_matrix is valid JSON"
-
- - name: Append artifacts to S3 upload list
- shell: bash
- run: |
- set -euo pipefail
- workspace="${{ github.workspace }}"
- file_list="$workspace/artifacts/file_list.txt"
- mkdir -p "$workspace/artifacts"
-
- # Fresh file_list
- : > "$file_list"
-
- # Package lib/modules (xz-compressed) — exclude risky symlinks
- mod_root="$workspace/kobj/tar-install/lib/modules"
- [ -d "$mod_root" ] || { echo "❌ Missing directory: $mod_root"; exit 1; }
- tar -C "$workspace/kobj/tar-install" \
- --exclude='lib/modules/*/build' \
- --exclude='lib/modules/*/source' \
- --numeric-owner --owner=0 --group=0 \
- -cJf "$workspace/modules.tar.xz" lib/modules
-
- # Safety checks on the tar
- if tar -Jtvf "$workspace/modules.tar.xz" | grep -q ' -> '; then
- echo "❌ Symlinks found in modules archive (should be none)"; exit 1
- fi
- if tar -Jtf "$workspace/modules.tar.xz" | grep -Eq '^/|(^|/)\.\.(/|$)'; then
- echo "❌ Unsafe paths found in modules archive"; exit 1
- fi
-
- echo "$workspace/modules.tar.xz" >> "$file_list"
- echo "✅ Queued for upload: $workspace/modules.tar.xz"
-
- # Kernel Image + merged video ramdisk (no local ramdisk)
- IMAGE_PATH="$workspace/kobj/arch/arm64/boot/Image"
- VMLINUX_PATH="$workspace/kobj/vmlinux"
- MERGED_PATH="$workspace/combineramdisk/video-merged.cpio.gz"
-
- [ -f "$IMAGE_PATH" ] || { echo "❌ Missing expected file: $IMAGE_PATH"; exit 1; }
- [ -f "$VMLINUX_PATH" ] || { echo "❌ Missing expected file: $VMLINUX_PATH"; exit 1; }
- [ -f "$MERGED_PATH" ] || { echo "❌ Missing merged cpio: $MERGED_PATH"; exit 1; }
-
- echo "$IMAGE_PATH" >> "$file_list"
- echo "✅ Queued for upload: $IMAGE_PATH"
- echo "$VMLINUX_PATH" >> "$file_list"
- echo "✅ Queued for upload: $VMLINUX_PATH"
- echo "$MERGED_PATH" >> "$file_list"
- echo "✅ Queued for upload: $MERGED_PATH"
-
- # Loop through all machines from the build_matrix input and add DTBs
- machines='${{ inputs.build_matrix }}'
- for machine in $(echo "$machines" | jq -r '.[].machine'); do
- dtb="$workspace/kobj/arch/arm64/boot/dts/qcom/${machine}.dtb"
- if [ -f "$dtb" ]; then
- echo "$dtb" >> "$file_list"
- echo "✅ Queued for upload: $dtb"
- else
- echo "❌ Missing DTB: $dtb"
- exit 1
- fi
- done
-
- echo "----- Files queued for S3 upload -----"
- cat "$file_list"
-
- - name: Upload all artifacts to S3
- uses: qualcomm-linux/video-driver/.github/actions/aws_s3_helper@video.upstream(stage)
- with:
- s3_bucket: qli-prd-video-gh-artifacts
- local_file: ${{ github.workspace }}/artifacts/file_list.txt
- mode: multi-upload
- upload_location: ${{ github.repository_owner }}/${{ github.event.repository.name }}/${{ github.run_id }}-${{ github.run_attempt }}
-
-
- - name: Clean up
- if: always()
- shell: bash
- run: |
- set -euo pipefail
- ws="${{ github.workspace }}"
- rm -rf "$ws/artqifacts" || true
- rm -rf "$ws/combineramdisk" || true
- rm -rf "$ws/downloads" || true
- rm -rf "$ws/kobj" || true
- rm -f "$ws/modules.tar.xz" || true
- rm -f "$ws/local-kernel-ramdisk.cpio.gz" || true
-
-
- - name: Update summary
- if: success() || failure()
- shell: bash
- run: |
- status="${{ steps.build_workspace.outcome }}"
- if [ "$status" = "success" ]; then
- summary=":heavy_check_mark: Build Success"
- else
- summary=":x: Build Failed"
- fi
-
- ws="${{ github.workspace }}"
- file_list="$ws/artifacts/file_list.txt"
-
- {
- echo "Build Summary
"
- echo "$summary"
- if [ -f "$file_list" ]; then
- echo ""
- echo "Artifacts queued for upload:"
- while IFS= read -r line; do
- echo "- $line"
- done < "$file_list"
- fi
- echo " "
+name: Sync and Build
+
+on:
+ workflow_dispatch:
+ workflow_call:
+ inputs:
+ docker_image:
+ description: Docker image to use for the build
+ required: false
+ type: string
+ default: kmake-image:ver.1.0
+ build_matrix:
+ description: Build matrix for multi target builds
+ type: string
+ required: true
+
+permissions:
+ packages: read
+
+jobs:
+ sync-and-build:
+ runs-on:
+ group: GHA-video-Prd-SelfHosted-RG
+ labels: [self-hosted, video-prd-u2204-x64-large-od-ephem]
+
+ steps:
+ - name: Pull Docker image
+ uses: qualcomm-linux/kernel-config/.github/actions/pull_docker_image@main
+ with:
+ image: ${{ inputs.docker_image }}
+
+ - name: Sync codebase
+ uses: qualcomm-linux/video-driver/.github/actions/sync@video.upstream(stage)
+ with:
+ event_name: ${{ github.event_name }}
+ pr_ref: ${{ github.event.pull_request.head.ref }}
+ pr_repo: ${{ github.event.pull_request.head.repo.full_name }}
+ base_ref: ${{ github.ref_name }}
+ caller_workflow: build
+
+ - name: Build workspace
+ id: build_workspace
+ uses: qualcomm-linux/video-driver/.github/actions/build@video.upstream(stage)
+ with:
+ docker_image: ${{ inputs.docker_image }}
+ workspace_path: ${{ github.workspace }}
+
+ - name: Download iris_test_app from S3
+ shell: bash
+ run: |
+ set -euo pipefail
+ mkdir -p "${{github.workspace }}/v4l-video-test-app/build/"
+ echo " syncing files from s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/iris_test_app/"
+ aws s3 sync "s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/iris_test_app/" "${{ github.workspace }}/v4l-video-test-app/build/"
+ echo " ✅ Download complete"
+ ls ${{ github.workspace }}/v4l-video-test-app/build/
+
+ - name: Download firmware file from S3
+ shell: bash
+ run: |
+ set -euo pipefail
+ mkdir -p "${{ github.workspace }}/downloads"
+ echo "📥 Syncing files from S3 path: s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/"
+ aws s3 sync "s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/" "${{ github.workspace }}/downloads"
+ echo "✅ Download complete"
+ [ -f "${{ github.workspace }}/downloads/vpu20_1v.mbn" ] || { echo "❌ Missing vpu20_1v.mbn"; exit 1; }
+
+ - name: Download video test content
+ shell: bash
+ run: |
+ set -euo pipefail
+ mkdir -p "${{ github.workspace }}/downloads"
+ echo "Downloading the video-content files"
+ wget -q https://github.com/qualcomm-linux/qcom-linux-testkit/releases/download/IRIS-Video-Files-v1.0/video_clips_iris.tar.gz \
+ -O "${{ github.workspace }}/downloads/video_clips_iris.tar.gz"
+ [ -f "${{ github.workspace }}/downloads/video_clips_iris.tar.gz" ] || { echo "❌ Failed to download video_clips_iris.tar.gz"; exit 1; }
+
+ - name: Prepare /data/vendor/iris_test_app and list contents
+ shell: bash
+ run: |
+ set -euo pipefail
+ data_dir="${{ github.workspace }}/kobj/tar-install/data/vendor/iris_test_app"
+ fw_dir="$data_dir/firmware"
+ mkdir -p "$data_dir" "$fw_dir"
+ cp "v4l-video-test-app/build/iris_v4l2_test" "$data_dir/"
+ cp "${{ github.workspace }}/downloads/vpu20_1v.mbn" "$fw_dir/"
+ cp "${{ github.workspace }}/downloads/video_clips_iris.tar.gz" "$data_dir/"
+ echo "📂 Contents of $data_dir:"
+ ls -lh "$data_dir"
+
+ - name: Create compressed kernel ramdisk archives
+ shell: bash
+ run: |
+ set -euo pipefail
+ cd "${{ github.workspace }}/kobj/tar-install"
+ find lib/modules data | cpio -o -H newc --owner=0:0 | gzip -9 > "${{ github.workspace }}/local-kernel-ramdisk.cpio.gz"
+ cd - > /dev/null
+ ls -lh "${{ github.workspace }}/local-kernel-ramdisk.cpio.gz"
+
+ - name: Download meta-qcom stable initramfs artifacts from S3
+ shell: bash
+ run: |
+ set -euo pipefail
+ mkdir -p "${{ github.workspace }}/downloads"
+ echo "🔍 Fetching initramfs files from S3 bucket: s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/"
+ aws s3 cp s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz "${{ github.workspace }}/downloads/"
+ echo "Initramfs files downloaded to: ${{ github.workspace }}/downloads"
+
+ - name: Decompress stable initramfs to kerneltest.cpio
+ shell: bash
+ run: |
+ set -euo pipefail
+ cd "${{ github.workspace }}/downloads"
+ echo "📦 Decompressing initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz to kerneltest.cpio..."
+ gunzip -c initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz > kerneltest.cpio
+
+ - name: Merge and repackage initramfs
+ shell: bash
+ run: |
+ set -euo pipefail
+ echo "🔧 Starting repackaging process"
+
+ workspace="${{ github.workspace }}"
+ mkdir -p "$workspace/combineramdisk"
+ cp "$workspace/local-kernel-ramdisk.cpio.gz" "$workspace/combineramdisk/"
+ cd "$workspace/combineramdisk"
+
+ # Decompress local-kernel-ramdisk
+ mv local-kernel-ramdisk.cpio.gz local-kernel-ramdisk.cpio.gz.bak
+ gunzip -c local-kernel-ramdisk.cpio.gz.bak > local-kernel-ramdisk.cpio
+
+ # Copy kerneltest from downloads
+ cp "$workspace/downloads/kerneltest.cpio" .
+
+ # Merge kerneltest and local-kernel-ramdisk
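+ # (concatenated cpio archives are a valid initramfs: the kernel unpacks
+ # them in order, with later entries overwriting earlier ones)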
+ cat kerneltest.cpio local-kernel-ramdisk.cpio > video-merged.cpio
+ gzip -9 video-merged.cpio
+
+ # Create temp workspace to clean up archive
+ mkdir -p temp_merge
+ cd temp_merge
+ cpio -id --no-absolute-filenames < ../kerneltest.cpio
+ cpio -id --no-absolute-filenames < ../local-kernel-ramdisk.cpio
+ cd ..
+
+ # Remove old merged archive
+ rm -f video-merged.cpio.gz
+
+ # Repackage clean archive
+ cd temp_merge
+ find . | cpio -o -H newc --owner=0:0 > ../video-merged.cpio
+ cd ..
+ gzip -9 video-merged.cpio
+
+ # Cleanup
+ rm -rf temp_merge kerneltest.cpio local-kernel-ramdisk.cpio
+ echo "Final archive: $workspace/combineramdisk/video-merged.cpio.gz"
+ ls -lh "$workspace/combineramdisk/video-merged.cpio.gz"
+
+ - name: Validate build_matrix and jq
+ shell: bash
+ run: |
+ set -euo pipefail
+ machines_json='${{ inputs.build_matrix }}'
+ if ! command -v jq >/dev/null 2>&1; then
+ echo "❌ jq is not installed on this runner. Please install jq."
+ exit 1
+ fi
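+ # jq -e exits non-zero on null/false output, and a parse error also fails,
+ # so malformed JSON aborts the step under set -e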
+ echo "$machines_json" | jq -e . >/dev/null
+ [ "$(echo "$machines_json" | jq length)" -gt 0 ] || { echo "❌ build_matrix is empty"; exit 1; }
+ echo "✅ build_matrix is valid JSON"
+
+ - name: Append artifacts to S3 upload list
+ shell: bash
+ run: |
+ set -euo pipefail
+ workspace="${{ github.workspace }}"
+ file_list="$workspace/artifacts/file_list.txt"
+ mkdir -p "$workspace/artifacts"
+
+ # Fresh file_list
+ : > "$file_list"
+
+ # Package lib/modules (xz-compressed) — exclude risky symlinks
+ mod_root="$workspace/kobj/tar-install/lib/modules"
+ [ -d "$mod_root" ] || { echo "❌ Missing directory: $mod_root"; exit 1; }
+ tar -C "$workspace/kobj/tar-install" \
+ --exclude='lib/modules/*/build' \
+ --exclude='lib/modules/*/source' \
+ --numeric-owner --owner=0 --group=0 \
+ -cJf "$workspace/modules.tar.xz" lib/modules
+
+ # Safety checks on the tar
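+ # (no symlink entries, no absolute paths, no ".." components that could
+ # escape the extraction root)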
+ if tar -Jtvf "$workspace/modules.tar.xz" | grep -q ' -> '; then
+ echo "❌ Symlinks found in modules archive (should be none)"; exit 1
+ fi
+ if tar -Jtf "$workspace/modules.tar.xz" | grep -Eq '^/|(^|/)\.\.(/|$)'; then
+ echo "❌ Unsafe paths found in modules archive"; exit 1
+ fi
+
+ echo "$workspace/modules.tar.xz" >> "$file_list"
+ echo "✅ Queued for upload: $workspace/modules.tar.xz"
+
+ # Kernel Image + merged video ramdisk (no local ramdisk)
+ IMAGE_PATH="$workspace/kobj/arch/arm64/boot/Image"
+ VMLINUX_PATH="$workspace/kobj/vmlinux"
+ MERGED_PATH="$workspace/combineramdisk/video-merged.cpio.gz"
+
+ [ -f "$IMAGE_PATH" ] || { echo "❌ Missing expected file: $IMAGE_PATH"; exit 1; }
+ [ -f "$VMLINUX_PATH" ] || { echo "❌ Missing expected file: $VMLINUX_PATH"; exit 1; }
+ [ -f "$MERGED_PATH" ] || { echo "❌ Missing merged cpio: $MERGED_PATH"; exit 1; }
+
+ echo "$IMAGE_PATH" >> "$file_list"
+ echo "✅ Queued for upload: $IMAGE_PATH"
+ echo "$VMLINUX_PATH" >> "$file_list"
+ echo "✅ Queued for upload: $VMLINUX_PATH"
+ echo "$MERGED_PATH" >> "$file_list"
+ echo "✅ Queued for upload: $MERGED_PATH"
+
+ # Loop through all machines from the build_matrix input and add DTBs
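+ # build_matrix is a JSON array of objects, each carrying a "machine"
+ # field that names the DTB, e.g. [{"machine": "<dtb-name>", ...}]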
+ machines='${{ inputs.build_matrix }}'
+ for machine in $(echo "$machines" | jq -r '.[].machine'); do
+ dtb="$workspace/kobj/arch/arm64/boot/dts/qcom/${machine}.dtb"
+ if [ -f "$dtb" ]; then
+ echo "$dtb" >> "$file_list"
+ echo "✅ Queued for upload: $dtb"
+ else
+ echo "❌ Missing DTB: $dtb"
+ exit 1
+ fi
+ done
+
+ echo "----- Files queued for S3 upload -----"
+ cat "$file_list"
+
+ - name: Upload all artifacts to S3
+ uses: qualcomm-linux/video-driver/.github/actions/aws_s3_helper@video.upstream(stage)
+ with:
+ s3_bucket: qli-prd-video-gh-artifacts
+ local_file: ${{ github.workspace }}/artifacts/file_list.txt
+ mode: multi-upload
+ upload_location: ${{ github.repository_owner }}/${{ github.event.repository.name }}/${{ github.run_id }}-${{ github.run_attempt }}
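+ # artifacts land under <owner>/<repo>/<run_id>-<run_attempt>/ in the bucket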
+
+
+ - name: Clean up
+ if: always()
+ shell: bash
+ run: |
+ set -euo pipefail
+ ws="${{ github.workspace }}"
+ rm -rf "$ws/artqifacts" || true
+ rm -rf "$ws/combineramdisk" || true
+ rm -rf "$ws/downloads" || true
+ rm -rf "$ws/kobj" || true
+ rm -f "$ws/modules.tar.xz" || true
+ rm -f "$ws/local-kernel-ramdisk.cpio.gz" || true
+
+
+ - name: Update summary
+ if: success() || failure()
+ shell: bash
+ run: |
+ status="${{ steps.build_workspace.outcome }}"
+ if [ "$status" = "success" ]; then
+ summary=":heavy_check_mark: Build Success"
+ else
+ summary=":x: Build Failed"
+ fi
+
+ ws="${{ github.workspace }}"
+ file_list="$ws/artifacts/file_list.txt"
+
+ {
+ echo "Build Summary
"
+ echo "$summary"
+ if [ -f "$file_list" ]; then
+ echo ""
+ echo "Artifacts queued for upload:"
+ while IFS= read -r line; do
+ echo "- $line"
+ done < "$file_list"
+ fi
+ echo " "
+
} >> "$GITHUB_STEP_SUMMARY"
\ No newline at end of file
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index d6b8036e5..f65b70280 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,154 +1,199 @@
-name: _test
-description: Run tests on LAVA
-
-on:
- workflow_call:
- inputs:
- docker_image:
- description: Docker image
- type: string
- required: true
- default: kmake-image:ver.1.0
-
- build_matrix:
- description: Build matrix for multi target builds (stringified JSON)
- type: string
- required: true
-
- full_matrix:
- description: Full matrix containing lava description (stringified JSON)
- type: string
- required: true
-
-jobs:
- test:
- runs-on:
- group: GHA-video-Prd-SelfHosted-RG
- labels: [ self-hosted, video-prd-u2204-x64-large-od-ephem ]
- strategy:
- fail-fast: false
- matrix:
- build_matrix: ${{ fromJson(inputs.build_matrix) }}
-
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ github.ref }}
- fetch-depth: 0
-
- - name: Pull docker image
- uses: qualcomm-linux/kernel-config/.github/actions/pull_docker_image@main
- with:
- image: ${{ inputs.docker_image }}
-
- - name: Download URLs list (presigned_urls.json)
- uses: actions/download-artifact@v4
- with:
- name: presigned_urls.json
- merge-multiple: true
- path: ${{ github.workspace }}
-
- - name: Clone lava job render scripts
- run: cd .. && git clone https://github.com/qualcomm-linux/job_render
-
- - name: Extract the LAVA machine name
- id: get_lavaname
- uses: actions/github-script@v7
- with:
- script: |
- const fullMatrix = JSON.parse(`${{ inputs.full_matrix }}`);
- const currentMachine = `${{ matrix.build_matrix.machine }}`;
-
- const entry = fullMatrix.find(item => item.machine === currentMachine);
- if (!entry) {
- core.setFailed(`No entry found in full matrix for machine: ${currentMachine}`);
- return;
- }
-
- const lavaname = entry.lavaname;
- console.log(`Lavaname for ${currentMachine} is ${lavaname}`);
- core.setOutput("LAVANAME", lavaname);
-
- - name: Create lava job definition
- id: create_job_definition
- uses: qualcomm-linux/video-driver/.github/actions/lava_job_render@video.upstream(stage)
- with:
- docker_image: ${{ inputs.docker_image }}
- env:
- FIRMWARE: ${{ matrix.build_matrix.firmware }}
- MACHINE: ${{ matrix.build_matrix.machine }}
- LAVA_NAME: ${{ steps.get_lavaname.outputs.LAVANAME }}
-
- - name: Submit lava job
- id: submit_job
- run: |
- cd ../job_render
- job_id=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{ secrets.LAVA_OSS_TOKEN }} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{ secrets.LAVA_OSS_USER }} production && lavacli -i production jobs submit ./renders/lava_job_definition.yaml")
- job_url="https://lava-oss.qualcomm.com/scheduler/job/$job_id"
- echo "job_id=$job_id" >> $GITHUB_OUTPUT
- echo "job_url=$job_url" >> $GITHUB_OUTPUT
- echo "Lava Job: $job_url"
- echo "JOB_ID=$job_id" >> $GITHUB_ENV
-
- - name: Check lava job results
- id: check_job
- run: |
- STATE=""
- START_TIME=$(date +%s)
- while [ "$STATE" != "Finished" ]; do
- state=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{ secrets.LAVA_OSS_TOKEN }} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{ secrets.LAVA_OSS_USER }} production && lavacli -i production jobs show $JOB_ID" | grep state)
- STATE=$(echo "$state" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
- echo "Current status: $STATE"
- CURRENT_TIME=$(date +%s)
- ELAPSED_TIME=$(((CURRENT_TIME - START_TIME)/3600))
- if [ $ELAPSED_TIME -ge 2 ]; then
- echo "Timeout: 2 hours exceeded."
- summary=":x: Lava job exceeded time limit."
- echo "summary=$summary" >> $GITHUB_OUTPUT
- exit 1
- fi
- sleep 30
- done
- health=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{ secrets.LAVA_OSS_TOKEN }} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{ secrets.LAVA_OSS_USER }} production && lavacli -i production jobs show $JOB_ID" | grep Health)
- HEALTH=$(echo "$health" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
- if [[ "$HEALTH" == "Complete" ]]; then
- TEST_RESULTS=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{ secrets.LAVA_OSS_TOKEN }} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{ secrets.LAVA_OSS_USER }} production && lavacli -i production results $JOB_ID" | grep fail || echo "Pass")
- if [[ "$TEST_RESULTS" == "Pass" ]]; then
- echo "Lava job passed."
- summary=":heavy_check_mark: Lava job passed."
- echo "summary=$summary" >> $GITHUB_OUTPUT
- exit 0
- else
- echo "Lava job failed."
- summary=":x: Lava job failed."
- echo "summary=$summary" >> $GITHUB_OUTPUT
- exit 1
- fi
- else
- echo "Lava job failed."
- summary=":x: Lava job failed."
- echo "summary=$summary" >> $GITHUB_OUTPUT
- exit 1
- fi
-
- - name: Update summary
- if: success() || failure()
- shell: bash
- run: |
- if [ "${{ steps.create_job_definition.conclusion }}" == 'failure' ]; then
- status=":x: Test job failed"
- else
- status="${{ steps.check_job.outputs.summary }}"
- job_url="${{ steps.submit_job.outputs.job_url }}"
- job_id="${{ steps.submit_job.outputs.job_id }}"
- fi
- SUMMARY='
- '${status}'
-
- JOB ID: '${job_id}'
-
- '
- echo -e "$SUMMARY" >> $GITHUB_STEP_SUMMARY
- # JOB_INFO="- [Job $job_id on ${{ matrix.build_matrix.machine }}]($job_url)"
- # echo -e "$JOB_INFO" > job_info.md
+name: _test
+description: Run tests on LAVA
+
+on:
+ workflow_call:
+ inputs:
+ docker_image:
+ description: Docker image
+ type: string
+ required: false
+ default: kmake-image:ver.1.0
+
+ build_matrix:
+ description: Build matrix for multi target builds (stringified JSON)
+ type: string
+ required: true
+
+ full_matrix:
+ description: Full matrix containing lava description (stringified JSON)
+ type: string
+ required: true
+
+jobs:
+ test:
+ runs-on:
+ group: GHA-video-Prd-SelfHosted-RG
+ labels: [ self-hosted, video-prd-u2204-x64-large-od-ephem ]
+ strategy:
+ fail-fast: false
+ matrix:
+ build_matrix: ${{ fromJson(inputs.build_matrix) }}
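+ # fromJson expands the stringified array into a matrix axis, spawning one
+ # test job per machine entry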
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.ref }}
+ fetch-depth: 0
+
+ - name: Pull docker image
+ uses: qualcomm-linux/kernel-config/.github/actions/pull_docker_image@main
+ with:
+ image: ${{ inputs.docker_image }}
+
+ - name: Download URLs list (presigned_urls.json)
+ uses: actions/download-artifact@v4
+ with:
+ name: presigned_urls.json
+ merge-multiple: true
+ path: ${{ github.workspace }}
+
+ - name: Clone lava job render scripts
+ run: cd .. && git clone https://github.com/qualcomm-linux/job_render
+
+ - name: Extract the LAVA machine name
+ id: get_lavaname
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const fullMatrix = JSON.parse(`${{ inputs.full_matrix }}`);
+ const currentMachine = `${{ matrix.build_matrix.machine }}`;
+
+ const entry = fullMatrix.find(item => item.machine === currentMachine);
+ if (!entry) {
+ core.setFailed(`No entry found in full matrix for machine: ${currentMachine}`);
+ return;
+ }
+
+ const lavaname = entry.lavaname;
+ console.log(`Lavaname for ${currentMachine} is ${lavaname}`);
+ core.setOutput("LAVANAME", lavaname);
+
+ - name: Create lava job definition
+ id: create_job_definition
+ uses: qualcomm-linux/video-driver/.github/actions/lava_job_render@video.upstream(stage)
+ with:
+ docker_image: ${{ inputs.docker_image }}
+ env:
+ FIRMWARE: ${{ matrix.build_matrix.firmware }}
+ MACHINE: ${{ matrix.build_matrix.machine }}
+ LAVA_NAME: ${{ steps.get_lavaname.outputs.LAVANAME }}
+
+ - name: Submit lava job
+ id: submit_job
+ run: |
+ cd ../job_render
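+ # lavacli prints the new job id on stdout; capture it for later polling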
+ job_id=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{ secrets.LAVA_OSS_TOKEN }} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{ secrets.LAVA_OSS_USER }} production && lavacli -i production jobs submit ./renders/lava_job_definition.yaml")
+ job_url="https://lava-oss.qualcomm.com/scheduler/job/$job_id"
+ echo "job_id=$job_id" >> $GITHUB_OUTPUT
+ echo "job_url=$job_url" >> $GITHUB_OUTPUT
+ echo "Lava Job: $job_url"
+ echo "JOB_ID=$job_id" >> $GITHUB_ENV
+
+ - name: Check lava job results
+ id: check_job
+ run: |
+ STATE=""
+ START_TIME=$(date +%s)
+
+ # Wait for job to finish
+ while [ "$STATE" != "Finished" ]; do
+ state=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" \
+ ${{ inputs.docker_image }} sh -c \
+ "lavacli identities add --token ${{ secrets.LAVA_OSS_TOKEN }} \
+ --uri https://lava-oss.qualcomm.com/RPC2 \
+ --username ${{ secrets.LAVA_OSS_USER }} production && \
+ lavacli -i production jobs show $JOB_ID" | grep state)
+
+ STATE=$(echo "$state" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
+ echo "Current status: $STATE"
+
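+ # integer division gives whole elapsed hours; give up after polling for
+ # 2 hours without the job finishing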
+ CURRENT_TIME=$(date +%s)
+ ELAPSED_TIME=$(((CURRENT_TIME - START_TIME)/3600))
+ if [ $ELAPSED_TIME -ge 2 ]; then
+ echo "Timeout: 2 hours exceeded."
+ summary=":x: Lava job exceeded time limit."
+ echo "summary=$summary" >> $GITHUB_OUTPUT
+ exit 1
+ fi
+ sleep 30
+ done
+
+ # Check job health
+ health=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" \
+ ${{ inputs.docker_image }} sh -c \
+ "lavacli identities add --token ${{ secrets.LAVA_OSS_TOKEN }} \
+ --uri https://lava-oss.qualcomm.com/RPC2 \
+ --username ${{ secrets.LAVA_OSS_USER }} production && \
+ lavacli -i production jobs show $JOB_ID" | grep Health)
+
+ HEALTH=$(echo "$health" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
+ echo "Health: $HEALTH"
+
+ if [[ "$HEALTH" != "Complete" ]]; then
+ echo "Lava job health is not Complete."
+ summary=":x: Lava job failed (Health: $HEALTH)."
+ echo "summary=$summary" >> $GITHUB_OUTPUT
+ exit 1
+ fi
+
+ # Fetch detailed results once
+ docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" \
+ ${{ inputs.docker_image }} sh -c \
+ "lavacli identities add --token ${{ secrets.LAVA_OSS_TOKEN }} \
+ --uri https://lava-oss.qualcomm.com/RPC2 \
+ --username ${{ secrets.LAVA_OSS_USER }} production && \
+ lavacli -i production results $JOB_ID" > lava_results.txt
+
+ echo "=== LAVA RESULTS (first 200 lines) ==="
+ head -200 lava_results.txt || true
+
+ # Decide pass/fail based ONLY on 0_video_pre-merge-tests / Video_V4L2_Runner
+ if awk '
+ # track whether we are inside definition: 0_video_pre-merge-tests
+ /^definition: 0_video_pre-merge-tests$/ { in_def=1; next }
+ /^definition:/ && $2 != "0_video_pre-merge-tests" { in_def=0 }
+
+ # inside that definition, track Video_V4L2_Runner case and its result
+ in_def && /^case: Video_V4L2_Runner$/ { in_case=1; next }
+ in_def && in_case && /^result:/ {
+ if ($2 == "fail") {
+ print "Found FAIL for 0_video_pre-merge-tests / Video_V4L2_Runner";
+ failed=1;
+ exit;
+ }
+ in_case=0; # reset for next block
+ }
+ # a bare "exit 1" in the body would be overridden by an exit in END, so
+ # record the failure in a flag and set the final exit status here
+ END { if (failed) exit 1; exit 0 }
+ ' lava_results.txt; then
+ echo "Lava job passed (0_video_pre-merge-tests base + overlay)."
+ summary=":heavy_check_mark: Lava job passed."
+ echo "summary=$summary" >> $GITHUB_OUTPUT
+ exit 0
+ else
+ echo "Lava job failed in 0_video_pre-merge-tests / Video_V4L2_Runner."
+ summary=":x: Lava job failed."
+ echo "summary=$summary" >> $GITHUB_OUTPUT
+ exit 1
+ fi
+
+ - name: Update summary
+ if: success() || failure()
+ shell: bash
+ run: |
+ if [ "${{ steps.create_job_definition.conclusion }}" == 'failure' ]; then
+ status=":x: Test job failed"
+ else
+ status="${{ steps.check_job.outputs.summary }}"
+ job_url="${{ steps.submit_job.outputs.job_url }}"
+ job_id="${{ steps.submit_job.outputs.job_id }}"
+ fi
+ {
+ echo ""
+ echo "${status}"
+ echo ""
+ echo "JOB ID: ${job_id:-}"
+ echo ""
+ } >> "$GITHUB_STEP_SUMMARY"
+ # JOB_INFO="- [Job $job_id on ${{ matrix.build_matrix.machine }}]($job_url)"
+ # echo -e "$JOB_INFO" > job_info.md
\ No newline at end of file
diff --git a/venus/vdec.c b/venus/vdec.c
index 4a6641fdf..6b3d5e591 100644
--- a/venus/vdec.c
+++ b/venus/vdec.c
@@ -565,7 +565,13 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
fdata.buffer_type = HFI_BUFFER_INPUT;
fdata.flags |= HFI_BUFFERFLAG_EOS;
- if (IS_V6(inst->core) && is_fw_rev_or_older(inst->core, 1, 0, 87))
+
+ /*
+ * Send a NULL EOS buffer address only on IRIS2 (SM8250) with firmware
+ * <= 1.0.87. SC7280 also reports its version as "1.0.", which is
+ * parsed as 1.0.0; restricting this quirk to IRIS2 avoids misapplying
+ * it and breaking VP9 decode on SC7280.
+ */
+ if (IS_IRIS2(inst->core) && is_fw_rev_or_older(inst->core, 1, 0, 87))
fdata.device_addr = 0;
else
fdata.device_addr = 0xdeadb000;