diff --git a/.github/actions/linux-uttest/action.yml b/.github/actions/linux-uttest/action.yml
index 9f3888694..9f7808c1c 100644
--- a/.github/actions/linux-uttest/action.yml
+++ b/.github/actions/linux-uttest/action.yml
@@ -207,20 +207,23 @@ runs:
         else
           echo -e "No Failure logs"
         fi
-        # Copied the passed logs
-        if ls passed*.log 1> /dev/null 2>&1; then
-          cp passed*.log ${{ github.workspace }}/ut_log
-          echo -e "Passed logs Copied"
-        else
-          echo -e "No Passed logs"
-        fi
-        # Copied the Summary logs
-        if ls category*.log 1> /dev/null 2>&1; then
-          cp category*.log ${{ github.workspace }}/ut_log
-          echo -e "Category logs Copied"
-        else
-          echo -e "No Category logs"
-        fi
+
+        log_dir="${{ github.workspace }}/ut_log"
+        copy_logs() {
+          local pattern=$1
+          local name=$2
+
+          if ls ${pattern} 1> /dev/null 2>&1; then
+            cp ${pattern} "$log_dir"
+            echo -e "${name} logs Copied"
+          else
+            echo -e "No ${name} logs"
+          fi
+        }
+        copy_logs "passed*.log" "Passed"
+        copy_logs "category*.log" "Category"
+        copy_logs "all_cases*.log" "All cases"
+
         if [ -e ut_failure_list.csv ];then
           cp ut_failure_list.csv ${{ github.workspace }}/ut_log/ut_failure_list.csv || true
         fi
diff --git a/.github/scripts/check-ut.py b/.github/scripts/check-ut.py
index 50b112a18..fa999ae7c 100644
--- a/.github/scripts/check-ut.py
+++ b/.github/scripts/check-ut.py
@@ -8,13 +8,18 @@ parser = argparse.ArgumentParser(description='Test results analyzer')
 parser.add_argument('-n', '--ut-name', type=str, default='', help='UT name')
 parser.add_argument('-i', '--input-files', nargs='+', help='JUnit XML files or log files')
+parser.add_argument('-o', '--output-dir', type=str, default='.', help='Output directory for log files (default: current directory)')
 args = parser.parse_args()

+os.makedirs(args.output_dir, exist_ok=True)
+
 failures = []
 summaries = []

 failures_by_category = defaultdict(list)
 passed_cases = []
 passed_by_category = defaultdict(list)
+all_cases = []
+all_cases_by_category = defaultdict(list)
 category_totals = defaultdict(lambda: {
     'Test cases': 0,
     'Passed': 0,
@@ -119,6 +124,8 @@ def print_md_row(row, print_header=False, failure_list=None):
     if failure_list is not None:
         failure_list.write(f"| {row_values} |\n")

+def get_output_path(filename):
+    return os.path.join(args.output_dir, filename)

 def print_failures(failure_list=None):
     if not failures:
@@ -149,13 +156,29 @@ def generate_failures_log():
         if not category_failures:
             continue

-        log_filename = f"failures_{category}.log"
+        log_filename = get_output_path(f"failures_{category}.log")
         with open(log_filename, "w", encoding='utf-8') as log_file:
             for case in category_failures:
                 class_name = get_classname(case)
                 test_name = get_name(case)
                 log_file.write(f"{category},{class_name},{test_name}\n")

+def generate_all_cases_log():
+    if not all_cases:
+        return
+
+    for category, category_cases in all_cases_by_category.items():
+        if not category_cases:
+            continue
+
+        log_filename = get_output_path(f"all_cases_{category}.log")
+        with open(log_filename, "w", encoding='utf-8') as log_file:
+            for case in category_cases:
+                class_name = get_classname(case)
+                test_name = get_name(case)
+                status = get_result(case)
+                log_file.write(f"{category},{class_name},{test_name}\n")
+
 def parse_log_file(log_file):
     with open(log_file, encoding='utf-8') as f:
         content = f.read()
@@ -267,7 +290,30 @@ def process_xml_file(xml_file):
         parts_category = os.path.basename(xml_file).split('.')[0]
         category = determine_category(parts_category)

+        def process_suite(suite, category):
+            suite_cases_count = 0
+
+            for case in suite:
+                if hasattr(case, 'tests'):
+                    suite_cases_count += process_suite(case, category)
+                else:
+                    case._file_category = category
+                    all_cases.append(case)
+                    all_cases_by_category[category].append(case)
+                    suite_cases_count += 1
+
+                    if get_result(case) not in ["passed", "skipped"]:
+                        case._file_category = category
+                        failures.append(case)
+                    elif get_result(case) == "passed":
+                        case._file_category = category
+                        passed_cases.append(case)
+                        passed_by_category[category].append(case)
+
+            return suite_cases_count
+
         for suite in xml:
+            actual_cases_count = process_suite(suite, category)
             suite_summary = {
                 'Category': category,
                 'UT': ut,
@@ -287,14 +333,9 @@ def process_xml_file(xml_file):
             category_totals[category]['Failures'] += suite_summary['Failures']
             category_totals[category]['Errors'] += suite_summary['Errors']

-            for case in suite:
-                if get_result(case) not in ["passed", "skipped"]:
-                    case._file_category = category
-                    failures.append(case)
-                elif get_result(case) == "passed":
-                    case._file_category = category
-                    passed_cases.append(case)
-                    passed_by_category[category].append(case)
+            if suite.tests != actual_cases_count:
+                print(f"Warning: Suite '{ut}' has {suite.tests} tests in summary but {actual_cases_count} cases were processed",
+                      file=sys.stderr)

     except Exception as e:
         print(f"Error processing {xml_file}: {e}", file=sys.stderr)
@@ -306,7 +347,7 @@ def generate_passed_log():
         if not category_passed:
             continue

-        log_filename = f"passed_{category}.log"
+        log_filename = get_output_path(f"passed_{category}.log")
         with open(log_filename, "w", encoding='utf-8') as log_file:
             for case in category_passed:
                 class_name = get_classname(case)
@@ -320,7 +361,7 @@ def generate_category_totals_log():
         if totals['Test cases'] == 0:
             continue

-        log_filename = f"category_{category}.log"
+        log_filename = get_output_path(f"category_{category}.log")
         with open(log_filename, "w", encoding='utf-8') as log_file:
             log_file.write(f"Category: {category}\n")
             log_file.write(f"Test cases: {totals['Test cases']}\n")
@@ -366,6 +407,8 @@ def print_summary():
     print_md_row(totals)

 def main():
+    os.makedirs(args.output_dir, exist_ok=True)
+
     for input_file in args.input_files:
         if input_file.endswith('.log'):
             process_log_file(input_file)
@@ -373,12 +416,15 @@ def main():
             process_xml_file(input_file)
         else:
             print(f"Skipping unknown file type: {input_file}", file=sys.stderr)
+
     if args.ut_name != "skipped_ut":
-        with open("ut_failure_list.csv", "w") as failure_list:
+        failure_list_path = get_output_path("ut_failure_list.csv")
+        with open(failure_list_path, "w", encoding='utf-8') as failure_list:
             print_failures(failure_list=failure_list)

     generate_failures_log()
     generate_passed_log()
+    generate_all_cases_log()
     generate_category_totals_log()
     print_summary()
diff --git a/.github/scripts/ut_result_check.sh b/.github/scripts/ut_result_check.sh
index a94007da1..7bb231661 100644
--- a/.github/scripts/ut_result_check.sh
+++ b/.github/scripts/ut_result_check.sh
@@ -174,6 +174,182 @@ check_skipped_ut() {
   fi
 }

+categorize_failures() {
+  local failures_log="$1"
+  local all_ut_log="$2"
+  local output_dir="${3:-.}"
+
+  # Check if required parameters are provided
+  if [[ $# -lt 2 ]]; then
+    echo "Usage: categorize_failures <failures_log> <all_ut_log> [output_dir]"
+    echo "Example: categorize_failures failures.txt all_ut.txt ./output"
+    return 1
+  fi
+
+  # Check if files exist
+  if [[ ! -f "$failures_log" ]]; then
+    echo "Error: Failures log file not found: $failures_log"
+    return 1
+  fi
+
+  if [[ ! -f "$all_ut_log" ]]; then
+    echo "Error: All UT log file not found: $all_ut_log"
+    return 1
+  fi
+
+  # Create output directory
+  mkdir -p "$output_dir"
+
+  # Output file paths
+  local regression_file="$output_dir/regression_ut.txt"
+  local new_issue_file="$output_dir/new_issue_ut.txt"
+  local summary_file="$output_dir/summary.txt"
+
+  # Clear output files (if they exist)
+  true > "$regression_file"
+  true > "$new_issue_file"
+  true > "$summary_file"
+
+  # Counters
+  local regression_count=0
+  local new_issue_count=0
+  local total_failures=0
+
+  echo "Starting UT failure analysis..."
+  echo "Failures log: $failures_log"
+  echo "All UT log: $all_ut_log"
+  echo "Output directory: $output_dir"
+  echo ""
+
+  # Process failures log line by line
+  while IFS= read -r line || [[ -n "$line" ]]; do
+    # Skip empty lines
+    if [[ -z "$line" ]]; then
+      continue
+    fi
+
+    total_failures=$((total_failures + 1))
+
+    # Check if this line exists in all UT log
+    # Using grep -Fxq: -F fixed strings, -x whole line match, -q quiet mode
+    if grep -Fxq "$line" "$all_ut_log" 2>/dev/null; then
+      # Exists in all UT log -> Regression issue
+      regression_count=$((regression_count + 1))
+      echo "$line" >> "$regression_file"
+    else
+      # Not found in all UT log -> New issue
+      new_issue_count=$((new_issue_count + 1))
+      echo "$line" >> "$new_issue_file"
+    fi
+  done < "$failures_log"
+
+  # Generate summary report
+  local timestamp
+  timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+
+  echo ""
+  echo "Analysis completed!"
+  echo "================================="
+  echo "Total New failed UTs: $total_failures"
+  echo "Regression issues: $regression_count"
+  echo "New UTs issues: $new_issue_count"
+  echo "================================="
+  echo ""
+
+  # Display regression cases
+  if [[ $regression_count -gt 0 ]]; then
+    echo "REGRESSION CASES ISSUE ($regression_count):"
+    echo "---------------------------------"
+    while IFS= read -r line; do
+      echo "  $line"
+    done < "$regression_file"
+    echo ""
+  else
+    echo "✅ No regression cases found."
+    echo ""
+  fi
+
+  # Display new issue cases
+  if [[ $new_issue_count -gt 0 ]]; then
+    echo "NEW UT CASES ISSUE ($new_issue_count):"
+    echo "--------------------------------"
+    while IFS= read -r line; do
+      echo "  $line"
+    done < "$new_issue_file"
+    echo ""
+  else
+    echo "✅ No new UT cases issue found."
+    echo ""
+  fi
+
+  cat > "$summary_file" << EOF
+Failed UT Categorization Report
+================================
+Generated: $timestamp
+Failures log file: $(basename "$failures_log")
+All UT log file: $(basename "$all_ut_log")
+
+Statistics:
+-----------
+Total New failed UTs: $total_failures
+Regression issues: $regression_count
+New UTs issues: $new_issue_count
+
+Output Files:
+-------------
+Regression UT list: $(basename "$regression_file") ($regression_count items)
+New issue UT list: $(basename "$new_issue_file") ($new_issue_count items)
+
+Detailed Lists:
+---------------
+
+EOF
+
+  # Add regression UT list to summary
+  if [[ $regression_count -gt 0 ]]; then
+    {
+      echo "Regression Issues:"
+      echo "-----------"
+      cat "$regression_file"
+    } >> "$summary_file"
+    echo "" >> "$summary_file"
+  else
+    echo "✅ No regression issues found" >> "$summary_file"
+    echo "" >> "$summary_file"
+  fi
+
+  # Add new issue UT list to summary
+  if [[ $new_issue_count -gt 0 ]]; then
+    {
+      echo "New Issues:"
+      echo "-----------"
+      cat "$new_issue_file"
+    } >> "$summary_file"
+  else
+    echo "✅ No new issues found" >> "$summary_file"
+  fi
+
+  # Print summary to console
+  echo ""
+  echo "Analysis completed!"
+ echo "=================================" + echo "Total New failed UTs: $total_failures" + echo "Regression issues: $regression_count" + echo "New UTs issues: $new_issue_count" + echo "=================================" + echo "" + echo "Output files:" + echo " Regression UT list: $regression_file" + echo " New issue UT list: $new_issue_file" + echo " Detailed summary: $summary_file" + + # Show warning if no failures were found + if [[ $total_failures -eq 0 ]]; then + echo "" + echo "Note: No failed UT records found in the failures log file." + fi +} + # Main test runner for standard test suites (op_regression, op_extended, etc.) run_main_tests() { local suite="$1" @@ -216,6 +392,7 @@ run_main_tests() { local failed_count=0 passed_count=0 if [[ -f "failures_${suite}_filtered.log" ]]; then failed_count=$(wc -l < "failures_${suite}_filtered.log") + categorize_failures failures_${suite}_filtered.log all_cases_${suite}_reference.log categorize_failures fi if [[ -f "passed_${suite}.log" ]]; then passed_count=$(wc -l < "passed_${suite}.log") diff --git a/.github/workflows/_linux_ut.yml b/.github/workflows/_linux_ut.yml index dc1064712..16195fb2c 100644 --- a/.github/workflows/_linux_ut.yml +++ b/.github/workflows/_linux_ut.yml @@ -23,6 +23,10 @@ on: required: true type: string description: UT scope. one of `op_regression,op_transformers,op_extended,op_ut,skipped_ut,torch_xpu,op_regression_dev1` + test_type: + type: string + default: "build-cicd" + description: Test type, default is for CI tests, others (build or wheel)-(nightly, weekly or ondemand) permissions: read-all @@ -140,6 +144,8 @@ jobs: timeout-minutes: 30 permissions: issues: write + env: + REFERENCE_ISSUE_ID: 2440 steps: - name: Checkout torch-xpu-ops uses: actions/checkout@v4 @@ -148,6 +154,23 @@ jobs: with: pattern: Inductor-XPU-UT-Data-${{ github.event.pull_request.number || github.sha }}-${{ inputs.ut }}-* path: ${{ github.workspace }}/ut_log + - name: Download Baseline Artifact + run: | + mkdir baseline/ + cd baseline/ + if [[ "${{ inputs.test_type }}" != *"ly" ]];then + artifact_type="$(echo ${{ inputs.test_type }} |awk -F '-' '{print $1}')-nightly" + else + artifact_type="${{ inputs.test_type }}" + fi + gh --repo intel/torch-xpu-ops issue view ${REFERENCE_ISSUE_ID} --json body -q .body 2>&1 |tee body.txt + REFERENCE_RUN_ID="$(cat body.txt |grep "Inductor-${artifact_type}-LTS2" |sed 's/.*: *//' || echo '')" + if [ "${REFERENCE_RUN_ID}" != "" ];then + gh --repo intel/torch-xpu-ops run download ${REFERENCE_RUN_ID} -p "Inductor-XPU-UT-Data-*${{ inputs.ut }}*" + find Inductor-XPU-UT-Data-*${{ inputs.ut }}*/ -type f -path "*/Inductor-XPU-UT-Data-*/*" -print0 | \ + xargs -0 -I {} cp {} . 
+            rm -rf Inductor-XPU-UT-Data-*${{ inputs.ut }}* || true
+          fi
       - name: Check UT Results
         run: |
           ls -al ${{ github.workspace }}/ut_log
@@ -158,8 +181,33 @@
             \( -name "failures_*.log" -o \
                -name "passed_*.log" -o \
                -name "category_*.log" -o \
+               -name "all_cases_*.log" -o \
                -name "reproduce_*.log" \) \
             -exec mv {} ./ \; || true
+
+          if find "${{ github.workspace }}/baseline" -type f -name "all_cases_*.log" | grep -q .; then
+            echo -e "All cases logs collected"
+            find "${{ github.workspace }}/baseline" -type f -name "all_cases_*.log" \
+              -exec sh -c 'for file; do
+                filename=$(basename "$file")
+                newname="${filename%.log}_reference.log"
+                mv "$file" "./$newname"
+              done' _ {} + 2>/dev/null || true
+          else
+            echo -e "No all cases logs, generating them from the baseline XML files"
+            mkdir -p ${{ github.workspace }}/ut_log/baseline
+            find "${{ github.workspace }}/baseline/" -type f \
+              \( -name "*.xml" \) \
+              -exec mv {} ${{ github.workspace }}/ut_log/baseline \; || true
+            pip install junitparser
+            python ${{ github.workspace }}/.github/scripts/check-ut.py -n ${{ inputs.ut }} -i ${{ github.workspace }}/ut_log/baseline/*.xml -o ${{ github.workspace }}/ut_log/baseline
+            find "${{ github.workspace }}/ut_log/baseline" -type f -name "all_cases_*.log" \
+              -exec sh -c 'for file; do
+                filename=$(basename "$file")
+                newname="${filename%.log}_reference.log"
+                mv "$file" "./$newname"
+              done' _ {} + 2>/dev/null || true
+          fi
           cp ${{ github.workspace }}/.github/scripts/ut_result_check.sh ./
           # get distributed known issues
           gh --repo intel/torch-xpu-ops issue view $UT_SKIP_ISSUE --json body -q .body |sed -E '/^(#|$)/d' > Known_issue.log.tmp
@@ -186,6 +234,17 @@
             sed -i 's/[[:space:]]*$//g' Known_issue.log
             bash ut_result_check.sh "${ut_name}" "${{ inputs.pytorch }}"
           done
+      - name: Upload Reference Run ID
+        if: ${{ endsWith(inputs.test_type, 'ly') }}
+        run: |
+          gh --repo ${GITHUB_REPOSITORY} issue view ${REFERENCE_ISSUE_ID} --json body -q .body 2>&1 |tee new_body.txt
+          has_or_not="$(grep -c 'Inductor-${{ inputs.test_type }}-LTS2' new_body.txt || true)"
+          if [ ${has_or_not} -ne 0 ];then
+            sed -i "s/Inductor-${{ inputs.test_type }}-LTS2:.*/Inductor-${{ inputs.test_type }}-LTS2: ${GITHUB_RUN_ID}/" new_body.txt
+          else
+            echo "Inductor-${{ inputs.test_type }}-LTS2: ${GITHUB_RUN_ID}" |tee -a new_body.txt
+          fi
+          gh --repo ${GITHUB_REPOSITORY} issue edit ${REFERENCE_ISSUE_ID} --body-file new_body.txt
       - name: Upload Inductor XPU UT Log
         if: ${{ ! cancelled() }}
         uses: actions/upload-artifact@v4
diff --git a/.github/workflows/nightly_ondemand.yml b/.github/workflows/nightly_ondemand.yml
index 932ad56a4..b21593aa3 100644
--- a/.github/workflows/nightly_ondemand.yml
+++ b/.github/workflows/nightly_ondemand.yml
@@ -157,6 +157,7 @@ jobs:
       torch_xpu_ops: ${{ needs.Conditions-Filter.outputs.torch_xpu_ops }}
       python: ${{ needs.Conditions-Filter.outputs.python }}
       ut: ${{ matrix.ut_name }}
+      test_type: ${{ needs.Conditions-Filter.outputs.test_type }}

   Linux-Nightly-Ondemand-E2E-Tests:
     if: ${{ github.event_name == 'schedule' || contains(inputs.suite, 'e') }}
diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml
index 6c26af3c7..e2646a655 100644
--- a/.github/workflows/pull.yml
+++ b/.github/workflows/pull.yml
@@ -118,6 +118,7 @@ jobs:
       pytorch: ${{ needs.conditions-filter.outputs.pytorch }}
       torch_xpu_ops: ${{ needs.conditions-filter.outputs.pytorch == 'nightly_wheel' && 'pinned' || 'main' }}
       ut: ${{ matrix.ut_name }}
+      test_type: ${{ needs.conditions-filter.outputs.pytorch == 'nightly_wheel' && 'wheel-cicd' || 'build-cicd' }}

   linux-distributed:
     needs: [conditions-filter, linux-build]
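
Usage sketch: the two new pieces can be exercised locally roughly as below; the UT name op_ut, the ut_log/ input directory, and the categorize_output/ directory are illustrative and not taken from this change.

    # Rebuild the per-category logs (passed_*, failures_*, category_*, all_cases_*)
    # from a set of JUnit XML results; the -o flag added by this change selects
    # where they are written.
    pip install junitparser
    python .github/scripts/check-ut.py -n op_ut -i ut_log/*.xml -o ut_log

    # categorize_failures (new in ut_result_check.sh) splits a failures log into
    # regression vs. new issues by exact-line lookup in a baseline all_cases
    # reference log; output files land in the directory given as the third argument.
    # (Assumes ut_result_check.sh has been sourced so the function is in scope;
    # in CI it is invoked from run_main_tests right after counting failures.)
    categorize_failures failures_op_ut_filtered.log all_cases_op_ut_reference.log categorize_output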