Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 17 additions & 14 deletions .github/actions/linux-uttest/action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -207,20 +207,23 @@ runs:
else
echo -e "No Failure logs"
fi
# Copied the passed logs
if ls passed*.log 1> /dev/null 2>&1; then
cp passed*.log ${{ github.workspace }}/ut_log
echo -e "Passed logs Copied"
else
echo -e "No Passed logs"
fi
# Copied the Summary logs
if ls category*.log 1> /dev/null 2>&1; then
cp category*.log ${{ github.workspace }}/ut_log
echo -e "Category logs Copied"
else
echo -e "No Category logs"
fi

log_dir="${{ github.workspace }}/ut_log"
# Copy every file matching a glob pattern into $log_dir and report the
# outcome.  $1 = glob pattern (e.g. "passed*.log"), $2 = human-readable label.
copy_logs() {
  local glob="$1"
  local label="$2"

  # The pattern is deliberately unquoted so the shell expands the glob;
  # `ls` is used purely as an existence probe for any match.
  if ! ls ${glob} 1> /dev/null 2>&1; then
    echo -e "No ${label} logs"
  else
    cp ${glob} "$log_dir"
    echo -e "${label} logs Copied"
  fi
}
copy_logs "passed*.log" "Passed"
copy_logs "category*.log" "Category"
copy_logs "all_cases*.log" "All cases collect"

if [ -e ut_failure_list.csv ];then
cp ut_failure_list.csv ${{ github.workspace }}/ut_log/ut_failure_list.csv || true
fi
70 changes: 58 additions & 12 deletions .github/scripts/check-ut.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,13 +8,18 @@
parser = argparse.ArgumentParser(description='Test results analyzer')
parser.add_argument('-n', '--ut-name', type=str, default='', help='UT name')
parser.add_argument('-i', '--input-files', nargs='+', help='JUnit XML files or log files')
parser.add_argument('-o', '--output-dir', type=str, default='.', help='Output directory for log files (default: current directory)')
args = parser.parse_args()

os.makedirs(args.output_dir, exist_ok=True)

failures = []
summaries = []
failures_by_category = defaultdict(list)
passed_cases = []
passed_by_category = defaultdict(list)
all_cases = []
all_cases_by_category = defaultdict(list)
category_totals = defaultdict(lambda: {
'Test cases': 0,
'Passed': 0,
Expand Down Expand Up @@ -119,6 +124,8 @@ def print_md_row(row, print_header=False, failure_list=None):
if failure_list is not None:
failure_list.write(f"| {row_values} |\n")

def get_output_path(filename):
    """Return *filename* joined onto the CLI-configured output directory."""
    target_dir = args.output_dir
    return os.path.join(target_dir, filename)

def print_failures(failure_list=None):
if not failures:
Expand Down Expand Up @@ -149,13 +156,29 @@ def generate_failures_log():
if not category_failures:
continue

log_filename = f"failures_{category}.log"
log_filename = get_output_path(f"failures_{category}.log")
with open(log_filename, "w", encoding='utf-8') as log_file:
for case in category_failures:
class_name = get_classname(case)
test_name = get_name(case)
log_file.write(f"{category},{class_name},{test_name}\n")

def generate_all_cases_log():
    """Write one ``all_cases_<category>.log`` file per category.

    Each line has the form ``<category>,<classname>,<testname>`` — the same
    format as the failures logs, so consumers can match lines exactly
    (e.g. ``grep -Fx`` in the shell-side ``categorize_failures``).
    Does nothing when no cases were collected.
    """
    if not all_cases:
        return

    for category, category_cases in all_cases_by_category.items():
        if not category_cases:
            continue

        log_filename = get_output_path(f"all_cases_{category}.log")
        with open(log_filename, "w", encoding='utf-8') as log_file:
            for case in category_cases:
                class_name = get_classname(case)
                test_name = get_name(case)
                # NOTE: the pass/fail status is intentionally not part of the
                # line — downstream whole-line matching relies on the
                # three-field format.  (Removed an unused `status =
                # get_result(case)` dead store here.)
                log_file.write(f"{category},{class_name},{test_name}\n")

def parse_log_file(log_file):
with open(log_file, encoding='utf-8') as f:
content = f.read()
Expand Down Expand Up @@ -267,7 +290,30 @@ def process_xml_file(xml_file):
parts_category = os.path.basename(xml_file).split('.')[0]
category = determine_category(parts_category)

def process_suite(suite, category):
    """Recursively walk a JUnit suite, recording every leaf test case.

    Each leaf case is tagged with its source *category*, appended to the
    global ``all_cases`` collections, and then bucketed into ``failures``
    or ``passed_cases``/``passed_by_category`` by its result.  Returns the
    number of leaf cases seen so the caller can cross-check the suite's
    own declared test count.
    """
    suite_cases_count = 0

    for case in suite:
        if hasattr(case, 'tests'):
            # A nested suite: recurse and accumulate its leaf count.
            suite_cases_count += process_suite(case, category)
        else:
            # Tag exactly once; the branches below previously re-set this
            # attribute redundantly.
            case._file_category = category
            all_cases.append(case)
            all_cases_by_category[category].append(case)
            suite_cases_count += 1

            result = get_result(case)
            if result not in ["passed", "skipped"]:
                failures.append(case)
            elif result == "passed":
                passed_cases.append(case)
                passed_by_category[category].append(case)

    return suite_cases_count

for suite in xml:
actual_cases_count = process_suite(suite, category)
suite_summary = {
'Category': category,
'UT': ut,
Expand All @@ -287,14 +333,9 @@ def process_xml_file(xml_file):
category_totals[category]['Failures'] += suite_summary['Failures']
category_totals[category]['Errors'] += suite_summary['Errors']

for case in suite:
if get_result(case) not in ["passed", "skipped"]:
case._file_category = category
failures.append(case)
elif get_result(case) == "passed":
case._file_category = category
passed_cases.append(case)
passed_by_category[category].append(case)
if suite.tests != actual_cases_count:
print(f"Warning: Suite '{ut}' has {suite.tests} tests in summary but {actual_cases_count} cases were processed",
file=sys.stderr)
except Exception as e:
print(f"Error processing {xml_file}: {e}", file=sys.stderr)

Expand All @@ -306,7 +347,7 @@ def generate_passed_log():
if not category_passed:
continue

log_filename = f"passed_{category}.log"
log_filename = get_output_path(f"passed_{category}.log")
with open(log_filename, "w", encoding='utf-8') as log_file:
for case in category_passed:
class_name = get_classname(case)
Expand All @@ -320,7 +361,7 @@ def generate_category_totals_log():
if totals['Test cases'] == 0:
continue

log_filename = f"category_{category}.log"
log_filename = get_output_path(f"category_{category}.log")
with open(log_filename, "w", encoding='utf-8') as log_file:
log_file.write(f"Category: {category}\n")
log_file.write(f"Test cases: {totals['Test cases']}\n")
Expand Down Expand Up @@ -366,19 +407,24 @@ def print_summary():
print_md_row(totals)

def main():
    """Entry point: ingest every input file, then emit all reports."""
    os.makedirs(args.output_dir, exist_ok=True)

    # Dispatch each input by suffix; anything unrecognized is skipped loudly.
    for path in args.input_files:
        if path.endswith('.xml'):
            process_xml_file(path)
        elif path.endswith('.log'):
            process_log_file(path)
        else:
            print(f"Skipping unknown file type: {path}", file=sys.stderr)

    # The skipped-UT run has no meaningful failure list, so don't write one.
    if args.ut_name != "skipped_ut":
        failure_list_path = get_output_path("ut_failure_list.csv")
        with open(failure_list_path, "w", encoding='utf-8') as failure_list:
            print_failures(failure_list=failure_list)

    generate_failures_log()
    generate_passed_log()
    generate_all_cases_log()
    generate_category_totals_log()
    print_summary()

Expand Down
177 changes: 177 additions & 0 deletions .github/scripts/ut_result_check.sh
Original file line number Diff line number Diff line change
Expand Up @@ -174,6 +174,182 @@ check_skipped_ut() {
fi
}

# Split a failures log into "regression" vs "new issue" buckets by checking
# each failed-case line against a reference log of all known cases.
#   $1 = failures log (one case per line, e.g. "category,class,test")
#   $2 = reference log of all cases from a prior/baseline run
#   $3 = output directory (optional, defaults to ".")
# Writes regression_ut.txt, new_issue_ut.txt, and summary.txt into $3 and
# prints a report to stdout.  Returns 1 on bad arguments or missing files.
categorize_failures() {
local failures_log="$1"
local all_ut_log="$2"
local output_dir="${3:-.}"

# Check if required parameters are provided
if [[ $# -lt 2 ]]; then
echo "Usage: categorize_failures <failures_log> <all_ut_log> [output_dir]"
echo "Example: categorize_failures failures.txt all_ut.txt ./output"
return 1
fi

# Check if files exist
if [[ ! -f "$failures_log" ]]; then
echo "Error: Failures log file not found: $failures_log"
return 1
fi

if [[ ! -f "$all_ut_log" ]]; then
echo "Error: All UT log file not found: $all_ut_log"
return 1
fi

# Create output directory
mkdir -p "$output_dir"

# Output file paths
local regression_file="$output_dir/regression_ut.txt"
local new_issue_file="$output_dir/new_issue_ut.txt"
local summary_file="$output_dir/summary.txt"

# Clear output files (if they exist)
true > "$regression_file"
true > "$new_issue_file"
true > "$summary_file"

# Counters
local regression_count=0
local new_issue_count=0
local total_failures=0

echo "Starting UT failure analysis..."
echo "Failures log: $failures_log"
echo "All UT log: $all_ut_log"
echo "Output directory: $output_dir"
echo ""

# Process failures log line by line
# The `|| [[ -n "$line" ]]` keeps a final line without a trailing newline.
while IFS= read -r line || [[ -n "$line" ]]; do
# Skip empty lines
if [[ -z "$line" ]]; then
continue
fi

total_failures=$((total_failures + 1))

# Check if this line exists in all UT log
# Using grep -Fxq: -F fixed strings, -x whole line match, -q quiet mode
if grep -Fxq "$line" "$all_ut_log" 2>/dev/null; then
# Exists in all UT log -> Regression issue
regression_count=$((regression_count + 1))
echo "$line" >> "$regression_file"
else
# Not found in all UT log -> New issue
new_issue_count=$((new_issue_count + 1))
echo "$line" >> "$new_issue_file"
fi
done < "$failures_log"

# Generate summary report
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')

echo ""
echo "Analysis completed!"
echo "================================="
echo "Total New failed UTs: $total_failures"
echo "Regression issues: $regression_count"
echo "New UTs issues: $new_issue_count"
echo "================================="
echo ""

# Display regression cases
if [[ $regression_count -gt 0 ]]; then
echo "REGRESSION CASES ISSUE ($regression_count):"
echo "---------------------------------"
while IFS= read -r line; do
echo " $line"
done < "$regression_file"
echo ""
else
echo "✅ No regression cases found."
echo ""
fi

# Display new issue cases
if [[ $new_issue_count -gt 0 ]]; then
echo "NEW UT CASES ISSUE ($new_issue_count):"
echo "--------------------------------"
while IFS= read -r line; do
echo " $line"
done < "$new_issue_file"
echo ""
else
echo "✅ No new UT cases issue found."
echo ""
fi

# Unquoted EOF: $(basename ...) and variables inside the heredoc expand.
cat > "$summary_file" << EOF
Failed UT Categorization Report
================================
Generated: $timestamp
Failures log file: $(basename "$failures_log")
All UT log file: $(basename "$all_ut_log")

Statistics:
-----------
Total New failed UTs: $total_failures
Regression issues: $regression_count
New UTs issues: $new_issue_count

Output Files:
-------------
Regression UT list: $(basename "$regression_file") ($regression_count items)
New issue UT list: $(basename "$new_issue_file") ($new_issue_count items)

Detailed Lists:
---------------

EOF

# Add regression UT list to summary
if [[ $regression_count -gt 0 ]]; then
{
echo "Regression Issues:"
echo "-----------"
cat "$regression_file"
} >> "$summary_file"
echo "" >> "$summary_file"
else
echo "✅ No regression issues found" >> "$summary_file"
echo "" >> "$summary_file"
fi

# Add new issue UT list to summary
if [[ $new_issue_count -gt 0 ]]; then
{
echo "New Issues:"
echo "-----------"
cat "$new_issue_file"
} >> "$summary_file"
else
echo "✅ No new issues found" >> "$summary_file"
fi

# Print summary to console
# NOTE(review): this repeats the "Analysis completed!" banner already
# printed above — consider deduplicating in a follow-up.
echo ""
echo "Analysis completed!"
echo "================================="
echo "Total New failed UTs: $total_failures"
echo "Regression issues: $regression_count"
echo "New UTs issues: $new_issue_count"
echo "================================="
echo ""
echo "Output files:"
echo " Regression UT list: $regression_file"
echo " New issue UT list: $new_issue_file"
echo " Detailed summary: $summary_file"

# Show warning if no failures were found
if [[ $total_failures -eq 0 ]]; then
echo ""
echo "Note: No failed UT records found in the failures log file."
fi
}

# Main test runner for standard test suites (op_regression, op_extended, etc.)
run_main_tests() {
local suite="$1"
Expand Down Expand Up @@ -216,6 +392,7 @@ run_main_tests() {
local failed_count=0 passed_count=0
if [[ -f "failures_${suite}_filtered.log" ]]; then
failed_count=$(wc -l < "failures_${suite}_filtered.log")
categorize_failures failures_${suite}_filtered.log all_cases_${suite}_reference.log categorize_failures
fi
if [[ -f "passed_${suite}.log" ]]; then
passed_count=$(wc -l < "passed_${suite}.log")
Expand Down
Loading
Loading