diff --git a/ci/loadWin.groovy b/ci/loadWin.groovy index d2860b6fbc..b01ba8e109 100644 --- a/ci/loadWin.groovy +++ b/ci/loadWin.groovy @@ -282,14 +282,29 @@ def check_tests(){ status = bat(returnStatus: true, script: 'grep " FAILED " win_test_summary.log') if (status == 0) { def failed = bat(returnStatus: false, returnStdout: true, script: 'grep " FAILED " win_test_summary.log | wc -l') - error "Error: Windows run test failed ${status}. ${failed} failed tests . Check win_test_summary.log for details." + def failedTestsList = bat(returnStatus: false, returnStdout: true, script: 'grep " FAILED " win_test_summary.log') + error "Error: Windows run test failed ${status}. ${failed} failed tests. Failed tests:\n${failedTestsList}\nCheck win_test_summary.log for details." } else { echo "Run test no FAILED detected." } - status = bat(returnStatus: true, script: 'grep " PASSED " win_full_test.log') + status = bat(returnStatus: true, script: 'tail -50 win_full_test.log | grep " PASSED "') if (status != 0) { + // Check for segfault/termination only if PASSED is not found near the end of the log + def segfault_status = bat(returnStatus: true, script: 'grep -i "segmentation fault\\|segfault\\|crashed\\|abnormal termination" win_full_test.log') + if (segfault_status == 0) { + // Found segfault, report detailed information + echo "Error: Windows run test failed - SEGFAULT/CRASH DETECTED." 
+ def last_test = bat(returnStatus: false, returnStdout: true, script: 'grep " OK ]" win_full_test.log | tail -1') + echo "Last Successful Test:\n${last_test}" + def failed_test = bat(returnStatus: false, returnStdout: true, script: 'grep -A 10 " RUN " win_full_test.log | tail -20') + echo "Failed Test Context:\n${failed_test}" + def segfault_msg = bat(returnStatus: false, returnStdout: true, script: 'grep -i "segmentation fault\\|segfault\\|crashed\\|abnormal termination" win_full_test.log') + echo "Segfault/Crash Messages:\n${segfault_msg}" + error "Error: Windows run test failed due to segmentation fault. Check win_full_test.log for details." + } else { error "Error: Windows run test failed ${status}. Expecting PASSED at the end of log. Check pipeline.log for details." + } } else { echo "Success: Windows run test finished with success." } diff --git a/spelling-whitelist.txt b/spelling-whitelist.txt index bd12dae11c..91d193653a 100644 --- a/spelling-whitelist.txt +++ b/spelling-whitelist.txt @@ -29,3 +29,7 @@ demos/vlm_npu/README.md:157: mane ==> main, many, maine demos/vlm_npu/README.md:218: mane ==> main, many, maine demos/integration_with_OpenWebUI/README.md:423: Buildin ==> Building, Build in src/test/llm/output_parsers/lfm2_output_parser_test.cpp +windows_parse_tests.bat:105: SEH ==> SHE +windows_parse_tests.bat:108: SEH ==> SHE +windows_parse_tests.bat:120: SEH ==> SHE +windows_parse_tests.bat:123: SEH ==> SHE diff --git a/src/test/schema_test.cpp b/src/test/schema_test.cpp index c97d6cfe2e..ba77d8f3c2 100644 --- a/src/test/schema_test.cpp
+++ b/src/test/schema_test.cpp @@ -1155,7 +1155,7 @@ TEST(SchemaTest, ModelConfigPluginConfigPositive) { } ] })"; - + rapidjson::Document modelConfigSeqNegativeDoc; modelConfigSeqNegativeDoc.Parse(modelConfigTimeoutNegative); auto result = ovms::validateJsonAgainstSchema(modelConfigSeqNegativeDoc, ovms::MODELS_CONFIG_SCHEMA.c_str()); diff --git a/windows_install_build_dependencies.bat b/windows_install_build_dependencies.bat index 7f32d6143c..91df6decff 100644 --- a/windows_install_build_dependencies.bat +++ b/windows_install_build_dependencies.bat @@ -466,7 +466,7 @@ if !errorlevel! neq 0 exit /b !errorlevel! %python_path%\python.exe -m pip install --upgrade pip if !errorlevel! neq 0 exit /b !errorlevel! :: setuptools<60.0 required for numpy1.23 on python311 to install -%python_path%\python.exe -m pip install "numpy==2.2.5" "Jinja2==3.1.6" "MarkupSafe==3.0.2" +%python_path%\python.exe -m pip install "numpy==2.2.5" "Jinja2==3.1.6" "MarkupSafe==3.0.2" "pytest==8.3.5" if !errorlevel! neq 0 exit /b !errorlevel! echo [INFO] Python %python_version% installed: %python_path% goto install_curl diff --git a/windows_parse_tests.bat b/windows_parse_tests.bat new file mode 100644 index 0000000000..9c0ffa440d --- /dev/null +++ b/windows_parse_tests.bat @@ -0,0 +1,155 @@ +:: +:: Copyright (c) 2026 Intel Corporation +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License.
+:: +@echo off +setlocal EnableExtensions EnableDelayedExpansion + +set "fullLog=%~1" +set "summaryLog=%~2" +if "%fullLog%"=="" set "fullLog=win_full_test.log" +if "%summaryLog%"=="" set "summaryLog=win_test_summary.log" + +if not exist "%fullLog%" ( + echo [ERROR] Full test log not found: %fullLog% + exit /b 1 +) +if not exist "%summaryLog%" ( + echo [WARN] Summary log not found: %summaryLog% +) + +set "parserOutputTmp=%summaryLog%.parse.tmp" +set "summaryBackupTmp=%summaryLog%.orig.tmp" + +set "CRASH_PATTERN=segmentation fault\|segfault\|abnormal termination\|access violation\|sigsegv\|seh exception\|0xc0000005\|unknown file: error:" + +:: Check for FAILED markers first - do not allow PASSED text to mask test failures +grep -a -q "\[ FAILED \]\| FAILED " "%fullLog%" +if !errorlevel! equ 0 goto :exit_build_error + +:: Also check for segmentation faults or crashes +grep -a -q -i "%CRASH_PATTERN%" "%fullLog%" +if !errorlevel! equ 0 goto :exit_build_error + +:: Consider the run successful only if PASSED summary is present near the end of the log +tail -50 "%fullLog%" | grep -a -q "\[ PASSED \]" +if !errorlevel! equ 0 exit /b 0 + +:: If we reach here, tests did not complete correctly (no FAILED/crash marker but no final PASSED summary) +goto :exit_build_error + +:exit_build_error +if exist "%parserOutputTmp%" del /f /q "%parserOutputTmp%" +if exist "%summaryBackupTmp%" del /f /q "%summaryBackupTmp%" + +( +echo. +echo [ERROR] FAILED TESTS OR CRASHES DETECTED: +echo. +echo === Failed Tests (from summary/full log) === +grep -a "^\[ FAILED \]" "%fullLog%" | grep -a -v "tests, listed below" +echo. +echo === Last Successful Test === +grep -a " OK ]" "%fullLog%" | tail -1 +echo. +echo === Last Running Test (likely the one that failed) === +set "lastRunEntry=" +for /F "delims=" %%A in ('grep -a "\[ RUN" "%fullLog%" ^| tail -1') do ( + set "lastRunEntry=%%A" +) +if defined lastRunEntry ( + echo !lastRunEntry! +) else ( + echo [WARN] No gtest RUN marker found in %fullLog%. 
+) +echo. +echo === Output from Last Running Test to End of Log === +set "lastRunLine=" +for /F "tokens=1 delims=:" %%A in ('grep -a -n "\[ RUN" "%fullLog%" ^| tail -1') do ( + set "lastRunLine=%%A" +) +echo !lastRunLine! | findstr /R "^[0-9][0-9]*$" > nul +if !errorlevel! equ 0 ( + sed -n "!lastRunLine!,$p" "%fullLog%" | head -120 +) else ( + echo [WARN] Could not determine last RUN line. Showing recent RUN markers and log tail. + grep -a -n "\[ RUN" "%fullLog%" | tail -3 + echo. + tail -20 "%fullLog%" +) +echo. +echo === Context Around First FAILED Test === +set "firstFailedLine=" +set "firstFailedRunLine=" +for /F "tokens=1 delims=:" %%A in ('grep -a -n "^\[ FAILED \].*(" "%fullLog%" ^| head -1') do ( + set "firstFailedLine=%%A" +) +echo !firstFailedLine! | findstr /R "^[0-9][0-9]*$" > nul +if !errorlevel! equ 0 ( + for /F "tokens=1 delims=:" %%B in ('sed -n "1,!firstFailedLine!p" "%fullLog%" ^| grep -a -n "\[ RUN" ^| tail -1') do ( + set "firstFailedRunLine=%%B" + ) + echo !firstFailedRunLine! | findstr /R "^[0-9][0-9]*$" > nul + if !errorlevel! equ 0 ( + sed -n "!firstFailedRunLine!,$p" "%fullLog%" | head -160 + ) else ( + echo [WARN] Could not determine RUN line for first FAILED test. + ) +) else ( + echo [INFO] No per-test FAILED entry with timing found. +) +echo. +echo === SEH/Access Violation Context === +set "firstSehLine=" +set "firstSehRunLine=" +for /F "tokens=1 delims=:" %%A in ('grep -a -n -i "unknown file: error: SEH exception\|0xc0000005\|access violation\|SEH exception" "%fullLog%" ^| head -1') do ( + set "firstSehLine=%%A" +) +echo !firstSehLine! | findstr /R "^[0-9][0-9]*$" > nul +if !errorlevel! equ 0 ( + for /F "tokens=1 delims=:" %%B in ('sed -n "1,!firstSehLine!p" "%fullLog%" ^| grep -a -n "\[ RUN" ^| tail -1') do ( + set "firstSehRunLine=%%B" + ) + echo !firstSehRunLine! | findstr /R "^[0-9][0-9]*$" > nul + if !errorlevel! 
equ 0 ( + sed -n "!firstSehRunLine!,$p" "%fullLog%" | head -160 + ) else ( + echo [WARN] Could not determine RUN line for SEH exception entry. + ) +) else ( + echo [INFO] No SEH/Access Violation entry found. +) +echo. +echo === Segfault/Crash Messages (if any) === +grep -a -i "%CRASH_PATTERN%\|stack trace" "%fullLog%" || echo (none found) +echo. +echo [ERROR] Check tests summary in '%summaryLog%' and tests logs in '%fullLog%'. Rerun failed test with: windows_setupvars.bat and %cd%\bazel-bin\src\ovms_test.exe --gtest_filter='*.*' +) > "%parserOutputTmp%" 2>&1 + +if exist "%summaryLog%" ( + copy /Y "%summaryLog%" "%summaryBackupTmp%" > nul +) else ( + type nul > "%summaryBackupTmp%" +) + +( + type "%parserOutputTmp%" + echo. + type "%summaryBackupTmp%" +) > "%summaryLog%" + +if exist "%parserOutputTmp%" del /f /q "%parserOutputTmp%" +if exist "%summaryBackupTmp%" del /f /q "%summaryBackupTmp%" + +exit /b 1 diff --git a/windows_test.bat b/windows_test.bat index dfd1a8c424..24133fe096 100644 --- a/windows_test.bat +++ b/windows_test.bat @@ -98,10 +98,12 @@ if !errorlevel! neq 0 exit /b !errorlevel! :: Run install_ovms_service.bat unit tests echo Running install_ovms_service.bat unit tests... -python -m pytest tests\python\test_install_ovms_service_windows.py -v 2>&1 | tee win_install_service_test.log -if !errorlevel! neq 0 ( +python -m pytest tests\python\test_install_ovms_service_windows.py -v > win_install_service_test.log 2>&1 +set "pytestExitCode=!errorlevel!" +type win_install_service_test.log +if !pytestExitCode! neq 0 ( echo [ERROR] install_ovms_service.bat unit tests failed. See win_install_service_test.log. - exit /b !errorlevel! + exit /b !pytestExitCode! ) echo [INFO] install_ovms_service.bat unit tests passed. 
@@ -113,13 +115,25 @@ echo Running: %runTest% set regex="\[ .* ms" set sed_clean="s/ (.* ms)//g" C:\Windows\System32\tar.exe -a -c -f win_test_log.zip win_full_test.log -grep -a %regex% win_full_test.log | sed %sed_clean% > win_test_summary.log -grep -a %regex% win_full_test.log | sed %sed_clean% | grep -q " FAILED " -if !errorlevel! equ 0 goto :exit_build_error -:exit_build + +:: Create summary log with filtered results, always create the file even if grep finds no matches + grep -a %regex% win_full_test.log > win_test_summary.tmp + if !errorlevel! equ 0 ( + sed %sed_clean% win_test_summary.tmp > win_test_summary.log 2>&1 + ) else ( + echo No matching test results found > win_test_summary.log + ) + if exist win_test_summary.tmp del /f /q win_test_summary.tmp + +:: Parse logs and decide final test status using dedicated parser script +call %cd%\windows_parse_tests.bat win_full_test.log win_test_summary.log +set "parseExitCode=!errorlevel!" +if !parseExitCode! neq 0 exit /b !parseExitCode! + echo [INFO] Tests finished with no failures. Check the summary in win_test_summary.log. exit /b 0 + :exit_build_error -echo [ERROR] Check tests summary in 'win_test_summary.log' and tests logs in 'win_full_test.log'. Rerun failed test with: windows_setupvars.bat and %cd%\bazel-bin\src\ovms_test.exe --gtest_filter='*.*' +echo [ERROR] windows_test.bat failed before test parsing stage. exit /b 1 endlocal