Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 16 additions & 15 deletions check.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
from pathlib import Path

from scripts.test import binaryenjs, finalize, shared, support, wasm2js, wasm_opt
from scripts.test.shared import print_heading

assert sys.version_info >= (3, 10), 'requires Python 3.10'

Expand All @@ -41,7 +42,7 @@ def get_changelog_version():


def run_version_tests():
print('[ checking --version ... ]\n')
print_heading('checking --version ...')

not_executable_suffix = ['.DS_Store', '.txt', '.js', '.ilk', '.pdb', '.dll', '.wasm', '.manifest']
executable_prefix = ['wasm']
Expand All @@ -67,7 +68,7 @@ def run_version_tests():


def run_wasm_dis_tests():
print('\n[ checking wasm-dis on provided binaries... ]\n')
print_heading('checking wasm-dis on provided binaries...')

for t in shared.get_tests(shared.options.binaryen_test, ['.wasm']):
print('..', os.path.basename(t))
Expand All @@ -85,7 +86,7 @@ def run_wasm_dis_tests():


def run_crash_tests():
print("\n[ checking we don't crash on tricky inputs... ]\n")
print_heading("checking we don't crash on tricky inputs...")

for t in shared.get_tests(shared.get_test_dir('crash'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
Expand All @@ -95,7 +96,7 @@ def run_crash_tests():


def run_dylink_tests():
print("\n[ we emit dylink sections properly... ]\n")
print_heading('we emit dylink sections properly...')

dylink_tests = glob.glob(os.path.join(shared.options.binaryen_test, 'dylib*.wasm'))
for t in sorted(dylink_tests):
Expand All @@ -109,7 +110,7 @@ def run_dylink_tests():


def run_ctor_eval_tests():
print('\n[ checking wasm-ctor-eval... ]\n')
print_heading('checking wasm-ctor-eval...')

for t in shared.get_tests(shared.get_test_dir('ctor-eval'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
Expand All @@ -126,7 +127,7 @@ def run_ctor_eval_tests():


def run_wasm_metadce_tests():
print('\n[ checking wasm-metadce ]\n')
print_heading('checking wasm-metadce')

for t in shared.get_tests(shared.get_test_dir('metadce'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
Expand All @@ -141,10 +142,10 @@ def run_wasm_metadce_tests():

def run_wasm_reduce_tests():
if not shared.has_shell_timeout():
print('\n[ skipping wasm-reduce testcases]\n')
print_heading('skipping wasm-reduce testcases')
return

print('\n[ checking wasm-reduce testcases]\n')
print_heading('checking wasm-reduce testcases')

# fixed testcases
for t in shared.get_tests(shared.get_test_dir('reduce'), ['.wast']):
Expand All @@ -161,7 +162,7 @@ def run_wasm_reduce_tests():
# run on a nontrivial fuzz testcase, for general coverage
# this is very slow in ThreadSanitizer, so avoid it there
if 'fsanitize=thread' not in str(os.environ):
print('\n[ checking wasm-reduce fuzz testcase ]\n')
print_heading('checking wasm-reduce fuzz testcase')
# TODO: re-enable multivalue once it is better optimized
support.run_command(shared.WASM_OPT + [os.path.join(shared.options.binaryen_test, 'lit/basic/signext.wast'), '-ttf', '-Os', '-o', 'a.wasm', '--detect-features', '--disable-multivalue'])
before = os.stat('a.wasm').st_size
Expand Down Expand Up @@ -294,7 +295,7 @@ def red_stderr():


def run_spec_tests():
print('\n[ checking wasm-shell spec testcases... ]\n')
print_heading('checking wasm-shell spec testcases...')

worker_count = os.cpu_count()
print("Running with", worker_count, "workers")
Expand Down Expand Up @@ -326,7 +327,7 @@ def run_spec_tests():


def run_validator_tests():
print('\n[ running validation tests... ]\n')
print_heading('running validation tests...')
# Ensure the tests validate by default
cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_export.wast'), '-o', 'a.wasm']
support.run_command(cmd)
Expand All @@ -343,7 +344,7 @@ def run_validator_tests():


def run_example_tests():
print('\n[ checking native example testcases...]\n')
print_heading('checking native example testcases...')
if not shared.NATIVECC or not shared.NATIVEXX:
shared.fail_with_error('Native compiler (e.g. gcc/g++) was not found in PATH!')
return
Expand Down Expand Up @@ -385,7 +386,7 @@ def run_example_tests():


def run_unittest():
print('\n[ checking unit tests...]\n')
print_heading('checking unit tests...')

# equivalent to `python -m unittest discover -s ./test -v`
suite = unittest.defaultTestLoader.discover(os.path.dirname(shared.options.binaryen_test))
Expand Down Expand Up @@ -472,13 +473,13 @@ def main():

# Check/display the results
if shared.num_failures == 0:
print('\n[ success! ]')
print_heading('success!', last=True)

if shared.warnings:
print('\n' + '\n'.join(shared.warnings))

if shared.num_failures > 0:
print('\n[ ' + str(shared.num_failures) + ' failures! ]')
print_heading(f'{shared.num_failures} failures!', last=True)
return 1

return 0
Expand Down
18 changes: 10 additions & 8 deletions scripts/auto_update_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,13 @@
import subprocess
import sys

from test.shared import print_heading

from test import binaryenjs, finalize, shared, support, wasm2js, wasm_opt


def update_example_tests():
print('\n[ checking example testcases... ]\n')
print_heading('checking example testcases...')
for src in shared.get_tests(shared.get_test_dir('example')):
basename = os.path.basename(src)
output_file = os.path.join(shared.options.binaryen_bin, 'example')
Expand Down Expand Up @@ -64,7 +66,7 @@ def update_example_tests():


def update_wasm_dis_tests():
print('\n[ checking wasm-dis on provided binaries... ]\n')
print_heading('checking wasm-dis on provided binaries...')
for t in shared.get_tests(shared.options.binaryen_test, ['.wasm']):
print('..', os.path.basename(t))
cmd = shared.WASM_DIS + [t]
Expand All @@ -76,7 +78,7 @@ def update_wasm_dis_tests():


def update_ctor_eval_tests():
print('\n[ checking wasm-ctor-eval... ]\n')
print_heading('checking wasm-ctor-eval...')
for t in shared.get_tests(shared.get_test_dir('ctor-eval'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
ctors = open(t + '.ctors').read().strip()
Expand All @@ -93,7 +95,7 @@ def update_ctor_eval_tests():


def update_metadce_tests():
print('\n[ checking wasm-metadce... ]\n')
print_heading('checking wasm-metadce...')
for t in shared.get_tests(shared.get_test_dir('metadce'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
graph = t + '.graph.txt'
Expand All @@ -108,7 +110,7 @@ def update_metadce_tests():


def update_reduce_tests():
print('\n[ checking wasm-reduce ]\n')
print_heading('checking wasm-reduce')
for t in shared.get_tests(shared.get_test_dir('reduce'), ['.wast']):
print('..', os.path.basename(t))
# convert to wasm
Expand All @@ -120,7 +122,7 @@ def update_reduce_tests():


def update_spec_tests():
print('\n[ updating wasm-shell spec testcases... ]\n')
print_heading('updating wasm-shell spec testcases...')

for t in shared.options.spec_tests:
print('..', os.path.basename(t))
Expand All @@ -138,7 +140,7 @@ def update_spec_tests():


def update_lit_tests():
print('\n[ updating lit testcases... ]\n')
print_heading('updating lit testcases...')
script = os.path.join(shared.options.binaryen_root,
'scripts',
'update_lit_checks.py')
Expand Down Expand Up @@ -188,7 +190,7 @@ def main():
for test in shared.requested:
TEST_SUITES[test]()

print('\n[ success! ]')
print_heading('success!', last=True)


if __name__ == '__main__':
Expand Down
5 changes: 3 additions & 2 deletions scripts/test/binaryenjs.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
import subprocess

from . import shared, support
from .shared import print_heading


def make_js_test_header(binaryen_js):
Expand Down Expand Up @@ -52,7 +53,7 @@ def test_binaryen_js():
if not os.path.exists(shared.BINARYEN_JS):
shared.fail_with_error('no ' + shared.BINARYEN_JS + ' build to test')

print('\n[ checking binaryen.js testcases (' + shared.BINARYEN_JS + ')... ]\n')
print_heading(f'checking binaryen.js testcases ({shared.BINARYEN_JS})...')

for s in shared.get_tests(shared.get_test_dir('binaryen.js'), ['.js']):
outname = make_js_test(s, shared.BINARYEN_JS)
Expand Down Expand Up @@ -87,7 +88,7 @@ def update_binaryen_js_tests():
print('no binaryen.js build to test')
return

print('\n[ checking binaryen.js testcases... ]\n')
print_heading('checking binaryen.js testcases...')
node_has_wasm = shared.NODEJS and support.node_has_webassembly(shared.NODEJS)
for s in shared.get_tests(shared.get_test_dir('binaryen.js'), ['.js']):
outname = make_js_test(s, shared.BINARYEN_JS)
Expand Down
5 changes: 3 additions & 2 deletions scripts/test/finalize.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
import os

from . import shared, support
from .shared import print_heading


def args_for_finalize(filename):
Expand Down Expand Up @@ -46,14 +47,14 @@ def run_test(input_path):


def test_wasm_emscripten_finalize():
print('\n[ checking wasm-emscripten-finalize testcases... ]\n')
print_heading('checking wasm-emscripten-finalize testcases...')

for input_path in shared.get_tests(shared.get_test_dir('finalize'), ['.wat', '.wasm']):
run_test(input_path)


def update_finalize_tests():
print('\n[ updating wasm-emscripten-finalize testcases... ]\n')
print_heading('updating wasm-emscripten-finalize testcases...')

for input_path in shared.get_tests(shared.get_test_dir('finalize'), ['.wat', '.wasm']):
print('..', input_path)
Expand Down
18 changes: 18 additions & 0 deletions scripts/test/shared.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,6 +114,24 @@ def warn(text):
print('warning:', text, file=sys.stderr)


first = True


def print_heading(msg, last=False):
global first
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can we avoid the global and just print an extra newline here?

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think I ended up having to do this because:

  1. Some test suites call print_heading multiple times.
  2. We want the extra newlines also when running the suites as part of auto_update_tests.py

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It seems that we don't need it for point 1 because only the first call would need the preceding newline (we know within the function whether to insert a newline or not) and for point 2, we could similarly add the newline here?

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If we have a bunch of print_heading calls within a single suite, I think the expectation is that there will be an empty line between those sub-sections of tests... at least I imagine that is useful?

With your suggestion this PR would effectively change that behavior. Maybe that is fine?

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I just mean that within a testsuite we can add a print() before each subsection of the test, and in between testsuites, we'd add the newline after the code that I linked. Basically if we make each subsection end with an extra newline, and add an extra newline between each testsuite, then it should do what we want and we don't need the global logic.

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sure, I can give that a go.


if last:
print(f'\n[ {msg} ]')
return

if first:
print(f'[ {msg} ]\n')
first = False
return

print(f'\n[ {msg} ]\n')


# setup

# Locate Binaryen build artifacts directory (bin/ by default)
Expand Down
5 changes: 3 additions & 2 deletions scripts/test/wasm2js.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
import subprocess

from . import shared, support
from .shared import print_heading

basic_tests = shared.get_tests(os.path.join(shared.options.binaryen_test, 'lit', 'basic'))
# memory64 is not supported in wasm2js yet (but may be with BigInt eventually).
Expand Down Expand Up @@ -159,7 +160,7 @@ def test_asserts_output():


def test_wasm2js():
print('\n[ checking wasm2js testcases... ]\n')
print_heading('checking wasm2js testcases...')
check_for_stale_files()
if shared.skip_if_on_windows('wasm2js'):
return
Expand All @@ -168,7 +169,7 @@ def test_wasm2js():


def update_wasm2js_tests():
print('\n[ checking wasm2js ]\n')
print_heading('checking wasm2js')

for opt in (0, 1):
for wasm in basic_tests + spec_tests + wasm2js_tests:
Expand Down
15 changes: 8 additions & 7 deletions scripts/test/wasm_opt.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,11 @@
import subprocess

from . import shared, support
from .shared import print_heading


def test_wasm_opt():
print('\n[ checking wasm-opt -o notation... ]\n')
print_heading('checking wasm-opt -o notation...')

for extra_args in [[], ['--no-validation']]:
wast = os.path.join(shared.options.binaryen_test, 'hello_world.wat')
Expand All @@ -30,7 +31,7 @@ def test_wasm_opt():
support.run_command(cmd)
shared.fail_if_not_identical_to_file(open(out).read(), wast)

print('\n[ checking wasm-opt binary reading/writing... ]\n')
print_heading('checking wasm-opt binary reading/writing...')

shutil.copyfile(os.path.join(shared.options.binaryen_test, 'hello_world.wat'), 'a.wat')
shared.delete_from_orbit('a.wasm')
Expand All @@ -40,7 +41,7 @@ def test_wasm_opt():
support.run_command(shared.WASM_OPT + ['a.wasm', '-o', 'b.wast', '-S', '-q'])
assert open('b.wast', 'rb').read()[0] != 0, 'we emit text with -S'

print('\n[ checking wasm-opt passes... ]\n')
print_heading('checking wasm-opt passes...')

for t in shared.get_tests(shared.get_test_dir('passes'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
Expand Down Expand Up @@ -87,7 +88,7 @@ def test_wasm_opt():
with open('a.wat') as actual:
shared.fail_if_not_identical_to_file(actual.read(), t + '.wat')

print('\n[ checking wasm-opt parsing & printing... ]\n')
print_heading('checking wasm-opt parsing & printing...')

for t in shared.get_tests(shared.get_test_dir('print'), ['.wast']):
print('..', os.path.basename(t))
Expand All @@ -104,13 +105,13 @@ def test_wasm_opt():


def update_wasm_opt_tests():
print('\n[ updating wasm-opt -o notation... ]\n')
print_heading('updating wasm-opt -o notation...')
wast = os.path.join(shared.options.binaryen_test, 'hello_world.wat')
cmd = shared.WASM_OPT + [wast, '-o', 'a.wast', '-S']
support.run_command(cmd)
open(wast, 'w').write(open('a.wast').read())

print('\n[ updating wasm-opt parsing & printing... ]\n')
print_heading('updating wasm-opt parsing & printing...')
for t in shared.get_tests(shared.get_test_dir('print'), ['.wast']):
print('..', os.path.basename(t))
wasm = t.replace('.wast', '')
Expand All @@ -126,7 +127,7 @@ def update_wasm_opt_tests():
with open(wasm + '.minified.txt', 'wb') as o:
o.write(actual)

print('\n[ updating wasm-opt passes... ]\n')
print_heading('updating wasm-opt passes...')
for t in shared.get_tests(shared.get_test_dir('passes'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
# windows has some failures that need to be investigated:
Expand Down
Loading