@@ -29,42 +29,13 @@ jobs:
python-version: [ "3.12" ]
division: ["closed", "open", "closed-open"]
category: ["datacenter", "edge"]
round: ["5.0", "5.1"]
round: ["5.1"]
folder_name: ["closed", "open"]
action: ["run", "docker"]
exclude:
- os: macos-latest
- os: windows-latest
- category: "edge"
-include:
-- os: ubuntu-latest
-python-version: "3.12"
-division: "closed"
-category: "edge"
-round: "5.0"
-folder_name: "closed-edge"
-action: "run"
-- os: ubuntu-latest
-python-version: "3.12"
-division: "closed"
-category: "edge"
-round: "5.0"
-folder_name: "closed-edge"
-action: "docker"
-- os: ubuntu-latest
-python-version: "3.12"
-division: "open"
-category: "edge"
-round: "5.0"
-folder_name: "closed-edge"
-action: "run"
-- os: ubuntu-latest
-python-version: "3.12"
-division: "open"
-category: "edge"
-round: "5.0"
-folder_name: "closed-edge"
-action: "docker"
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
@@ -87,13 +58,13 @@ jobs:
fi
- name: Pull repo where test cases are uploaded
run: |
-git clone -b submission-generation-examples https://github.com/mlcommons/inference.git submission_generation_examples
+git clone -b new-sub-dir https://github.com/anandhu-eng/inference.git submission_generation_examples
- name: Run Submission Generation - round-${{ matrix.round }}${{ matrix.folder_name }} ${{ matrix.action }} ${{ matrix.category }} ${{ matrix.division }}
run: |
if [ "${{ matrix.folder_name }}" == "closed" ]; then
description="Test submission - contains closed edge and datacenter"
elif [ "${{ matrix.folder_name }}" == "closed-power" ]; then
description="Test submission - contains closed-power edge and datacenter results"
# elif [ "${{ matrix.folder_name }}" == "closed-power" ]; then
# description="Test submission - contains closed-power edge and datacenter results"
fi
# Dynamically set the log group to simulate a dynamic step name
echo "::group::$description"
@@ -104,11 +75,12 @@ jobs:
fi

if [ -n "${{ inputs.repo-url }}" ]; then
-mlc ${{ matrix.action }} script --tags=generate,inference,submission --adr.compiler.tags=gcc --version=v${{ matrix.round }} --clean --preprocess_submission=yes --submission_base_dir=mysubmissions --results_dir=$PWD/submission_generation_examples/submission_round_${{ matrix.round }}/${{ matrix.folder_name }}/ --run_checker --submitter=MLCommons --tar=yes --division=${{ matrix.division }} --env.MLC_DETERMINE_MEMORY_CONFIGURATION=yes --quiet --adr.inference-src.tags=_repo.${{ inputs.repo-url }},_branch.${{ inputs.ref }} $docker_tags
-mlc ${{ matrix.action }} script --tags=run,inference,submission,checker --submitter_id_off=mysubmitter_id --tar=yes --submission_dir=mysubmissions/mlperf_submission --submission_tar_file=mysubmission.tar.gz --quiet --src_version=v${{ matrix.round }} --adr.inference-src.tags=_repo.${{ inputs.repo-url }},_branch.${{ inputs.ref }} $docker_tags
+mlc ${{ matrix.action }} script --tags=generate,inference,submission --adr.compiler.tags=gcc --version=v${{ matrix.round }} --clean --preprocess_submission=yes --submission_base_dir=mysubmissions --results_dir=$PWD/submission_generation_examples/submission_round_${{ matrix.round }}/${{ matrix.folder_name }}/ --run_checker --submitter=MLCommons --tar=yes --division=${{ matrix.division }} --env.MLC_DETERMINE_MEMORY_CONFIGURATION=yes --quiet --adr.inference-src.tags=_repo.${{ inputs.repo-url }},_branch.${{ inputs.ref }} --extra_checker_args="--skip-extra-accuracy-files-check" $docker_tags
+mlc ${{ matrix.action }} script --tags=run,inference,submission,checker --submitter_id_off=mysubmitter_id --tar=yes --submission_dir=mysubmissions/mlperf_submission --submission_tar_file=mysubmission.tar.gz --quiet --src_version=v${{ matrix.round }} --adr.inference-src.tags=_repo.${{ inputs.repo-url }},_branch.${{ inputs.ref }} --extra_checker_args="--skip-extra-accuracy-files-check" $docker_tags
+
else
-mlc ${{ matrix.action }} script --tags=generate,inference,submission --adr.compiler.tags=gcc --version=v${{ matrix.round }} --clean --preprocess_submission=yes --submission_base_dir=mysubmissions --results_dir=$PWD/submission_generation_examples/submission_round_${{ matrix.round }}/${{ matrix.folder_name }}/ --run_checker --submitter=MLCommons --tar=yes --division=${{ matrix.division }} --env.MLC_DETERMINE_MEMORY_CONFIGURATION=yes --quiet $docker_tags
-mlc ${{ matrix.action }} script --tags=run,inference,submission,checker --submitter_id_off=mysubmitter_id --tar=yes --submission_dir=mysubmissions/mlperf_submission --submission_tar_file=mysubmission.tar.gz --quiet --src_version=v${{ matrix.round }} $docker_tags
+mlc ${{ matrix.action }} script --tags=generate,inference,submission --adr.compiler.tags=gcc --version=v${{ matrix.round }} --clean --preprocess_submission=yes --submission_base_dir=mysubmissions --results_dir=$PWD/submission_generation_examples/submission_round_${{ matrix.round }}/${{ matrix.folder_name }}/ --run_checker --submitter=MLCommons --tar=yes --division=${{ matrix.division }} --env.MLC_DETERMINE_MEMORY_CONFIGURATION=yes --extra_checker_args="--skip-extra-accuracy-files-check" --quiet $docker_tags
+mlc ${{ matrix.action }} script --tags=run,inference,submission,checker --submitter_id_off=mysubmitter_id --tar=yes --submission_dir=mysubmissions/mlperf_submission --submission_tar_file=mysubmission.tar.gz --quiet --src_version=v${{ matrix.round }} --extra_checker_args="--skip-extra-accuracy-files-check" $docker_tags
fi

echo "::endgroup::"
script/generate-mlperf-inference-submission/customize.py (14 additions, 35 deletions)
@@ -309,16 +309,11 @@ def generate_submission(env, state, inp, submission_division, logger):
sub_res = new_res

submission_path = os.path.join(path_submission, "results", sub_res)
-measurement_path = os.path.join(
-path_submission, "measurements", sub_res)
-compliance_path = os.path.join(path_submission, "compliance", sub_res)
system_path = os.path.join(path_submission, "systems")
submission_system_path = system_path

if not os.path.isdir(submission_path):
os.makedirs(submission_path)
-if not os.path.isdir(measurement_path):
-os.makedirs(measurement_path)
if not os.path.isdir(submission_system_path):
os.makedirs(submission_system_path)
system_file = os.path.join(submission_system_path, sub_res + ".json")
@@ -342,8 +337,6 @@ def generate_submission(env, state, inp, submission_division, logger):
platform_info_file = None
result_model_path = os.path.join(result_path, model)
submission_model_path = os.path.join(submission_path, model)
-measurement_model_path = os.path.join(measurement_path, model)
-compliance_model_path = os.path.join(compliance_path, model)
code_model_path = os.path.join(code_path, model)
scenarios = [
f for f in os.listdir(result_model_path) if not os.path.isfile(
@@ -367,10 +360,6 @@ def generate_submission(env, state, inp, submission_division, logger):
result_model_path, scenario)
submission_scenario_path = os.path.join(
submission_model_path, scenario)
-measurement_scenario_path = os.path.join(
-measurement_model_path, scenario)
-compliance_scenario_path = os.path.join(
-compliance_model_path, scenario)

'''
if duplicate and scenario == 'singlestream':
@@ -445,20 +434,11 @@
)
continue

-if not os.path.isdir(measurement_scenario_path):
-os.makedirs(measurement_scenario_path)
-
for mode in modes:
result_mode_path = os.path.join(result_scenario_path, mode)
submission_mode_path = os.path.join(
submission_scenario_path, mode)
-submission_measurement_path = measurement_scenario_path
-submission_compliance_path = os.path.join(
-compliance_scenario_path, mode)
-if mode.startswith("TEST"):
-submission_results_path = submission_compliance_path
-else:
-submission_results_path = submission_mode_path
+submission_results_path = submission_mode_path
if os.path.exists(submission_results_path):
shutil.rmtree(submission_results_path)

@@ -492,10 +472,10 @@ def generate_submission(env, state, inp, submission_division, logger):

shutil.copy(
analyzer_settings_file, os.path.join(
submission_measurement_path, "analyzer_table.md"))
submission_scenario_path, "analyzer_table.md"))
shutil.copy(
power_settings_file, os.path.join(
submission_measurement_path, "power_settings.md"))
submission_scenario_path, "power_settings.md"))

result_ranging_path = os.path.join(
result_mode_path, 'ranging')
@@ -563,14 +543,14 @@ def generate_submission(env, state, inp, submission_division, logger):
if os.path.exists(user_conf_path):
shutil.copy(
user_conf_path, os.path.join(
-measurement_scenario_path, 'user.conf'))
+submission_scenario_path, 'user.conf'))
else:
user_conf_path = os.path.join(
result_mode_path, "user.conf")
if os.path.exists(user_conf_path):
shutil.copy(
user_conf_path, os.path.join(
-submission_measurement_path, 'user.conf'))
+submission_scenario_path, 'user.conf'))
else:
if mode.lower() == "performance":
return {
@@ -579,7 +559,7 @@ def generate_submission(env, state, inp, submission_division, logger):
# First check for measurements directory in scenario folder
measurements_json_path = os.path.join(
result_scenario_path, "measurements.json")
-target_measurement_json_path = measurement_scenario_path
+target_measurement_json_path = submission_scenario_path

if not os.path.exists(measurements_json_path):
measurements_json_path = os.path.join(
@@ -668,7 +648,7 @@ def generate_submission(env, state, inp, submission_division, logger):
shutil.copy(
os.path.join(
result_mode_path, f), os.path.join(
-submission_measurement_path, f))
+submission_scenario_path, f))
if f == "system_info.txt" and not platform_info_file:
# the first found system_info.txt will be taken as platform info file for a specific model to be placed in
# measurements-model folder when generating
@@ -679,7 +659,7 @@ def generate_submission(env, state, inp, submission_division, logger):
shutil.copy(
os.path.join(
result_mode_path, f), os.path.join(
submission_measurement_path, mode + "_" + f))
submission_scenario_path, mode + "_" + f))

if mode == "accuracy":
if os.path.exists(os.path.join(
@@ -705,13 +685,12 @@ def generate_submission(env, state, inp, submission_division, logger):
result_scenario_path, "system_info.txt")):
shutil.copy(
os.path.join(
result_scenario_path, "system_info.txt"), os.path.join(
submission_measurement_path, f))
result_scenario_path, "system_info.txt"), submission_scenario_path)
platform_info_file = os.path.join(
result_scenario_path, "system_info.txt")

readme_file = os.path.join(
submission_measurement_path, "README.md")
submission_scenario_path, "README.md")
if not os.path.exists(readme_file):
with open(readme_file, mode='w') as f:
f.write("TBD") # create an empty README
@@ -741,7 +720,7 @@ def generate_submission(env, state, inp, submission_division, logger):
shutil.copy(
sys_info_file,
os.path.join(
-measurement_model_path,
+submission_model_path,
"system_info.txt"))

# Copy system_info.txt to the submission measurements folder if any
@@ -757,14 +736,14 @@ def generate_submission(env, state, inp, submission_division, logger):
shutil.copy(
sys_info_file,
os.path.join(
-measurement_path,
+submission_path,
"system_info.txt"))
else:
if env.get('MLC_GET_PLATFORM_DETAILS', '') == "yes":
mlc_input = {'action': 'run',
'automation': 'script',
'tags': 'get,platform,details',
-'env': {'MLC_PLATFORM_DETAILS_FILE_PATH': os.path.join(measurement_path, "system_info.txt")},
+'env': {'MLC_PLATFORM_DETAILS_FILE_PATH': os.path.join(submission_path, "system_info.txt")},
'quiet': True
}
mlc = i['automation'].action_object
@@ -779,7 +758,7 @@ def generate_submission(env, state, inp, submission_division, logger):

logger.info(tabulate(result_table, headers=headers, tablefmt="pretty"))

-sut_readme_file = os.path.join(measurement_path, "README.md")
+sut_readme_file = os.path.join(submission_path, "README.md")
with open(sut_readme_file, mode='w') as f:
f.write(tabulate(result_table, headers=headers, tablefmt="github"))

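
The thrust of the customize.py changes above is that the parallel "measurements" and "compliance" trees are gone: measurement artifacts (user.conf, measurements.json, README.md, system_info.txt) and TEST* compliance runs now land in the same scenario folder under results/. A rough before/after sketch of the target paths, using hypothetical submitter and system names (not taken from this PR):

```python
# Rough before/after sketch of the path change in this diff. The submitter,
# system (sub_res), model, and scenario values are hypothetical placeholders.
import os

path_submission = "mysubmissions/mlperf_submission/closed/MLCommons"
sub_res, model, scenario = "mysystem-reference-cpu-pytorch", "resnet50", "Offline"

# Before: a separate "measurements" tree mirrored the results tree.
old_target = os.path.join(path_submission, "measurements", sub_res, model, scenario)

# After: measurement files are written next to the results themselves.
new_target = os.path.join(path_submission, "results", sub_res, model, scenario)

print(old_target)
print(new_target)
```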
script/generate-mlperf-inference-submission/meta.yaml (1 addition, 1 deletion)
@@ -66,7 +66,7 @@ input_mapping:
dashboard_wb_project: MLC_MLPERF_DASHBOARD_WANDB_PROJECT
device: MLC_MLPERF_DEVICE
division: MLC_MLPERF_SUBMISSION_DIVISION
-extra_checker_args: MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARG
+extra_checker_args: MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS
hw_name: MLC_HW_NAME
hw_notes_extra: MLC_MLPERF_SUT_HW_NOTES_EXTRA
noinfer_scenario_results: MLC_MLPERF_NOINFER_SCENARIO_RESULTS
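
The meta.yaml fix renames the mapped environment variable from MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARG to MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS, matching the --extra_checker_args flag the workflow now passes. A simplified sketch of how such an input_mapping is typically applied (assumed behavior for illustration, not the actual MLC implementation):

```python
# Illustrative sketch (assumption, not MLC's real code): input_mapping entries
# translate CLI inputs into environment variables, so the renamed key means
# --extra_checker_args now populates MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS.
input_mapping = {"extra_checker_args": "MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS"}

def apply_input_mapping(inp: dict, env: dict) -> None:
    # Copy each recognised input into its mapped environment variable.
    for key, env_var in input_mapping.items():
        if key in inp:
            env[env_var] = inp[key]

env = {}
apply_input_mapping({"extra_checker_args": "--skip-extra-accuracy-files-check"}, env)
print(env["MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS"])
```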
script/get-mlperf-inference-utils/mlperf_utils.py (2 additions, 1 deletion)
@@ -188,7 +188,8 @@ def get_result_string(version, model, scenario, result_path, has_power, sub_res,
result['performance'] = round(performance_result_, 3)

if scenario != effective_scenario:
-inferred, inferred_result = checker.get_inferred_result(
+print(f"{fname} : {scenario} : {effective_scenario}")
+inferred, inferred_result, inferred_valid = checker.get_inferred_result(
scenario, effective_scenario, performance_result, mlperf_log, config, False)

if has_power:
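
The mlperf_utils.py change adapts the call site to a submission-checker API that now returns a third value, inferred_valid, alongside the inferred flag and the inferred result. A minimal self-contained sketch of the new call shape; the stub below stands in for checker.get_inferred_result, and only the 3-tuple return is taken from the diff:

```python
# Minimal sketch of consuming the updated checker API as this diff uses it.
# The stub stands in for the real submission-checker function; its body is a
# placeholder, only the three-value return shape is taken from the diff.
def get_inferred_result(scenario, effective_scenario, performance_result,
                        mlperf_log=None, config=None, strict=False):
    # Stand-in: pretend the result for `scenario` can be inferred from the
    # run's effective scenario and is valid.
    return True, performance_result, True

inferred, inferred_result, inferred_valid = get_inferred_result(
    "Offline", "Server", 1234.5)
if inferred and inferred_valid:
    print(f"inferred result: {round(inferred_result, 3)}")
```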