diff --git a/.github/workflows/test-cm-based-submission-generation.yml b/.github/workflows/test-cm-based-submission-generation.yml
index 68f46267aa..4bd9d94304 100644
--- a/.github/workflows/test-cm-based-submission-generation.yml
+++ b/.github/workflows/test-cm-based-submission-generation.yml
@@ -19,12 +19,29 @@ jobs:
         python-version: [ "3.12" ]
         division: ["closed", "open", "closed-open"]
         category: ["datacenter", "edge"]
-        case: ["closed", "closed-no-compliance", "closed-power", "closed-failed-power-logs", "case-3", "case-7", "case-8"]
+        case: ["closed", "closed-no-compliance", "closed-power", "closed-failed-power-logs", "case-1", "case-2", "case-3", "case-5", "case-6", "case-7", "case-8"]
         action: ["run", "docker"]
         exclude:
           - os: macos-latest
           - os: windows-latest
           - category: "edge"
+          - case: case-1
+            division: closed
+          - case: case-1
+            division: closed-open
+          - case: case-2
+            division: closed
+          - case: case-2
+            division: closed-open
+          - case: case-5
+            division: closed
+          - case: case-5
+            division: closed-open
+          - case: case-6
+            division: closed
+          - case: case-6
+            division: closed-open
+
     steps:
     - uses: actions/checkout@v4
     - name: Set up Python ${{ matrix.python-version }}
@@ -39,6 +56,7 @@ jobs:
       run: |
         git clone -b submission-generation-tests https://github.com/mlcommons/inference.git submission_generation_tests
     - name: Run Submission Generation - ${{ matrix.case }} ${{ matrix.action }} ${{ matrix.category }} ${{ matrix.division }}
+      continue-on-error: true
      run: |
        if [ "${{ matrix.case }}" == "case-3" ]; then
          description="Submission generation (model_mapping.json not present but model name matches with official one)"
@@ -63,5 +81,18 @@ jobs:
        # Dynamically set the log group to simulate a dynamic step name
        echo "::group::$description"
        cm ${{ matrix.action }} script --tags=generate,inference,submission --adr.submission-checker-src.tags=_branch.dev --clean --preprocess_submission=yes --results_dir=$PWD/submission_generation_tests/${{ matrix.case }}/ --run-checker --submitter=MLCommons --tar=yes --env.CM_TAR_OUTFILE=submission.tar.gz --division=${{ matrix.division }} --env.CM_DETERMINE_MEMORY_CONFIGURATION=yes --quiet $extra_run_args
+       exit_status=$?
+ echo "Exit status for the job ${description} ${exit_status}" + if [[ "${{ matrix.case }}" == "case-5" || "${{ matrix.case }}" == "case-6" ]]; then + # For cases 5 and 6, exit status should be 0 if cm command fails, 1 if it succeeds + if [[ ${exit_status} -ne 0 ]]; then + exit 0 + else + exit ${exit_status} + fi + else + # For other cases, exit with the original status + test ${exit_status} -eq 0 || exit ${exit_status} + fi echo "::endgroup::" diff --git a/VERSION b/VERSION index 1d0ba9ea18..267577d47e 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.4.0 +0.4.1 diff --git a/script/generate-mlperf-inference-submission/_cm.yaml b/script/generate-mlperf-inference-submission/_cm.yaml index 9e6200556b..32003a1b33 100644 --- a/script/generate-mlperf-inference-submission/_cm.yaml +++ b/script/generate-mlperf-inference-submission/_cm.yaml @@ -83,6 +83,7 @@ input_mapping: submitter: CM_MLPERF_SUBMITTER sw_notes_extra: CM_MLPERF_SUT_SW_NOTES_EXTRA tar: CM_TAR_SUBMISSION_DIR + get_platform_details: CM_GET_PLATFORM_DETAILS post_deps: - enable_if_env: CM_RUN_MLPERF_ACCURACY: diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py index 9156fbbe16..6516451fd2 100644 --- a/script/generate-mlperf-inference-submission/customize.py +++ b/script/generate-mlperf-inference-submission/customize.py @@ -240,6 +240,7 @@ def generate_submission(env, state, inp, submission_division): compliance_path = os.path.join(path_submission, "compliance", sub_res) system_path = os.path.join(path_submission, "systems") submission_system_path = system_path + if not os.path.isdir(submission_system_path): os.makedirs(submission_system_path) system_file = os.path.join(submission_system_path, sub_res+".json") @@ -273,6 +274,8 @@ def generate_submission(env, state, inp, submission_division): print('* MLPerf inference model: {}'.format(model)) for scenario in scenarios: + # the system_info.txt is copied from the mode directory if found, else it would be looked under scenario directory + system_info_from_mode = False results[model][scenario] = {} result_scenario_path = os.path.join(result_model_path, scenario) submission_scenario_path = os.path.join(submission_model_path, scenario) @@ -429,6 +432,8 @@ def generate_submission(env, state, inp, submission_division): elif f in [ "README.md", "README-extra.md", "cm-version-info.json", "os_info.json", "cpu_info.json", "pip_freeze.json", "system_info.txt", "cm-deps.png", "cm-deps.mmd" ] and mode == "performance": shutil.copy(os.path.join(result_mode_path, f), os.path.join(submission_measurement_path, f)) if f == "system_info.txt" and not platform_info_file: + # the first found system_info.txt will be taken as platform info file for a specific model to be placed in + # measurements-model folder when generating the final submission platform_info_file = os.path.join(result_mode_path, f) elif f in [ "console.out" ]: shutil.copy(os.path.join(result_mode_path, f), os.path.join(submission_measurement_path, mode+"_"+f)) @@ -445,6 +450,9 @@ def generate_submission(env, state, inp, submission_division): p_target = os.path.join(submission_results_path, f) shutil.copy(os.path.join(result_mode_path, f), p_target) + if os.path.exists(os.path.join(result_scenario_path, "system_info.txt")): + shutil.copy(os.path.join(result_scenario_path, "system_info.txt"), os.path.join(submission_measurement_path, f)) + platform_info_file = os.path.join(result_scenario_path, "system_info.txt") readme_file = os.path.join(submission_measurement_path, 
"README.md") if not os.path.exists(readme_file): @@ -459,24 +467,39 @@ def generate_submission(env, state, inp, submission_division): with open(readme_file, mode='a') as f: f.write(result_string) - #Copy system_info.txt to the submission measurements model folder if any scenario performance run has it + # Copy system_info.txt to the submission measurements model folder if any scenario performance run has it sys_info_file = None + if os.path.exists(os.path.join(result_model_path, "system_info.txt")): sys_info_file = os.path.join(result_model_path, "system_info.txt") elif platform_info_file: sys_info_file = platform_info_file + if sys_info_file: model_platform_info_file = sys_info_file shutil.copy(sys_info_file, os.path.join(measurement_model_path, "system_info.txt")) #Copy system_info.txt to the submission measurements folder if any model performance run has it sys_info_file = None + if os.path.exists(os.path.join(result_path, "system_info.txt")): sys_info_file = os.path.join(result_path, "system_info.txt") elif model_platform_info_file: sys_info_file = model_platform_info_file + if sys_info_file: shutil.copy(sys_info_file, os.path.join(measurement_path, "system_info.txt")) + else: + if env.get('CM_GET_PLATFORM_DETAILS', '') == "yes": + cm_input = {'action': 'run', + 'automation': 'script', + 'tags': 'get,platform,details', + 'env': {'CM_PLATFORM_DETAILS_FILE_PATH': os.path.join(measurement_path, "system_info.txt")}, + 'quiet': True + } + r = cmind.access(cm_input) + if r['return'] > 0: + return r with open(system_file, "w") as fp: