From 256a276babd62bb4e977080e1d86ac2a131a7580 Mon Sep 17 00:00:00 2001
From: Grigori Fursin
Date: Wed, 10 Apr 2024 21:10:02 +0200
Subject: [PATCH 1/2] CM repo meta

---
 cmr.yaml | 4 ++++
 1 file changed, 4 insertions(+)
 create mode 100644 cmr.yaml

diff --git a/cmr.yaml b/cmr.yaml
new file mode 100644
index 0000000000..0a6633e9cf
--- /dev/null
+++ b/cmr.yaml
@@ -0,0 +1,4 @@
+alias: mlcommons@cm4mlops
+uid: 9e97bb72b0474657
+
+git: true

From 845b6c67b67c4cf0c1655cfb0c04db189717d78b Mon Sep 17 00:00:00 2001
From: Grigori Fursin
Date: Tue, 16 Apr 2024 14:45:41 +0200
Subject: [PATCH 2/2] first commit moving cm-mlops from mlcommons@ck

---
 .github/workflows/check-all-broken-links.md | 17 +
 .github/workflows/check-broken-links.md | 17 +
 .github/workflows/test-cm-script-features.yml | 38 +
 .github/workflows/test-cm-scripts.yml | 36 +
 .../workflows/test-cm-tutorial-retinanet.yml | 35 +
 .../workflows/test-cm-tutorial-tvm-pip.yml | 57 +
 .github/workflows/test-cm-tutorial-tvm.yml | 36 +
 .github/workflows/test-cm.yml | 69 +
 .../test-image-classification-onnx.yml | 36 +
 ...bert-deepsparse-tf-onnxruntime-pytorch.yml | 44 +
 .../workflows/test-mlperf-inference-gptj.yml | 38 +
 ...lperf-inference-mlcommons-cpp-resnet50.yml | 38 +
 .../test-mlperf-inference-resnet50.yml | 43 +
 .../test-mlperf-inference-retinanet.yml | 41 +
 .../workflows/test-mlperf-inference-rnnt.yml | 38 +
 .../workflows/test-mlperf-inference-tvm.yml | 38 +
 ...adgen-onnx-huggingface-bert-fp32-squad.yml | 36 +
 .../workflows/test-qaic-compute-sdk-build.yml | 35 +
 .github/workflows/test-qaic-software-kit.yml | 41 +
 .../workflows/update-script-dockerfiles.yml | 41 +
 .github/workflows/update-script-readme.yml | 46 +
 CHANGES.md | 213 +
 CONTRIBUTING.md | 70 +-
 LICENSE.third-party.md | 1 +
 README.md | 29 +
 automation/cache/README-extra.md | 71 +
 automation/cache/README.md | 87 +
 automation/cache/_cm.json | 12 +
 automation/cache/module.py | 212 +
 automation/cache/module_misc.py | 98 +
 automation/cfg/README.md | 27 +
 automation/cfg/_cm.json | 9 +
 automation/cfg/module.py | 52 +
 automation/challenge/README.md | 27 +
 automation/challenge/_cm.json | 9 +
 automation/challenge/module.py | 52 +
 automation/contributor/README.md | 47 +
 automation/contributor/_cm.json | 9 +
 automation/contributor/module.py | 153 +
 automation/data/_cm.json | 9 +
 automation/data/module.py | 52 +
 automation/docker/README.md | 27 +
 automation/docker/_cm.json | 11 +
 automation/docker/module.py | 51 +
 automation/docs/_cm.json | 9 +
 automation/docs/module.py | 52 +
 automation/experiment/README-extra.md | 315 +
 automation/experiment/README.md | 87 +
 automation/experiment/_cm.json | 11 +
 automation/experiment/module.py | 804 +++
 automation/experiment/tests/test2.bat | 1 +
 automation/experiment/tests/test2.sh | 1 +
 automation/experiment/tests/test3.bat | 1 +
 automation/experiment/tests/test3.sh | 1 +
 automation/experiment/tests/test3_input.yaml | 4 +
 automation/experiment/tests/test__json.bat | 1 +
 automation/experiment/tests/test__json.sh | 1 +
 automation/experiment/tests/test__yaml.bat | 1 +
 automation/experiment/tests/test__yaml.sh | 1 +
 automation/experiment/tests/test_input.json | 14 +
 automation/experiment/tests/test_input.yaml | 4 +
 automation/experiment/tests/test_run.bat | 3 +
 automation/experiment/tests/test_run.sh | 1 +
 automation/project/README.md | 27 +
 automation/project/_cm.json | 10 +
 automation/project/module.py | 52 +
 automation/report/README.md | 27 +
 automation/report/_cm.json | 9 +
 automation/report/module.py | 52 +
 automation/script/README-extra.md | 1023 ++++
 automation/script/README-specs.md | 81 +
 automation/script/README.md | 427 ++
 automation/script/_cm.json | 16 +
 automation/script/assets/scripts-workflow.png | Bin 0 -> 242876 bytes
 automation/script/module.py | 5062 +++++++++++++++++
 automation/script/module_help.py | 100 +
 automation/script/module_misc.py | 1990 +++++++
 .../script/template-ae-python/README-extra.md | 2 +
 automation/script/template-ae-python/_cm.yaml | 38 +
 .../script/template-ae-python/analyze.bat | 12 +
 .../script/template-ae-python/analyze.sh | 12 +
 .../script/template-ae-python/customize.py | 22 +
 .../template-ae-python/install_deps.bat | 18 +
 .../script/template-ae-python/install_deps.sh | 17 +
 automation/script/template-ae-python/main.py | 10 +
 automation/script/template-ae-python/plot.bat | 12 +
 automation/script/template-ae-python/plot.sh | 12 +
 .../script/template-ae-python/reproduce.bat | 12 +
 .../script/template-ae-python/reproduce.sh | 12 +
 automation/script/template-ae-python/run.bat | 12 +
 automation/script/template-ae-python/run.sh | 12 +
 .../script/template-ae-python/validate.bat | 12 +
 .../script/template-ae-python/validate.sh | 12 +
 .../script/template-python/README-extra.md | 1 +
 automation/script/template-python/_cm.yaml | 23 +
 .../script/template-python/customize.py | 30 +
 automation/script/template-python/main.py | 10 +
 .../script/template-python/requirements.txt | 0
 automation/script/template-python/run.bat | 25 +
 automation/script/template-python/run.sh | 24 +
 .../script/template-pytorch/README-extra.md | 1 +
 automation/script/template-pytorch/_cm.yaml | 42 +
 .../script/template-pytorch/customize.py | 30 +
 automation/script/template-pytorch/main.py | 15 +
 .../script/template-pytorch/requirements.txt | 0
 automation/script/template-pytorch/run.bat | 25 +
 automation/script/template-pytorch/run.sh | 24 +
 automation/script/template/README-extra.md | 1 +
 automation/script/template/customize.py | 22 +
 automation/script/template/run.bat | 1 +
 automation/script/template/run.sh | 27 +
 automation/script/template_list_of_scripts.md | 52 +
 automation/utils/README.md | 387 ++
 automation/utils/_cm.json | 12 +
 automation/utils/module.py | 986 ++++
 automation/utils/module_cfg.py | 225 +
 cfg/benchmark-hardware-compute/_cm.json | 10 +
 .../amd-cpu-x64.json | 6 +
 cfg/benchmark-hardware-compute/amd-gpu.json | 6 +
 .../generic-cpu-arm64.json | 6 +
 .../google-tpu.json | 6 +
 .../habana-gaudi.json | 6 +
 .../intel-cpu-x64.json | 6 +
 .../nvidia-gpu-jetson-orin.yaml | 7 +
 .../nvidia-gpu.json | 6 +
 .../qualcomm-ai100.json | 6 +
 .../stm-32L4R5ZIT6U-NUCLEO-L4R5ZI.yaml | 5 +
 cfg/benchmark-list/_cm.json | 10 +
 cfg/benchmark-list/loadgen-cpp.yaml | 19 +
 cfg/benchmark-list/loadgen-python.yaml | 16 +
 cfg/benchmark-list/mlperf-abtf.yaml | 18 +
 cfg/benchmark-list/mlperf-inference.yaml | 28 +
 cfg/benchmark-list/mlperf-mobile.yaml | 14 +
 cfg/benchmark-list/mlperf-tiny.yaml | 16 +
 cfg/benchmark-list/mlperf-training.yaml | 18 +
 .../_cm.yaml | 45 +
 .../run-005147815bf840b8-input.json | 54 +
 .../run-005147815bf840b8-meta.json | 9 +
 .../run-005147815bf840b8-output.json | 11 +
 .../run-0eeb9799b12b488f-input.json | 55 +
 .../run-0eeb9799b12b488f-meta.json | 9 +
 .../run-0eeb9799b12b488f-output.json | 137 +
 .../run-52c1d43172664ed0-input.json | 55 +
 .../run-52c1d43172664ed0-meta.json | 9 +
 .../run-52c1d43172664ed0-output.json | 137 +
 .../run-66cce585ff0242bc-input.json | 56 +
 .../run-66cce585ff0242bc-meta.json | 9 +
 .../run-6a07cf881dee462a-input.json | 56 +
 .../run-6a07cf881dee462a-meta.json | 9 +
 .../run-7d80f464b2274742-input.json | 55 +
 .../run-7d80f464b2274742-meta.json | 10 +
 .../run-7d80f464b2274742-output.json | 137 +
 .../run-7f094c244ebb4985-input.json | 56 +
 .../run-7f094c244ebb4985-meta.json | 9 +
 .../run-7f094c244ebb4985-output.json | 146 +
 .../run-7f094c244ebb4985.md | 1 +
 .../run-d5b6b5af6d794045-input.json | 53 +
 .../run-d5b6b5af6d794045-meta.json | 9 +
 .../run-d8c0f02f52bf49ae-input.json | 53 +
 .../run-d8c0f02f52bf49ae-meta.json | 10 +
 .../run-d8c0f02f52bf49ae-output.json | 137 +
 .../run-df843c22cbf54aaf-input.json | 56 +
 .../run-df843c22cbf54aaf-meta.json | 9 +
 .../run-df843c22cbf54aaf-output.json | 146 +
 .../run-df843c22cbf54aaf.md | 1 +
 .../run-f05147815bf840b8-input.json | 55 +
 .../run-f05147815bf840b8-meta.json | 9 +
 .../run-f05147815bf840b8-output.json | 137 +
 .../_cm.yaml | 38 +
 script/README.md | 13 +
 script/activate-python-venv/README-extra.md | 7 +
 script/activate-python-venv/README.md | 123 +
 script/activate-python-venv/_cm.json | 25 +
 script/activate-python-venv/customize.py | 29 +
 script/activate-python-venv/run.bat | 7 +
 script/activate-python-venv/run.sh | 9 +
 .../add-custom-nvidia-system/README-extra.md | 2 +
 script/add-custom-nvidia-system/README.md | 177 +
 script/add-custom-nvidia-system/_cm.yaml | 113 +
 script/add-custom-nvidia-system/customize.py | 22 +
 script/add-custom-nvidia-system/run.sh | 5 +
 .../README-extra.md | 17 +
 .../README.md | 211 +
 .../app-image-classification-onnx-py/_cm.yaml | 116 +
 .../customize.py | 64 +
 .../img/computer_mouse.jpg | Bin 0 -> 41154 bytes
 .../requirements.txt | 0
 .../app-image-classification-onnx-py/run.bat | 29 +
 .../app-image-classification-onnx-py/run.sh | 37 +
 .../src/onnx_classify.py | 172 +
 .../tests/README.md | 14 +
 .../README-extra.md | 3 +
 .../README.md | 135 +
 .../_cm.json | 46 +
 .../include/benchmark.h | 511 ++
 .../run.sh | 6 +
 .../src/classification.cpp | 107 +
 .../README-extra.md | 16 +
 .../README.md | 168 +
 .../_cm.json | 89 +
 .../img/computer_mouse.jpg | Bin 0 -> 41154 bytes
 .../requirements.txt | 4 +
 .../app-image-classification-torch-py/run.bat | 20 +
 .../app-image-classification-torch-py/run.sh | 20 +
 .../src/pytorch_classify_preprocessed.py | 205 +
 .../README-extra.md | 16 +
 .../README.md | 160 +
 .../_cm.json | 73 +
 .../img/computer_mouse.jpg | Bin 0 -> 41154 bytes
 .../requirements.txt | 7 +
 .../run.sh | 26 +
 .../src/classify.py | 292 +
 .../README-extra.md | 25 +
 script/app-image-corner-detection/README.md | 129 +
 script/app-image-corner-detection/_cm.json | 34 +
 .../app-image-corner-detection/customize.py | 34 +
 script/app-image-corner-detection/run.sh | 6 +
 script/app-image-corner-detection/susan.c | 2161 +++++++
 .../README-extra.md | 289 +
 script/app-loadgen-generic-python/README.md | 322 ++
 script/app-loadgen-generic-python/_cm.yaml | 322 ++
 .../app-loadgen-generic-python/customize.py | 101 +
 script/app-loadgen-generic-python/run.bat | 4 +
 script/app-loadgen-generic-python/run.sh | 4 +
 .../src/backend_onnxruntime.py | 89 +
 .../src/backend_pytorch.py | 126 +
 .../src/loadgen/harness.py | 76 +
 .../src/loadgen/model.py | 24 +
 .../src/loadgen/runners.py | 186 +
 script/app-loadgen-generic-python/src/main.py | 238 +
 .../app-loadgen-generic-python/src/utils.py | 16 +
 .../tests/modular-cm-containers/_common.bat | 7 +
 .../tests/modular-cm-containers/_common.sh | 10 +
 .../tests/modular-cm-containers/build.bat | 16 +
 .../tests/modular-cm-containers/build.sh | 18 +
 ...dgen-generic-python--ubuntu-cpu.Dockerfile | 96 +
 .../loadgen-generic-python-auto.Dockerfile | 33 +
 .../tests/modular-cm-containers/run.bat | 3 +
 .../tests/modular-cm-containers/run.sh | 3 +
 .../README.md | 368 ++
 .../_cm.json | 427 ++
 .../armnn/classification.cpp | 399 ++
 .../customize.py | 96 +
 .../inc/benchmark.h | 488 ++
 .../src/classification.cpp | 362 ++
 script/app-mlperf-inference-dummy/README.md | 361 ++
 script/app-mlperf-inference-dummy/_cm.yaml | 291 +
 .../app-mlperf-inference-dummy/customize.py | 60 +
 script/app-mlperf-inference-dummy/run.sh | 8 +
 script/app-mlperf-inference-intel/README.md | 613 ++
 script/app-mlperf-inference-intel/_cm.yaml | 600 ++
 .../build_bert_harness.sh | 51 +
 .../build_gptj_harness.sh | 39 +
 .../calibrate_gptj_int4_model.sh | 37 +
 .../app-mlperf-inference-intel/customize.py | 132 +
 .../run_bert_harness.sh | 25 +
 .../run_gptj_harness.sh | 50 +
 .../CONTRIBUTING.md | 29 +
 .../README-extra.md | 83 +
 .../README.md | 329 ++
 .../_cm.yaml | 260 +
 .../customize.py | 98 +
 .../dockerfiles/ubuntu_22.04.Dockerfile | 38 +
 .../inc/backend.h | 304 +
 .../inc/common.h | 5 +
 .../inc/device.h | 64 +
 .../inc/gpu_device.h | 53 +
 .../inc/model.h | 124 +
 .../inc/npy.h | 143 +
 .../inc/onnxruntime_backend.h | 132 +
 .../inc/sample_library.h | 181 +
 .../inc/system.h | 135 +
 .../inc/tflite_backend.h | 132 +
 .../src/main.cpp | 214 +
 .../tests/win.bat | 8 +
 .../README-about.md | 7 +
 .../README-extra.md | 235 +
 .../README.md | 886 +++
 .../_cm.yaml | 1166 ++++
 .../customize.py | 372 ++
 .../nvidia/retinanet.py | 550 ++
 .../README-about.md | 137 +
 script/app-mlperf-inference-nvidia/README.md | 1305 +++++
 script/app-mlperf-inference-nvidia/_cm.yaml | 1477 +++++
 .../app-mlperf-inference-nvidia/customize.py | 428 ++
 script/app-mlperf-inference-nvidia/run.sh | 8 +
 .../app-mlperf-inference-qualcomm/README.md | 746 +++
 .../README_aws_dl2q.24xlarge.md | 97 +
 script/app-mlperf-inference-qualcomm/_cm.yaml | 775 +++
 .../customize.py | 189 +
 script/app-mlperf-inference-qualcomm/run.sh | 8 +
 script/app-mlperf-inference/README-about.md | 22 +
 script/app-mlperf-inference/README-extra.md | 131 +
 script/app-mlperf-inference/README.md | 777 +++
 script/app-mlperf-inference/_cm.yaml | 1258 ++++
 .../app-mlperf-inference/build_dockerfiles.py | 98 +
 script/app-mlperf-inference/customize.py | 496 ++
 .../dockerfiles/README.md | 2 +
 .../rhel_9_python_onnxruntime_cpu.Dockerfile | 48 +
 .../rhel_9_python_tensorflow_cpu.Dockerfile | 49 +
 ...tu_18.04_python_onnxruntime_cpu.Dockerfile | 48 +
 ...ntu_18.04_python_tensorflow_cpu.Dockerfile | 49 +
 ...tu_20.04_python_onnxruntime_cpu.Dockerfile | 48 +
 ...ntu_20.04_python_tensorflow_cpu.Dockerfile | 49 +
 ...tu_22.04_python_onnxruntime_cpu.Dockerfile | 48 +
 ...ntu_22.04_python_tensorflow_cpu.Dockerfile | 49 +
 .../dockerfiles/resnet50/_info.md | 4 +
 .../rhel_9_cpp_onnxruntime_cpu.Dockerfile | 49 +
 .../rhel_9_python_onnxruntime_cpu.Dockerfile | 49 +
 .../rhel_9_python_pytorch_cpu.Dockerfile | 42 +
 .../rhel_9_python_tensorflow_cpu.Dockerfile | 49 +
 ...buntu_18.04_cpp_onnxruntime_cpu.Dockerfile | 49 +
 ...tu_18.04_python_onnxruntime_cpu.Dockerfile | 49 +
 ...ubuntu_18.04_python_pytorch_cpu.Dockerfile | 42 +
 ...ntu_18.04_python_tensorflow_cpu.Dockerfile | 49 +
 ...buntu_20.04_cpp_onnxruntime_cpu.Dockerfile | 49 +
 ...tu_20.04_python_onnxruntime_cpu.Dockerfile | 49 +
 ...ubuntu_20.04_python_pytorch_cpu.Dockerfile | 42 +
 ...ntu_20.04_python_tensorflow_cpu.Dockerfile | 49 +
 ...buntu_22.04_cpp_onnxruntime_cpu.Dockerfile | 49 +
 ...tu_22.04_python_onnxruntime_cpu.Dockerfile | 49 +
 ...ubuntu_22.04_python_pytorch_cpu.Dockerfile | 42 +
 ...ntu_22.04_python_tensorflow_cpu.Dockerfile | 49 +
 .../dockerfiles/retinanet/_test.sh | 2 +
 .../rhel_9_cpp_onnxruntime_cpu.Dockerfile | 48 +
 .../rhel_9_python_onnxruntime_cpu.Dockerfile | 48 +
 .../rhel_9_python_pytorch_cpu.Dockerfile | 49 +
 ...buntu_18.04_cpp_onnxruntime_cpu.Dockerfile | 48 +
 ...tu_18.04_python_onnxruntime_cpu.Dockerfile | 48 +
 ...ubuntu_18.04_python_pytorch_cpu.Dockerfile | 49 +
 ...buntu_20.04_cpp_onnxruntime_cpu.Dockerfile | 48 +
 ...tu_20.04_python_onnxruntime_cpu.Dockerfile | 48 +
 ...ubuntu_20.04_python_pytorch_cpu.Dockerfile | 49 +
 ...buntu_22.04_cpp_onnxruntime_cpu.Dockerfile | 48 +
 ...tu_22.04_python_onnxruntime_cpu.Dockerfile | 48 +
 ...ubuntu_22.04_python_pytorch_cpu.Dockerfile | 49 +
 script/app-mlperf-inference/run.sh | 8 +
 script/app-mlperf-inference/run_config.yml | 11 +
 .../app-mlperf-inference/verify_accuracy.sh | 4 +
 script/app-mlperf-training-nvidia/README.md | 240 +
 script/app-mlperf-training-nvidia/_cm.yaml | 156 +
 .../app-mlperf-training-nvidia/customize.py | 57 +
 .../run-bert-training.sh | 9 +
 script/app-mlperf-training-nvidia/run.sh | 10 +
 .../app-mlperf-training-reference/README.md | 238 +
 script/app-mlperf-training-reference/_cm.yaml | 150 +
 .../customize.py | 52 +
 .../run-bert-training.sh | 27 +
 script/app-mlperf-training-reference/run.sh | 10 +
 .../README-extra.md | 30 +
 script/app-stable-diffusion-onnx-py/README.md | 201 +
 script/app-stable-diffusion-onnx-py/_cm.yaml | 110 +
 .../app-stable-diffusion-onnx-py/process.py | 34 +
 script/app-stable-diffusion-onnx-py/run.bat | 2 +
 script/app-stable-diffusion-onnx-py/run.sh | 4 +
 .../README.md | 270 +
 .../_cm.yaml | 318 ++
 .../customize.py | 158 +
 .../run-template.sh | 90 +
 script/benchmark-program-mlperf/README.md | 153 +
 script/benchmark-program-mlperf/_cm.json | 57 +
 script/benchmark-program-mlperf/customize.py | 28 +
 script/benchmark-program/README.md | 152 +
 script/benchmark-program/_cm.json | 61 +
 script/benchmark-program/customize.py | 65 +
 script/benchmark-program/run-ubuntu.sh | 10 +
 script/benchmark-program/run.bat | 39 +
 script/benchmark-program/run.sh | 48 +
 script/build-docker-image/README-extra.md | 16 +
 script/build-docker-image/README.md | 159 +
 script/build-docker-image/_cm.json | 48 +
 script/build-docker-image/customize.py | 88 +
 .../build-docker-image/examples/0-common.bat | 21 +
 .../examples/0-generate.bat | 9 +
 .../build-docker-image/examples/1-build.bat | 8 +
 .../examples/2-run-cm-command1.bat | 3 +
 .../examples/2-run-cm-command2.bat | 3 +
 .../examples/2-run-cm-command3.bat | 3 +
 .../examples/2-run-cm-command4.bat | 3 +
 .../examples/2-run-cm-command5.bat | 3 +
 .../examples/2-run-interactive1.bat | 3 +
 .../examples/2-run-interactive2.bat | 3 +
 .../examples/3-push-to-docker-hub.bat | 3 +
 .../Dockerfile.cm-base-ubuntu-22.04-20230804 | 38 +
 .../Dockerfile.cm-base-ubuntu-23.04-20230804 | 38 +
 .../Dockerfile.cm-base-ubuntu-23.04-latest | 38 +
 ...classification-onnx-py-ubuntu-23.04-latest | 45 +
 script/build-docker-image/examples/README.md | 1 +
 .../examples/computer_mouse.jpg | Bin 0 -> 41154 bytes
 ...cm-script-app-image-classification-onnx-py | 6 +
 ...e-classification-python-onnx-with-file.bat | 6 +
 ...ge-classification-python-onnx-with-file.sh | 5 +
 ...un-cm-image-classification-python-onnx.bat | 1 +
 ...run-cm-image-classification-python-onnx.sh | 3 +
 script/build-docker-image/run.bat | 12 +
 script/build-docker-image/run.sh | 13 +
 script/build-dockerfile/README-extra.md | 27 +
 script/build-dockerfile/README.md | 185 +
 script/build-dockerfile/_cm.json | 69 +
 script/build-dockerfile/customize.py | 286 +
 .../dockerfiles/rhel_9.Dockerfile | 32 +
 .../dockerfiles/ubuntu_18.04.Dockerfile | 32 +
 .../dockerfiles/ubuntu_20.04.Dockerfile | 32 +
 .../dockerfiles/ubuntu_22.04.Dockerfile | 32 +
 script/build-dockerfile/dockerinfo.json | 80 +
 .../README-extra.md | 2 +
 .../README.md | 244 +
 .../_cm.yaml | 255 +
 .../customize.py | 41 +
 .../run.sh | 16 +
 script/calibrate-model-for.qaic/README.md | 288 +
 script/calibrate-model-for.qaic/_cm.json | 223 +
 script/calibrate-model-for.qaic/customize.py | 204 +
 script/calibrate-model-for.qaic/run.sh | 28 +
 script/compile-model-for.qaic/README.md | 435 ++
 script/compile-model-for.qaic/_cm.json | 389 ++
 script/compile-model-for.qaic/customize.py | 73 +
 script/compile-model-for.qaic/run.sh | 28 +
 script/compile-program/README-extra.md | 3 +
 script/compile-program/README.md | 130 +
 script/compile-program/_cm.json | 36 +
 script/compile-program/customize.py | 54 +
 script/compile-program/run.bat | 35 +
 script/compile-program/run.sh | 72 +
 script/convert-csv-to-md/README.md | 145 +
 script/convert-csv-to-md/_cm.json | 42 +
 script/convert-csv-to-md/customize.py | 28 +
 script/convert-csv-to-md/process.py | 10 +
 script/convert-csv-to-md/run.bat | 1 +
 script/convert-csv-to-md/run.sh | 28 +
 .../README.md | 145 +
 .../_cm.json | 44 +
 .../customize.py | 26 +
 .../run.sh | 2 +
 script/copy-to-clipboard/README.md | 143 +
 script/copy-to-clipboard/_cm.yaml | 32 +
 script/copy-to-clipboard/code.py | 11 +
 script/copy-to-clipboard/run.bat | 4 +
 script/copy-to-clipboard/run.sh | 4 +
 script/create-conda-env/README.md | 150 +
 script/create-conda-env/_cm.json | 43 +
 script/create-conda-env/customize.py | 31 +
 script/create-conda-env/run.sh | 7 +
 .../create-fpgaconvnet-app-tinyml/README.md | 158 +
 script/create-fpgaconvnet-app-tinyml/_cm.json | 59 +
 .../customize.py | 38 +
 script/create-fpgaconvnet-app-tinyml/run.sh | 26 +
 .../README.md | 175 +
 .../create-fpgaconvnet-config-tinyml/_cm.json | 58 +
 .../customize.py | 53 +
 .../create-fpgaconvnet-config-tinyml/run.sh | 26 +
 script/create-patch/README-extra.md | 5 +
 script/create-patch/README.md | 137 +
 script/create-patch/_cm.yaml | 22 +
 script/create-patch/customize.py | 53 +
 script/destroy-terraform/README-extra.md | 1 +
 script/destroy-terraform/README.md | 123 +
 script/destroy-terraform/_cm.json | 20 +
 script/destroy-terraform/customize.py | 18 +
 script/destroy-terraform/run.bat | 1 +
 script/destroy-terraform/run.sh | 6 +
 script/detect-cpu/README-extra.md | 17 +
 script/detect-cpu/README.md | 130 +
 script/detect-cpu/_cm.json | 31 +
 script/detect-cpu/customize.py | 172 +
 script/detect-cpu/run.bat | 2 +
 script/detect-cpu/run.sh | 11 +
 script/detect-os/README.md | 139 +
 script/detect-os/_cm.json | 38 +
 script/detect-os/customize.py | 100 +
 script/detect-os/run.bat | 1 +
 script/detect-os/run.sh | 21 +
 script/detect-os/run_config.yml | 6 +
 script/detect-sudo/README.md | 122 +
 script/detect-sudo/_cm.yaml | 17 +
 script/detect-sudo/customize.py | 32 +
 script/detect-sudo/run.sh | 27 +
 script/download-and-extract/README-extra.md | 109 +
 script/download-and-extract/README.md | 216 +
 script/download-and-extract/_cm.json | 162 +
 script/download-and-extract/customize.py | 61 +
 .../tests/download-and-extract-file.bat | 1 +
 .../tests/download-and-extract-file2.bat | 1 +
 script/download-file/README-extra.md | 98 +
 script/download-file/README.md | 204 +
 script/download-file/_cm.json | 93 +
 script/download-file/customize.py | 167 +
 script/download-file/run.bat | 56 +
 script/download-file/run.sh | 53 +
 script/download-file/tests/download-file.bat | 2 +
 script/download-file/tests/download-file2.bat | 1 +
 script/download-torrent/README.md | 157 +
 script/download-torrent/_cm.json | 41 +
 script/download-torrent/customize.py | 33 +
 script/download-torrent/run.sh | 34 +
 script/dump-pip-freeze/README.md | 121 +
 script/dump-pip-freeze/_cm.yaml | 16 +
 script/dump-pip-freeze/customize.py | 52 +
 script/dump-pip-freeze/dump.py | 21 +
 script/dump-pip-freeze/run.bat | 4 +
 script/dump-pip-freeze/run.sh | 28 +
 script/extract-file/README-extra.md | 115 +
 script/extract-file/README.md | 170 +
 script/extract-file/_cm.json | 53 +
 script/extract-file/customize.py | 197 +
 script/extract-file/run.bat | 39 +
 script/extract-file/run.sh | 21 +
 script/fail/README-extra.md | 1 +
 script/fail/README.md | 134 +
 script/fail/_cm.yaml | 18 +
 script/fail/customize.py | 28 +
 script/flash-tinyml-binary/README-extra.md | 16 +
 script/flash-tinyml-binary/README.md | 176 +
 script/flash-tinyml-binary/_cm.json | 57 +
 script/flash-tinyml-binary/customize.py | 19 +
 script/flash-tinyml-binary/run.sh | 11 +
 .../README-extra.md | 12 +
 .../README.md | 186 +
 .../_cm.json | 124 +
 .../customize.py | 339 ++
 .../default_files/analyzer_table.md | 3 +
 .../default_files/power_settings.md | 1 +
 .../README.md | 200 +
 .../_cm.yaml | 92 +
 .../customize.py | 446 ++
 .../README-extra.md | 55 +
 script/generate-mlperf-tiny-report/README.md | 147 +
 script/generate-mlperf-tiny-report/_cm.yaml | 42 +
 .../generate-mlperf-tiny-report/customize.py | 83 +
 .../run_submission_checker.bat | 10 +
 .../run_submission_checker.sh | 12 +
 .../README-extra.md | 3 +
 .../generate-mlperf-tiny-submission/README.md | 409 ++
 .../generate-mlperf-tiny-submission/_cm.json | 40 +
 .../customize.py | 157 +
 script/generate-nvidia-engine/README-about.md | 1 +
 script/generate-nvidia-engine/README.md | 242 +
 script/generate-nvidia-engine/_cm.yaml | 152 +
 script/generate-nvidia-engine/customize.py | 30 +
 script/generate-nvidia-engine/run.sh | 7 +
 script/get-android-sdk/README-extra.md | 3 +
 script/get-android-sdk/README.md | 153 +
 script/get-android-sdk/_cm.json | 43 +
 script/get-android-sdk/customize.py | 169 +
 .../get-android-sdk/prepare-sdk-manager.bat | 27 +
 script/get-android-sdk/prepare-sdk-manager.sh | 26 +
 script/get-aocl/README-extra.md | 0
 script/get-aocl/README.md | 139 +
 script/get-aocl/_cm.json | 51 +
 script/get-aocl/customize.py | 31 +
 script/get-aocl/run.sh | 9 +
 script/get-aria2/README-extra.md | 9 +
 script/get-aria2/README.md | 150 +
 script/get-aria2/_cm.yaml | 37 +
 script/get-aria2/customize.py | 122 +
 script/get-aria2/install.bat | 9 +
 script/get-aria2/install.sh | 47 +
 script/get-aria2/run.bat | 4 +
 script/get-aria2/run.sh | 6 +
 script/get-aws-cli/README-extra.md | 9 +
 script/get-aws-cli/README.md | 126 +
 script/get-aws-cli/_cm.json | 29 +
 script/get-aws-cli/customize.py | 59 +
 script/get-aws-cli/run.sh | 3 +
 script/get-bazel/README-extra.md | 9 +
 script/get-bazel/README.md | 128 +
 script/get-bazel/_cm.json | 28 +
 script/get-bazel/customize.py | 60 +
 script/get-bazel/run.bat | 2 +
 script/get-bazel/run.sh | 8 +
 script/get-bert-squad-vocab/README.md | 121 +
 script/get-bert-squad-vocab/_cm.json | 36 +
 script/get-blis/README-extra.md | 0
 script/get-blis/README.md | 160 +
 script/get-blis/_cm.json | 72 +
 script/get-blis/customize.py | 36 +
 script/get-blis/run.bat | 1 +
 script/get-blis/run.sh | 12 +
 script/get-brew/README.md | 119 +
 script/get-brew/_cm.json | 22 +
 .../dockerfiles/ubuntu_22.04.Dockerfile | 36 +
 script/get-brew/run.sh | 3 +
 script/get-ck-repo-mlops/README.md | 122 +
 script/get-ck-repo-mlops/_cm.json | 18 +
 script/get-ck-repo-mlops/run.bat | 1 +
 script/get-ck-repo-mlops/run.sh | 4 +
 script/get-ck/README.md | 120 +
 script/get-ck/_cm.json | 13 +
 script/get-ck/run.bat | 1 +
 script/get-ck/run.sh | 3 +
 script/get-cl/README-extra.md | 7 +
 script/get-cl/README.md | 140 +
 script/get-cl/_cm.json | 30 +
 script/get-cl/customize.py | 136 +
 script/get-cl/run.bat | 3 +
 script/get-cmake/README.md | 131 +
 script/get-cmake/_cm.json | 40 +
 script/get-cmake/customize.py | 60 +
 script/get-cmake/run.bat | 2 +
 script/get-cmake/run.sh | 5 +
 script/get-cmsis_5/README-extra.md | 5 +
 script/get-cmsis_5/README.md | 151 +
 script/get-cmsis_5/_cm.json | 58 +
 script/get-cmsis_5/customize.py | 25 +
 script/get-cmsis_5/run.sh | 21 +
 script/get-compiler-flags/README.md | 131 +
 script/get-compiler-flags/_cm.json | 34 +
 script/get-compiler-flags/customize.py | 63 +
 script/get-compiler-rust/README.md | 122 +
 script/get-compiler-rust/_cm.yaml | 19 +
 script/get-compiler-rust/customize.py | 25 +
 script/get-compiler-rust/run.sh | 7 +
 script/get-conda/README.md | 166 +
 script/get-conda/_cm.json | 52 +
 script/get-conda/customize.py | 95 +
 script/get-conda/install.bat | 9 +
 script/get-conda/install.sh | 18 +
 script/get-conda/run.bat | 1 +
 script/get-conda/run.sh | 3 +
 script/get-croissant/README.md | 128 +
 script/get-croissant/_cm.yaml | 30 +
 script/get-croissant/customize.py | 16 +
 script/get-croissant/run.bat | 20 +
 script/get-croissant/run.sh | 21 +
 script/get-cuda-devices/README.md | 124 +
 script/get-cuda-devices/_cm.json | 36 +
 script/get-cuda-devices/customize.py | 37 +
 script/get-cuda-devices/print_cuda_devices.cu | 74 +
 script/get-cuda-devices/run.bat | 29 +
 script/get-cuda-devices/run.sh | 32 +
 script/get-cuda/README-about.md | 6 +
 script/get-cuda/README-extra.md | 44 +
 script/get-cuda/README.md | 229 +
 script/get-cuda/_cm.json | 130 +
 script/get-cuda/customize.py | 215 +
 script/get-cuda/run.bat | 3 +
 script/get-cuda/run.sh | 14 +
 script/get-cudnn/README-extra.md | 3 +
 script/get-cudnn/README.md | 168 +
 script/get-cudnn/_cm.json | 52 +
 script/get-cudnn/customize.py | 142 +
 script/get-cudnn/run.sh | 12 +
 script/get-dataset-cifar10/README.md | 166 +
 script/get-dataset-cifar10/_cm.json | 56 +
 script/get-dataset-cifar10/customize.py | 16 +
 script/get-dataset-cifar10/requirements.txt | 47 +
 script/get-dataset-cifar10/run.bat | 48 +
 script/get-dataset-cifar10/run.sh | 50 +
 script/get-dataset-cnndm/README.md | 176 +
 script/get-dataset-cnndm/_cm.json | 87 +
 script/get-dataset-cnndm/customize.py | 29 +
 script/get-dataset-cnndm/run-intel.sh | 15 +
 script/get-dataset-cnndm/run.sh | 21 +
 script/get-dataset-coco/README-extra.md | 95 +
 script/get-dataset-coco/README.md | 215 +
 script/get-dataset-coco/_cm.json | 137 +
 script/get-dataset-coco/customize.py | 203 +
 script/get-dataset-coco2014/README.md | 205 +
 script/get-dataset-coco2014/_cm.yaml | 89 +
 script/get-dataset-coco2014/customize.py | 27 +
 script/get-dataset-coco2014/run.bat | 21 +
 script/get-dataset-coco2014/run.sh | 27 +
 script/get-dataset-criteo/README-extra.md | 9 +
 script/get-dataset-criteo/README.md | 156 +
 script/get-dataset-criteo/_cm.json | 38 +
 script/get-dataset-criteo/run.sh | 26 +
 script/get-dataset-imagenet-aux/README.md | 157 +
 script/get-dataset-imagenet-aux/_cm.json | 44 +
 script/get-dataset-imagenet-aux/run.bat | 16 +
 script/get-dataset-imagenet-aux/run.sh | 15 +
 .../README.md | 148 +
 .../get-dataset-imagenet-calibration/_cm.yaml | 46 +
 script/get-dataset-imagenet-helper/README.md | 122 +
 script/get-dataset-imagenet-helper/_cm.json | 18 +
 .../get-dataset-imagenet-helper/customize.py | 12 +
 .../imagenet_helper/__init__.py | 139 +
 script/get-dataset-imagenet-train/README.md | 149 +
 script/get-dataset-imagenet-train/_cm.json | 60 +
 .../get-dataset-imagenet-train/customize.py | 66 +
 script/get-dataset-imagenet-train/run.sh | 3 +
 .../get-dataset-imagenet-val/README-extra.md | 28 +
 script/get-dataset-imagenet-val/README.md | 209 +
 script/get-dataset-imagenet-val/_cm.json | 114 +
 script/get-dataset-imagenet-val/customize.py | 79 +
 script/get-dataset-imagenet-val/run.bat | 17 +
 script/get-dataset-kits19/README.md | 174 +
 script/get-dataset-kits19/_cm.json | 85 +
 script/get-dataset-kits19/customize.py | 39 +
 script/get-dataset-kits19/run.sh | 43 +
 .../get-dataset-librispeech/README-extra.md | 26 +
 script/get-dataset-librispeech/README.md | 136 +
 script/get-dataset-librispeech/_cm.json | 86 +
 script/get-dataset-librispeech/customize.py | 19 +
 script/get-dataset-librispeech/run.sh | 8 +
 .../README.md | 146 +
 .../_cm.json | 29 +
 .../customize.py | 24 +
 .../get-dataset-openimages-annotations/run.sh | 9 +
 .../README.md | 180 +
 .../_cm.yaml | 60 +
 .../customize.py | 27 +
 .../filter.py | 20 +
 .../run-filter.sh | 6 +
 script/get-dataset-openimages/README-extra.md | 2 +
 script/get-dataset-openimages/README.md | 251 +
 script/get-dataset-openimages/_cm.json | 180 +
 script/get-dataset-openimages/customize.py | 82 +
 script/get-dataset-openimages/run.bat | 24 +
 script/get-dataset-openimages/run.sh | 40 +
 script/get-dataset-openorca/README.md | 175 +
 script/get-dataset-openorca/_cm.json | 80 +
 script/get-dataset-openorca/customize.py | 20 +
 script/get-dataset-squad-vocab/README.md | 144 +
 script/get-dataset-squad-vocab/_cm.json | 30 +
 script/get-dataset-squad-vocab/customize.py | 19 +
 script/get-dataset-squad-vocab/run.sh | 4 +
 script/get-dataset-squad/README-extra.md | 20 +
 script/get-dataset-squad/README.md | 131 +
 script/get-dataset-squad/_cm.json | 44 +
 script/get-dataset-squad/customize.py | 20 +
 script/get-dataset-squad/run.sh | 5 +
 .../get-dlrm-data-mlperf-inference/README.md | 153 +
 .../get-dlrm-data-mlperf-inference/_cm.yaml | 22 +
 .../customize.py | 71 +
 script/get-dlrm-data-mlperf-inference/run.sh | 27 +
 script/get-dlrm/README-extra.md | 15 +
 script/get-dlrm/README.md | 145 +
 script/get-dlrm/_cm.json | 41 +
 script/get-dlrm/customize.py | 37 +
 script/get-dlrm/run.sh | 12 +
 script/get-docker/README.md | 121 +
 script/get-docker/_cm.json | 29 +
 script/get-docker/run-ubuntu.sh | 40 +
 script/get-gcc/README-extra.md | 15 +
 script/get-gcc/README.md | 156 +
 script/get-gcc/_cm.json | 42 +
 script/get-gcc/customize.py | 102 +
 script/get-gcc/run.bat | 3 +
 script/get-gcc/run.sh | 8 +
 script/get-generic-python-lib/README-extra.md | 6 +
 script/get-generic-python-lib/README.md | 679 +++
 script/get-generic-python-lib/_cm.json | 1073 ++++
 script/get-generic-python-lib/customize.py | 149 +
 .../get-generic-python-lib/detect-version.py | 34 +
 script/get-generic-python-lib/install.bat | 15 +
 script/get-generic-python-lib/install.sh | 37 +
 script/get-generic-python-lib/run.bat | 4 +
 script/get-generic-python-lib/run.sh | 7 +
 .../tensorflow/run-aarch64.sh | 13 +
 .../tensorflow/run-macos.sh | 7 +
 .../get-generic-python-lib/uninstall_deps.sh | 8 +
 script/get-generic-sys-util/README.md | 228 +
 script/get-generic-sys-util/_cm.json | 317 ++
 script/get-generic-sys-util/customize.py | 53 +
 script/get-generic-sys-util/run.sh | 6 +
 script/get-git-repo/README-extra.md | 20 +
 script/get-git-repo/README.md | 241 +
 script/get-git-repo/_cm.json | 124 +
 script/get-git-repo/customize.py | 89 +
 script/get-git-repo/run.bat | 67 +
 script/get-git-repo/run.sh | 74 +
 script/get-github-cli/README.md | 122 +
 script/get-github-cli/_cm.json | 22 +
 script/get-github-cli/customize.py | 54 +
 script/get-github-cli/run.bat | 1 +
 script/get-github-cli/run.sh | 2 +
 script/get-go/README-extra.md | 10 +
 script/get-go/README.md | 127 +
 script/get-go/_cm.json | 33 +
 script/get-go/customize.py | 59 +
 script/get-go/run.sh | 3 +
 script/get-google-saxml/README.md | 135 +
 script/get-google-saxml/_cm.yaml | 45 +
 script/get-google-saxml/customize.py | 22 +
 script/get-google-saxml/run.bat | 3 +
 script/get-google-saxml/run.sh | 3 +
 script/get-google-test/README.md | 139 +
 script/get-google-test/_cm.json | 52 +
 script/get-google-test/customize.py | 33 +
 script/get-google-test/run.sh | 23 +
 script/get-ipol-src/README-extra.md | 1 +
 script/get-ipol-src/README.md | 148 +
 script/get-ipol-src/_cm.json | 41 +
 script/get-ipol-src/customize.py | 58 +
 script/get-ipol-src/patch/20240127.patch | 10 +
 script/get-java/README-extra.md | 6 +
 script/get-java/README.md | 167 +
 script/get-java/_cm.json | 38 +
 script/get-java/customize.py | 137 +
 script/get-java/install-prebuilt.bat | 9 +
 script/get-java/install-prebuilt.sh | 15 +
 script/get-java/run.bat | 3 +
 script/get-java/run.sh | 3 +
 script/get-javac/README-extra.md | 6 +
 script/get-javac/README.md | 170 +
 script/get-javac/_cm.json | 39 +
 script/get-javac/customize.py | 148 +
 script/get-javac/install-prebuilt.bat | 9 +
 script/get-javac/install-prebuilt.sh | 15 +
 script/get-javac/run.bat | 3 +
 script/get-javac/run.sh | 3 +
 script/get-lib-armnn/README.md | 134 +
 script/get-lib-armnn/_cm.json | 58 +
 script/get-lib-armnn/customize.py | 51 +
 script/get-lib-armnn/run.sh | 9 +
 script/get-lib-dnnl/README.md | 134 +
 script/get-lib-dnnl/_cm.json | 48 +
 script/get-lib-dnnl/customize.py | 28 +
 script/get-lib-dnnl/run.sh | 43 +
 script/get-lib-protobuf/README.md | 156 +
 script/get-lib-protobuf/_cm.json | 64 +
 script/get-lib-protobuf/customize.py | 39 +
 script/get-lib-protobuf/run.sh | 15 +
 script/get-lib-qaic-api/README.md | 133 +
 script/get-lib-qaic-api/_cm.json | 39 +
 script/get-lib-qaic-api/customize.py | 39 +
 script/get-lib-qaic-api/master/QAicInfApi.cpp | 750 +++
 script/get-lib-qaic-api/master/QAicInfApi.h | 146 +
 script/get-lib-qaic-api/run.sh | 4 +
 script/get-llvm/README-extra.md | 96 +
 script/get-llvm/README.md | 176 +
 script/get-llvm/_cm.json | 68 +
 script/get-llvm/customize.py | 91 +
 script/get-llvm/run.bat | 3 +
 script/get-llvm/run.sh | 4 +
 script/get-microtvm/README-extra.md | 5 +
 script/get-microtvm/README.md | 164 +
 script/get-microtvm/_cm.json | 56 +
 script/get-microtvm/customize.py | 26 +
 script/get-microtvm/run.sh | 12 +
 script/get-ml-model-3d-unet-kits19/README.md | 202 +
 script/get-ml-model-3d-unet-kits19/_cm.json | 94 +
 .../get-ml-model-3d-unet-kits19/customize.py | 38 +
 script/get-ml-model-bert-base-squad/README.md | 184 +
 script/get-ml-model-bert-base-squad/_cm.json | 92 +
 .../get-ml-model-bert-large-squad/README.md | 358 ++
 script/get-ml-model-bert-large-squad/_cm.json | 362 ++
 .../customize.py | 32 +
 .../run-packed.sh | 6 +
 script/get-ml-model-dlrm-terabyte/README.md | 264 +
 script/get-ml-model-dlrm-terabyte/_cm.json | 164 +
 script/get-ml-model-dlrm-terabyte/run.sh | 4 +
 .../get-ml-model-efficientnet-lite/README.md | 250 +
 .../get-ml-model-efficientnet-lite/_cm.json | 167 +
 .../customize.py | 52 +
 script/get-ml-model-gptj/README.md | 322 ++
 script/get-ml-model-gptj/_cm.json | 293 +
 script/get-ml-model-gptj/convert_gptj_ckpt.py | 179 +
 script/get-ml-model-gptj/customize.py | 65 +
 .../get-ml-model-gptj/run-int4-calibration.sh | 10 +
 script/get-ml-model-gptj/run-intel.sh | 18 +
 .../get-ml-model-gptj/run-saxml-quantized.sh | 6 +
 script/get-ml-model-gptj/run-saxml.sh | 8 +
 .../README-extra.md | 21 +
 script/get-ml-model-huggingface-zoo/README.md | 194 +
 script/get-ml-model-huggingface-zoo/_cm.json | 88 +
 .../get-ml-model-huggingface-zoo/customize.py | 50 +
 .../download_model.py | 107 +
 script/get-ml-model-huggingface-zoo/run.bat | 2 +
 script/get-ml-model-huggingface-zoo/run.sh | 4 +
 script/get-ml-model-llama2/README.md | 223 +
 script/get-ml-model-llama2/_cm.json | 127 +
 script/get-ml-model-llama2/customize.py | 24 +
 script/get-ml-model-mobilenet/README-extra.md | 15 +
 script/get-ml-model-mobilenet/README.md | 472 ++
 script/get-ml-model-mobilenet/_cm.json | 382 ++
 script/get-ml-model-mobilenet/customize.py | 52 +
 script/get-ml-model-neuralmagic-zoo/README.md | 337 ++
 script/get-ml-model-neuralmagic-zoo/_cm.json | 302 +
 .../get-ml-model-neuralmagic-zoo/customize.py | 43 +
 .../download_sparse.py | 10 +
 script/get-ml-model-neuralmagic-zoo/run.bat | 2 +
 script/get-ml-model-neuralmagic-zoo/run.sh | 2 +
 script/get-ml-model-resnet50/README-extra.md | 15 +
 script/get-ml-model-resnet50/README.md | 358 ++
 script/get-ml-model-resnet50/_cm.json | 307 +
 script/get-ml-model-resnet50/customize.py | 28 +
 script/get-ml-model-resnet50/run-fix-input.sh | 10 +
 script/get-ml-model-resnet50/run_config.yml | 6 +
 .../get-ml-model-retinanet-nvidia/README.md | 171 +
 script/get-ml-model-retinanet-nvidia/_cm.json | 84 +
 .../customize.py | 23 +
 .../nvidia_patch_retinanet_efficientnms.py | 109 +
 .../polygraphy_script.sh | 24 +
 script/get-ml-model-retinanet-nvidia/run.sh | 16 +
 script/get-ml-model-retinanet/README-extra.md | 16 +
 script/get-ml-model-retinanet/README.md | 226 +
 script/get-ml-model-retinanet/_cm.json | 176 +
 script/get-ml-model-retinanet/customize.py | 30 +
 .../node-precision-info.py | 69 +
 script/get-ml-model-retinanet/run-no-nms.sh | 35 +
 script/get-ml-model-rnnt/README.md | 194 +
 script/get-ml-model-rnnt/_cm.json | 74 +
 script/get-ml-model-rnnt/customize.py | 38 +
 .../get-ml-model-stable-diffusion/README.md | 256 +
 script/get-ml-model-stable-diffusion/_cm.json | 186 +
 .../customize.py | 23 +
 script/get-ml-model-tiny-resnet/README.md | 214 +
 script/get-ml-model-tiny-resnet/_cm.json | 144 +
 script/get-ml-model-tiny-resnet/customize.py | 25 +
 script/get-ml-model-tiny-resnet/run.sh | 4 +
 .../get-ml-model-tiny-resnet/run_config.yml | 6 +
 .../README.md | 149 +
 .../_cm.json | 58 +
 .../customize.py | 20 +
 .../README.md | 163 +
 .../_cm.json | 48 +
 .../customize.py | 27 +
 .../run.bat | 1 +
 .../run.sh | 32 +
 .../README-extra.md | 26 +
 script/get-mlperf-inference-loadgen/README.md | 222 +
 script/get-mlperf-inference-loadgen/_cm.yaml | 169 +
 .../get-mlperf-inference-loadgen/customize.py | 46 +
 script/get-mlperf-inference-loadgen/run.bat | 39 +
 script/get-mlperf-inference-loadgen/run.sh | 55 +
 .../tests/download-and-install.bat | 2 +
 .../README-extra.md | 9 +
 .../README.md | 152 +
 .../_cm.json | 67 +
 .../customize.py | 19 +
 .../README-extra.md | 1 +
 .../README.md | 164 +
 .../_cm.json | 49 +
 .../customize.py | 31 +
 .../run.bat | 1 +
 .../run.sh | 32 +
 .../README.md | 161 +
 .../get-mlperf-inference-results-dir/_cm.json | 48 +
 .../customize.py | 27 +
 .../README-extra.md | 18 +
 script/get-mlperf-inference-results/README.md | 165 +
 script/get-mlperf-inference-results/_cm.json | 90 +
 .../get-mlperf-inference-results/customize.py | 46 +
 .../get-mlperf-inference-src/README-extra.md | 29 +
 script/get-mlperf-inference-src/README.md | 268 +
 script/get-mlperf-inference-src/_cm.json | 262 +
 script/get-mlperf-inference-src/customize.py | 106 +
 .../get-mlperf-inference-src/patch/coco.patch | 24 +
 .../get-mlperf-inference-src/patch/git.patch | 1925 +++++++
 .../patch/openimages-pycocotools.patch | 24 +
 .../patch/windows-openimages.patch | 64 +
 .../patch/windows-openimages2.patch | 11 +
 .../README.md | 161 +
 .../_cm.json | 48 +
 .../customize.py | 29 +
 .../README-extra.md | 6 +
 .../README.md | 163 +
 .../get-mlperf-inference-sut-configs/_cm.json | 50 +
 .../configs/default/config.yaml | 73 +
 .../default/default/default-config.yaml | 55 +
 .../tensorrt-framework/default-config.yaml | 55 +
 .../default-config.yaml | 54 +
 .../customize.py | 63 +
 .../README.md | 159 +
 .../_cm.json | 75 +
 .../customize.py | 158 +
 .../detect_memory.sh | 7 +
 .../get_memory_info.py | 60 +
 .../hardware/default.json | 26 +
 script/get-mlperf-inference-utils/README.md | 120 +
 script/get-mlperf-inference-utils/_cm.yaml | 18 +
 .../get-mlperf-inference-utils/customize.py | 32 +
 .../mlperf_utils.py | 301 +
 script/get-mlperf-logging/README-extra.md | 16 +
 script/get-mlperf-logging/README.md | 129 +
 script/get-mlperf-logging/_cm.json | 38 +
 script/get-mlperf-logging/customize.py | 21 +
 script/get-mlperf-power-dev/README.md | 173 +
 script/get-mlperf-power-dev/_cm.json | 87 +
 script/get-mlperf-power-dev/customize.py | 21 +
 .../README.md | 131 +
 .../_cm.json | 28 +
 .../customize.py | 58 +
 .../run.bat | 72 +
 .../run.sh | 39 +
 script/get-mlperf-tiny-src/README.md | 145 +
 script/get-mlperf-tiny-src/_cm.json | 42 +
 script/get-mlperf-tiny-src/customize.py | 46 +
 script/get-mlperf-tiny-src/run.bat | 72 +
 script/get-mlperf-tiny-src/run.sh | 39 +
 .../get-mlperf-training-nvidia-code/README.md | 160 +
 .../get-mlperf-training-nvidia-code/_cm.json | 79 +
 .../customize.py | 22 +
 .../get-mlperf-training-src/README-extra.md | 27 +
 script/get-mlperf-training-src/README.md | 226 +
 script/get-mlperf-training-src/_cm.json | 134 +
 script/get-mlperf-training-src/customize.py | 27 +
 .../patch/cpu_load.patch | 16 +
 .../patch/nvidia-retinanet.patch | 170 +
 script/get-nvidia-docker/README.md | 123 +
 script/get-nvidia-docker/_cm.json | 34 +
 script/get-nvidia-docker/run-ubuntu.sh | 32 +
 script/get-nvidia-mitten/README-extra.md | 1 +
 script/get-nvidia-mitten/README.md | 134 +
 script/get-nvidia-mitten/_cm.json | 52 +
 script/get-nvidia-mitten/customize.py | 22 +
 script/get-nvidia-mitten/run.bat | 3 +
 script/get-nvidia-mitten/run.sh | 4 +
 script/get-onnxruntime-prebuilt/README.md | 159 +
 script/get-onnxruntime-prebuilt/_cm.json | 48 +
 script/get-onnxruntime-prebuilt/customize.py | 74 +
 script/get-onnxruntime-prebuilt/run.bat | 10 +
 script/get-onnxruntime-prebuilt/run.sh | 14 +
 script/get-openssl/README-extra.md | 8 +
 script/get-openssl/README.md | 126 +
 script/get-openssl/_cm.json | 33 +
 script/get-openssl/customize.py | 57 +
 script/get-openssl/run.sh | 4 +
 .../README-extra.md | 16 +
 .../get-preprocessed-dataset-criteo/README.md | 227 +
 .../get-preprocessed-dataset-criteo/_cm.json | 150 +
 .../customize.py | 31 +
 .../preprocess.py | 32 +
 .../run-multihot.sh | 6 +
 script/get-preprocessed-dataset-criteo/run.sh | 8 +
 .../README.md | 119 +
 .../get-preprocessed-dataset-generic/_cm.json | 18 +
 .../customize.py | 10 +
 .../src/generic_preprocess.py | 213 +
 .../preprocess_object_detection_dataset.py | 167 +
 .../README-extra.md | 26 +
 .../README.md | 449 ++
 .../_cm.json | 388 ++
 .../customize.py | 61 +
 .../preprocess.py | 40 +
 .../get-preprocessed-dataset-imagenet/run.bat | 4 +
 .../get-preprocessed-dataset-imagenet/run.sh | 6 +
 .../get-preprocessed-dataset-kits19/README.md | 234 +
 .../get-preprocessed-dataset-kits19/_cm.json | 154 +
 .../customize.py | 21 +
 script/get-preprocessed-dataset-kits19/run.sh | 6 +
 .../README.md | 224 +
 .../_cm.json | 149 +
 .../customize.py | 21 +
 .../run.sh | 6 +
 .../README-extra.md | 28 +
 .../README.md | 403 ++
 .../_cm.json | 351 ++
 .../customize.py | 50 +
 .../nvidia_preprocess.py | 150 +
 .../preprocess.py | 47 +
 .../run.bat | 1 +
 .../run.sh | 3 +
 .../README.md | 180 +
 .../_cm.json | 108 +
 .../customize.py | 25 +
 .../get-preprocessed-dataset-openorca/run.sh | 5 +
 .../get-preprocessed-dataset-squad/README.md | 240 +
 .../get-preprocessed-dataset-squad/_cm.yaml | 93 +
 .../customize.py | 62 +
 .../run-packed.sh | 32 +
 script/get-preprocessed-dataset-squad/run.sh | 41 +
 script/get-python3/README-extra.md | 70 +
 script/get-python3/README.md | 170 +
 script/get-python3/_cm.json | 75 +
 script/get-python3/customize.py | 130 +
 script/get-python3/run.bat | 2 +
 script/get-python3/run.sh | 13 +
 script/get-qaic-apps-sdk/README.md | 126 +
 script/get-qaic-apps-sdk/_cm.json | 35 +
 script/get-qaic-apps-sdk/customize.py | 109 +
 script/get-qaic-platform-sdk/README.md | 130 +
 script/get-qaic-platform-sdk/_cm.json | 39 +
 script/get-qaic-platform-sdk/customize.py | 110 +
 script/get-qaic-software-kit/README.md | 178 +
 script/get-qaic-software-kit/_cm.json | 82 +
 script/get-qaic-software-kit/customize.py | 62 +
 script/get-qaic-software-kit/run.sh | 17 +
 script/get-rclone/README.md | 152 +
 script/get-rclone/_cm.json | 39 +
 script/get-rclone/configs/rclone.conf | 8 +
 script/get-rclone/customize.py | 124 +
 script/get-rclone/install-system-macos.sh | 3 +
 script/get-rclone/install-system.sh | 3 +
 script/get-rclone/install.bat | 12 +
 script/get-rclone/install.sh | 13 +
 script/get-rclone/run.bat | 5 +
 script/get-rclone/run.sh | 11 +
 script/get-rocm/README.md | 127 +
 script/get-rocm/_cm.json | 29 +
 script/get-rocm/customize.py | 61 +
 script/get-rocm/run.sh | 5 +
 script/get-spec-ptd/README-extra.md | 16 +
 script/get-spec-ptd/README.md | 166 +
 script/get-spec-ptd/_cm.json | 71 +
 script/get-spec-ptd/customize.py | 25 +
 script/get-spec-ptd/run.sh | 11 +
 script/get-sys-utils-cm/README.md | 158 +
 script/get-sys-utils-cm/_cm.json | 38 +
 script/get-sys-utils-cm/customize.py | 85 +
 script/get-sys-utils-cm/do_pip_installs.sh | 6 +
 .../get-sys-utils-cm/do_pip_installs.sh.old | 6 +
 script/get-sys-utils-cm/requirements.txt | 5 +
 script/get-sys-utils-cm/run-arch.sh | 35 +
 script/get-sys-utils-cm/run-debian.sh | 56 +
 script/get-sys-utils-cm/run-macos.sh | 39 +
 script/get-sys-utils-cm/run-rhel.sh | 42 +
 script/get-sys-utils-cm/run-sles.sh | 38 +
 script/get-sys-utils-cm/run-ubuntu.sh | 60 +
 script/get-sys-utils-min/README.md | 119 +
 script/get-sys-utils-min/_cm.json | 22 +
 script/get-sys-utils-min/customize.py | 64 +
 script/get-tensorrt/README-extra.md | 11 +
 script/get-tensorrt/README.md | 178 +
 script/get-tensorrt/_cm.json | 51 +
 script/get-tensorrt/customize.py | 142 +
 script/get-tensorrt/run.sh | 41 +
 script/get-terraform/README-extra.md | 9 +
 script/get-terraform/README.md | 127 +
 script/get-terraform/_cm.json | 29 +
 script/get-terraform/customize.py | 59 +
 script/get-terraform/run.sh | 3 +
 script/get-tvm-model/README-extra.md | 21 +
 script/get-tvm-model/README.md | 290 +
 script/get-tvm-model/_cm.json | 202 +
 script/get-tvm-model/customize.py | 54 +
 script/get-tvm-model/process.py | 252 +
 script/get-tvm-model/run.sh | 7 +
 script/get-tvm/README-extra.md | 5 +
 script/get-tvm/README.md | 200 +
 script/get-tvm/_cm.json | 124 +
 script/get-tvm/customize.py | 50 +
 script/get-tvm/run.sh | 80 +
 script/get-xilinx-sdk/README.md | 140 +
 script/get-xilinx-sdk/_cm.json | 35 +
 script/get-xilinx-sdk/customize.py | 32 +
 script/get-xilinx-sdk/run.sh | 27 +
 script/get-zendnn/README.md | 129 +
 script/get-zendnn/_cm.json | 41 +
 script/get-zendnn/customize.py | 27 +
 script/get-zendnn/run.bat | 1 +
 script/get-zendnn/run.sh | 9 +
 script/get-zephyr-sdk/README-extra.md | 19 +
 script/get-zephyr-sdk/README.md | 128 +
 script/get-zephyr-sdk/_cm.json | 39 +
 script/get-zephyr-sdk/customize.py | 15 +
 script/get-zephyr-sdk/run.sh | 21 +
 script/get-zephyr/README-extra.md | 8 +
 script/get-zephyr/README.md | 134 +
 script/get-zephyr/_cm.json | 41 +
 script/get-zephyr/customize.py | 17 +
 script/get-zephyr/run-ubuntu.sh | 4 +
 script/get-zephyr/run.sh | 22 +
 script/gui/README-about.md | 15 +
 script/gui/README.md | 245 +
 script/gui/_cm.yaml | 106 +
 script/gui/app.py | 72 +
 script/gui/customize.py | 64 +
 script/gui/graph.py | 778 +++
 script/gui/install/linux.md | 10 +
 script/gui/install/macos.md | 22 +
 script/gui/install/redhat.md | 7 +
 script/gui/install/windows.md | 15 +
 script/gui/misc.py | 220 +
 script/gui/playground.py | 203 +
 script/gui/playground_apps.py | 40 +
 script/gui/playground_beta.py | 35 +
 script/gui/playground_beta_README.md | 10 +
 script/gui/playground_challenges.py | 502 ++
 .../gui/playground_challenges_with_prizes.py | 456 ++
 script/gui/playground_contributors.py | 358 ++
 script/gui/playground_howtorun.py | 301 +
 script/gui/playground_install.py | 141 +
 script/gui/playground_reports.py | 136 +
 script/gui/playground_reproduce.py | 436 ++
 script/gui/playground_scripts.py | 317 ++
 script/gui/run.bat | 2 +
 script/gui/run.sh | 4 +
 script/gui/script.py | 484 ++
 script/gui/tests/README.md | 3 +
 script/gui/tests/generate_password.py | 13 +
 script/gui/tests/test.cmd | 1 +
 script/gui/tests/test2.cmd | 1 +
 script/gui/tests/test3.cmd | 1 +
 script/gui/tests/test4.cmd | 1 +
 script/gui/tests/test4a.cmd | 2 +
 script/gui/tests/test4b.cmd | 2 +
 script/gui/tests/test5.cmd | 1 +
 script/import-experiment-to-sqlite/README.md | 155 +
 .../README-extra.md | 82 +
 .../README.md | 154 +
 .../_cm.yaml | 38 +
 .../customize.py | 332 ++
 .../README-extra.md | 68 +
 .../README.md | 137 +
 .../import-mlperf-tiny-to-experiment/_cm.yaml | 33 +
 ...d-customization-of-tinymlperf-results2.png | Bin 0 -> 118877 bytes
 .../customize.py | 458 ++
 .../README-extra.md | 54 +
 .../README.md | 143 +
 .../_cm.yaml | 39 +
 .../customize.py | 335 ++
 .../run_mlperf_logger.sh | 9 +
 script/install-aws-cli/README.md | 124 +
 script/install-aws-cli/_cm.json | 33 +
 script/install-aws-cli/customize.py | 17 +
 script/install-aws-cli/run.sh | 14 +
 script/install-bazel/README.md | 135 +
 script/install-bazel/_cm.json | 35 +
 script/install-bazel/customize.py | 60 +
 script/install-bazel/run-aarch64.sh | 25 +
 script/install-bazel/run.bat | 9 +
 script/install-bazel/run.sh | 28 +
 script/install-cmake-prebuilt/README.md | 137 +
 script/install-cmake-prebuilt/_cm.json | 38 +
 script/install-cmake-prebuilt/customize.py | 116 +
 script/install-cmake-prebuilt/run.sh | 10 +
 script/install-cuda-package-manager/README.md | 125 +
 script/install-cuda-package-manager/_cm.json | 30 +
 .../install-cuda-package-manager/customize.py | 16 +
 .../run-ubuntu.sh | 1 +
 script/install-cuda-package-manager/run.sh | 5 +
 script/install-cuda-prebuilt/README-extra.md | 4 +
 script/install-cuda-prebuilt/README.md | 181 +
 script/install-cuda-prebuilt/_cm.json | 106 +
 script/install-cuda-prebuilt/customize.py | 32 +
 script/install-cuda-prebuilt/run.sh | 10 +
 script/install-gcc-src/README.md | 128 +
 script/install-gcc-src/_cm.json | 40 +
 script/install-gcc-src/customize.py | 28 +
 script/install-gcc-src/run.sh | 41 +
 .../install-generic-conda-package/README.md | 160 +
 script/install-generic-conda-package/_cm.json | 70 +
 .../customize.py | 41 +
 script/install-generic-conda-package/run.sh | 7 +
 script/install-gflags/README.md | 129 +
 script/install-gflags/_cm.json | 37 +
 script/install-gflags/customize.py | 28 +
 script/install-gflags/run.sh | 18 +
 script/install-github-cli/README.md | 123 +
 script/install-github-cli/_cm.json | 21 +
 script/install-github-cli/customize.py | 13 +
 script/install-github-cli/run-macos.sh | 1 +
 script/install-github-cli/run-rhel.sh | 3 +
 script/install-github-cli/run.sh | 7 +
 script/install-ipex-from-src/README.md | 198 +
 script/install-ipex-from-src/_cm.json | 164 +
 script/install-ipex-from-src/customize.py | 23 +
 script/install-ipex-from-src/run.sh | 11 +
 script/install-llvm-prebuilt/README-extra.md | 99 +
 script/install-llvm-prebuilt/README.md | 138 +
 script/install-llvm-prebuilt/_cm.json | 40 +
 script/install-llvm-prebuilt/customize.py | 208 +
 script/install-llvm-prebuilt/run.bat | 3 +
 script/install-llvm-prebuilt/run.sh | 10 +
 script/install-llvm-src/README.md | 291 +
 script/install-llvm-src/_cm.json | 307 +
 script/install-llvm-src/customize.py | 69 +
 .../install-llvm-16-intel-mlperf-inference.sh | 21 +
 script/install-llvm-src/run.sh | 44 +
 .../install-mlperf-logging-from-src/README.md | 128 +
 .../install-mlperf-logging-from-src/_cm.yaml | 36 +
 .../customize.py | 22 +
 script/install-mlperf-logging-from-src/run.sh | 29 +
 script/install-nccl-libs/README.md | 135 +
 script/install-nccl-libs/_cm.yaml | 13 +
 script/install-nccl-libs/customize.py | 22 +
 script/install-nccl-libs/run-ubuntu.sh | 2 +
 script/install-nccl-libs/run.sh | 27 +
 script/install-numactl-from-src/README.md | 172 +
 script/install-numactl-from-src/_cm.json | 94 +
 script/install-numactl-from-src/customize.py | 23 +
 script/install-numactl-from-src/run.sh | 19 +
 script/install-onednn-from-src/README.md | 182 +
 script/install-onednn-from-src/_cm.json | 122 +
 script/install-onednn-from-src/customize.py | 25 +
 .../run-intel-mlperf-inference.sh | 20 +
 script/install-onnxruntime-from-src/README.md | 185 +
 script/install-onnxruntime-from-src/_cm.json | 121 +
 .../install-onnxruntime-from-src/customize.py | 22 +
 script/install-onnxruntime-from-src/run.sh | 15 +
 script/install-openssl/README.md | 135 +
 script/install-openssl/_cm.json | 45 +
 script/install-openssl/customize.py | 39 +
 script/install-openssl/run.sh | 17 +
 .../README.md | 132 +
 .../_cm.yaml | 17 +
 .../customize.py | 38 +
 script/install-python-src/README.md | 183 +
 script/install-python-src/_cm.json | 93 +
 script/install-python-src/customize.py | 46 +
 script/install-python-src/run.sh | 71 +
 script/install-python-venv/README.md | 154 +
 script/install-python-venv/_cm.json | 44 +
 script/install-python-venv/customize.py | 83 +
 script/install-python-venv/run.bat | 5 +
 script/install-python-venv/run.sh | 10 +
 script/install-pytorch-from-src/README.md | 249 +
 script/install-pytorch-from-src/_cm.json | 255 +
 script/install-pytorch-from-src/customize.py | 23 +
 .../run-intel-mlperf-inference-v3_1.sh | 40 +
 script/install-pytorch-from-src/run.sh | 22 +
 .../install-pytorch-kineto-from-src/README.md | 192 +
 .../install-pytorch-kineto-from-src/_cm.json | 130 +
 .../customize.py | 17 +
 script/install-pytorch-kineto-from-src/run.sh | 15 +
 .../README.md | 201 +
 .../_cm.json | 118 +
 .../customize.py | 43 +
 .../install-qaic-compute-sdk-from-src/run.sh | 24 +
 script/install-rocm/README.md | 131 +
 script/install-rocm/_cm.json | 28 +
 script/install-rocm/customize.py | 19 +
 script/install-rocm/run-rhel.sh | 27 +
 script/install-rocm/run-ubuntu.sh | 32 +
 script/install-rocm/run.sh | 2 +
 script/install-tensorflow-for-c/README.md | 124 +
 script/install-tensorflow-for-c/_cm.json | 21 +
 script/install-tensorflow-for-c/customize.py | 27 +
 script/install-tensorflow-for-c/run.sh | 13 +
 script/install-tensorflow-from-src/README.md | 166 +
 script/install-tensorflow-from-src/_cm.json | 338 ++
 .../install-tensorflow-from-src/customize.py | 40 +
 script/install-tensorflow-from-src/run.sh | 44 +
 script/install-terraform-from-src/README.md | 132 +
 script/install-terraform-from-src/_cm.json | 37 +
 .../install-terraform-from-src/customize.py | 19 +
 script/install-terraform-from-src/run.sh | 16 +
 script/install-tflite-from-src/README.md | 137 +
 script/install-tflite-from-src/_cm.json | 62 +
 script/install-tflite-from-src/customize.py | 27 +
 script/install-tflite-from-src/run.sh | 27 +
 script/install-torchvision-from-src/README.md | 195 +
 script/install-torchvision-from-src/_cm.json | 140 +
 .../install-torchvision-from-src/customize.py | 26 +
 script/install-torchvision-from-src/run.sh | 14 +
 .../install-tpp-pytorch-extension/README.md | 198 +
 script/install-tpp-pytorch-extension/_cm.json | 163 +
 .../customize.py | 23 +
 script/install-tpp-pytorch-extension/run.sh | 11 +
 .../install-transformers-from-src/README.md | 196 +
 script/install-transformers-from-src/_cm.json | 161 +
 .../customize.py | 21 +
 script/install-transformers-from-src/run.sh | 24 +
 script/launch-benchmark/README-extra.md | 3 +
 script/launch-benchmark/README.md | 118 +
 script/launch-benchmark/_cm.yaml | 15 +
 script/launch-benchmark/customize.py | 732 +++
 script/launch-benchmark/tests/debug.py | 6 +
 script/prepare-training-data-bert/README.md | 195 +
 script/prepare-training-data-bert/_cm.json | 149 +
 .../prepare-training-data-bert/customize.py | 50 +
 .../prepare-training-data-bert/run-nvidia.sh | 33 +
 .../run-reference.sh | 81 +
 script/prepare-training-data-bert/run.sh | 36 +
 .../prepare-training-data-bert/run_config.yml | 13 +
 script/prepare-training-data-resnet/README.md | 207 +
 script/prepare-training-data-resnet/_cm.json | 121 +
 .../prepare-training-data-resnet/customize.py | 52 +
 .../run-nvidia.sh | 43 +
 .../run-reference.sh | 37 +
 .../run_config.yml | 13 +
 .../README.md | 145 +
 .../_cm.json | 50 +
 .../customize.py | 43 +
 .../run.sh | 5 +
 script/print-croissant-desc/README-extra.md | 16 +
 script/print-croissant-desc/README.md | 146 +
 script/print-croissant-desc/_cm.yaml | 29 +
 script/print-croissant-desc/code.py | 27 +
 script/print-croissant-desc/run.bat | 2 +
 script/print-croissant-desc/run.sh | 4 +
 script/print-hello-world-java/README.md | 125 +
 script/print-hello-world-java/_cm.json | 26 +
 script/print-hello-world-java/code.java | 27 +
 script/print-hello-world-java/run.bat | 4 +
 script/print-hello-world-java/run.sh | 6 +
 script/print-hello-world-javac/README.md | 125 +
 script/print-hello-world-javac/_cm.json | 26 +
 script/print-hello-world-javac/code.java | 27 +
 script/print-hello-world-javac/run.bat | 8 +
 script/print-hello-world-javac/run.sh | 10 +
 script/print-hello-world-py/README.md | 130 +
 script/print-hello-world-py/_cm.json | 37 +
 script/print-hello-world-py/code.py | 6 +
 script/print-hello-world-py/run.bat | 8 +
 script/print-hello-world-py/run.sh | 11 +
 script/print-hello-world/README.md | 137 +
 script/print-hello-world/_cm.json | 32 +
 script/print-hello-world/run.bat | 7 +
 script/print-hello-world/run.sh | 9 +
 script/print-python-version/README.md | 123 +
 script/print-python-version/_cm.json | 22 +
 script/print-python-version/run.bat | 8 +
 script/print-python-version/run.sh | 11 +
 script/process-ae-users/README.md | 138 +
 script/process-ae-users/_cm.json | 25 +
 script/process-ae-users/code.py | 78 +
 script/process-ae-users/customize.py | 10 +
 script/process-ae-users/run.bat | 2 +
 script/process-ae-users/run.sh | 4 +
 script/process-mlperf-accuracy/README.md | 332 ++
 script/process-mlperf-accuracy/_cm.json | 304 +
 script/process-mlperf-accuracy/customize.py | 163 +
 script/process-mlperf-accuracy/run.bat | 8 +
 script/process-mlperf-accuracy/run.sh | 9 +
 script/prune-bert-models/README-extra.md | 1 +
 script/prune-bert-models/README.md | 187 +
 script/prune-bert-models/_cm.json | 87 +
 script/prune-bert-models/customize.py | 48 +
 script/prune-bert-models/run.sh | 19 +
 script/prune-docker/README.md | 120 +
 script/prune-docker/_cm.json | 11 +
 script/prune-docker/run.bat | 1 +
 script/prune-docker/run.sh | 3 +
 script/publish-results-to-dashboard/README.md | 125 +
 script/publish-results-to-dashboard/_cm.json | 23 +
 script/publish-results-to-dashboard/code.py | 92 +
 script/publish-results-to-dashboard/run.bat | 2 +
 script/publish-results-to-dashboard/run.sh | 7 +
 script/pull-git-repo/README.md | 136 +
 script/pull-git-repo/_cm.json | 27 +
 script/pull-git-repo/customize.py | 28 +
 script/pull-git-repo/run.sh | 16 +
 script/push-csv-to-spreadsheet/README.md | 144 +
 script/push-csv-to-spreadsheet/_cm.json | 36 +
 script/push-csv-to-spreadsheet/customize.py | 15 +
 script/push-csv-to-spreadsheet/google_api.py | 55 +
 script/push-csv-to-spreadsheet/run.sh | 3 +
 .../README.md | 151 +
 .../_cm.json | 53 +
 .../customize.py | 35 +
 .../run.sh | 14 +
 script/remote-run-commands/README-extra.md | 0
 script/remote-run-commands/README.md | 147 +
 script/remote-run-commands/_cm.json | 33 +
 script/remote-run-commands/customize.py | 49 +
 script/remote-run-commands/run.bat | 1 +
 script/remote-run-commands/run.sh | 4 +
 .../README-extra.md | 75 +
 .../reproduce-ipol-paper-2022-439/README.md | 150 +
 script/reproduce-ipol-paper-2022-439/_cm.yaml | 40 +
 .../customize.py | 34 +
 .../requirements.txt | 5 +
 script/reproduce-ipol-paper-2022-439/run.bat | 33 +
 script/reproduce-ipol-paper-2022-439/run.sh | 42 +
 .../README-extra.md | 44 +
 .../README.md | 179 +
 .../_cm.yaml | 37 +
 .../customize.py | 22 +
 .../install_deps.sh | 24 +
 .../main.py | 10 +
 .../plot.sh | 14 +
 .../reproduce-micro-paper-2023-victima/run.sh | 15 +
 .../reproduce-micro-paper-2023-xyz/README.md | 178 +
 .../README.md | 381 ++
 .../README-extra.md | 13 +
 .../README.md | 215 +
 .../_cm.json | 140 +
 .../customize.py | 23 +
 .../dockerfiles/ubuntu_20.04.Dockerfile | 17 +
 .../run.sh | 32 +
 .../README.md | 171 +
 .../reproduce-mlperf-training-nvidia/_cm.yaml | 77 +
 .../customize.py | 32 +
 .../run-resnet.sh | 16 +
 .../reproduce-mlperf-training-nvidia/run.sh | 8 +
 script/run-all-mlperf-models/README.md | 237 +
 script/run-all-mlperf-models/_cm.yaml | 130 +
 script/run-all-mlperf-models/customize.py | 103 +
 .../run-all-mlperf-models/run-bert-macos.sh | 75 +
 script/run-all-mlperf-models/run-bert.sh | 79 +
 .../run-cpp-implementation.sh | 163 +
 .../run-mobilenet-models.sh | 67 +
 .../run-all-mlperf-models/run-nvidia-4090.sh | 61 +
 .../run-all-mlperf-models/run-nvidia-a100.sh | 59 +
script/run-all-mlperf-models/run-nvidia-t4.sh | 59 +
.../run-all-mlperf-models/run-pruned-bert.sh | 85 +
.../run-reference-models.sh | 67 +
.../run-resnet50-macos.sh | 70 +
script/run-all-mlperf-models/run-resnet50.sh | 87 +
script/run-all-mlperf-models/run-retinanet-sh | 86 +
script/run-all-mlperf-models/template.sh | 66 +
script/run-docker-container/README-extra.md | 15 +
script/run-docker-container/README.md | 166 +
script/run-docker-container/_cm.json | 61 +
script/run-docker-container/customize.py | 256 +
.../run-mlperf-inference-app/README-extra.md | 21 +
script/run-mlperf-inference-app/README.md | 400 ++
script/run-mlperf-inference-app/_cm.yaml | 501 ++
script/run-mlperf-inference-app/customize.py | 797 +++
.../faq/ctuning-cpp-tflite.md | 1 +
.../faq/deepsparse.md | 1 +
script/run-mlperf-inference-app/faq/intel.md | 1 +
.../faq/mlcommons-cpp.md | 1 +
.../faq/mlcommons-python.md | 1 +
script/run-mlperf-inference-app/faq/nvidia.md | 2 +
.../run-mlperf-inference-app/faq/qualcomm.md | 1 +
.../modular-cm-containers/README.md | 30 +
.../modular-cm-containers/_common.bat | 7 +
.../modular-cm-containers/_common.sh | 10 +
.../modular-cm-containers/build.bat | 25 +
.../modular-cm-containers/build.sh | 27 +
.../mlperf-inference--ubuntu-cpu.Dockerfile | 118 +
.../modular-cm-containers/run.bat | 3 +
.../modular-cm-containers/run.sh | 3 +
.../run-mlperf-inference-app/run_mobilenet.py | 106 +
.../setup/b-deepsparse.md | 1 +
.../run-mlperf-inference-app/setup/i-intel.md | 1 +
.../setup/i-nvidia.md | 3 +
.../setup/i-qualcomm.md | 6 +
.../README-about.md | 107 +
.../README.md | 385 ++
.../_cm.json | 167 +
.../customize.py | 189 +
.../run.sh | 1 +
.../README-extra.md | 10 +
.../README.md | 197 +
.../_cm.json | 140 +
.../code.py | 27 +
.../customize.py | 92 +
.../run.bat | 6 +
.../run.sh | 13 +
.../run-mlperf-power-client/README-extra.md | 15 +
script/run-mlperf-power-client/README.md | 156 +
script/run-mlperf-power-client/_cm.json | 55 +
script/run-mlperf-power-client/customize.py | 43 +
script/run-mlperf-power-client/dummy.sh | 12 +
script/run-mlperf-power-client/run.sh | 14 +
.../run-mlperf-power-server/README-extra.md | 17 +
script/run-mlperf-power-server/README.md | 165 +
script/run-mlperf-power-server/_cm.json | 82 +
script/run-mlperf-power-server/customize.py | 39 +
script/run-mlperf-power-server/run.bat | 7 +
script/run-mlperf-power-server/run.sh | 5 +
.../README.md | 181 +
.../_cm.json | 108 +
.../customize.py | 37 +
.../run.sh | 10 +
script/run-python/README.md | 140 +
script/run-python/_cm.json | 26 +
script/run-python/run.bat | 2 +
script/run-python/run.sh | 4 +
script/run-terraform/README-about.md | 12 +
script/run-terraform/README-extra.md | 1 +
script/run-terraform/README.md | 482 ++
script/run-terraform/_cm.json | 470 ++
script/run-terraform/aws/apply_credentials.sh | 3 +
script/run-terraform/aws/credentials.example | 3 +
script/run-terraform/aws/main.tf | 67 +
script/run-terraform/customize.py | 87 +
script/run-terraform/gcp/apply_credentials.sh | 0
script/run-terraform/gcp/main.tf | 80 +
script/run-terraform/run.sh | 14 +
.../README.md | 116 +
.../_cm.yaml | 13 +
.../customize.py | 63 +
script/set-device-settings-qaic/README.md | 145 +
script/set-device-settings-qaic/_cm.json | 54 +
script/set-device-settings-qaic/customize.py | 39 +
script/set-device-settings-qaic/run.sh | 44 +
script/set-echo-off-win/README.md | 118 +
script/set-echo-off-win/_cm.json | 18 +
script/set-echo-off-win/customize.py | 25 +
script/set-performance-mode/README.md | 182 +
script/set-performance-mode/_cm.json | 60 +
script/set-performance-mode/customize.py | 23 +
script/set-performance-mode/run-ubuntu.sh | 36 +
script/set-performance-mode/run.bat | 1 +
script/set-performance-mode/run.sh | 27 +
script/set-sqlite-dir/README.md | 143 +
script/set-sqlite-dir/_cm.json | 33 +
script/set-sqlite-dir/code.py | 2 +
script/set-sqlite-dir/customize.py | 9 +
script/set-sqlite-dir/run.bat | 2 +
script/set-sqlite-dir/run.sh | 4 +
script/set-venv/README-extra.md | 6 +
script/set-venv/README.md | 131 +
script/set-venv/_cm.yaml | 14 +
script/set-venv/customize.py | 96 +
script/tar-my-folder/README-extra.md | 12 +
script/tar-my-folder/README.md | 135 +
script/tar-my-folder/_cm.json | 19 +
script/tar-my-folder/customize.py | 29 +
.../README-extra.md | 1 +
.../README.md | 125 +
.../_cm.yaml | 31 +
.../customize.py | 22 +
.../run.bat | 1 +
.../run.sh | 27 +
.../test-mlperf-inference-retinanet/README.md | 137 +
.../test-mlperf-inference-retinanet/_cm.json | 49 +
.../customize.py | 18 +
.../test-mlperf-inference-retinanet/run.bat | 8 +
script/test-mlperf-inference-retinanet/run.sh | 9 +
script/test-set-sys-user-cm/README.md | 120 +
script/test-set-sys-user-cm/_cm.json | 18 +
script/test-set-sys-user-cm/run.sh | 7 +
.../README-extra.md | 7 +
.../README.md | 146 +
.../_cm.json | 55 +
.../customize.py | 25 +
.../run.sh | 5 +
script/upgrade-python-pip/README.md | 125 +
script/upgrade-python-pip/_cm.json | 25 +
script/upgrade-python-pip/run.bat | 2 +
script/upgrade-python-pip/run.sh | 4 +
.../README-extra.md | 17 +
.../README.md | 142 +
.../_cm.json | 38 +
.../customize.py | 37 +
.../run.sh | 0
1603 files changed, 143150 insertions(+), 1 deletion(-)
create mode 100644 .github/workflows/check-all-broken-links.md
create mode 100644 .github/workflows/check-broken-links.md
create mode 100644 .github/workflows/test-cm-script-features.yml
create mode 100644 .github/workflows/test-cm-scripts.yml
create mode 100644 .github/workflows/test-cm-tutorial-retinanet.yml
create mode 100644 .github/workflows/test-cm-tutorial-tvm-pip.yml
create mode 100644 .github/workflows/test-cm-tutorial-tvm.yml
create mode 100644 .github/workflows/test-cm.yml
create mode 100644 .github/workflows/test-image-classification-onnx.yml
create mode 100644 .github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml
create mode 100644 .github/workflows/test-mlperf-inference-gptj.yml
create mode 100644 .github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml
create mode 100644 .github/workflows/test-mlperf-inference-resnet50.yml
create mode 100644 .github/workflows/test-mlperf-inference-retinanet.yml
create mode 100644 .github/workflows/test-mlperf-inference-rnnt.yml
create mode 100644 .github/workflows/test-mlperf-inference-tvm.yml
create mode 100644 .github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml
create mode 100644 .github/workflows/test-qaic-compute-sdk-build.yml
create mode 100644 .github/workflows/test-qaic-software-kit.yml
create mode 100644 .github/workflows/update-script-dockerfiles.yml
create mode 100644 .github/workflows/update-script-readme.yml
create mode 100644 CHANGES.md
create mode 100644 LICENSE.third-party.md
create mode 100644 README.md
create mode 100644 automation/cache/README-extra.md
create mode 100644 automation/cache/README.md
create mode 100644 automation/cache/_cm.json
create mode 100644 automation/cache/module.py
create mode 100644 automation/cache/module_misc.py
create mode 100644 automation/cfg/README.md
create mode 100644 automation/cfg/_cm.json
create mode 100644 automation/cfg/module.py
create mode 100644 automation/challenge/README.md
create mode 100644 automation/challenge/_cm.json
create mode 100644 automation/challenge/module.py
create mode 100644 automation/contributor/README.md
create mode 100644 automation/contributor/_cm.json
create mode 100644 automation/contributor/module.py
create mode 100644 automation/data/_cm.json
create mode 100644 automation/data/module.py
create mode 100644 automation/docker/README.md
create mode 100644 automation/docker/_cm.json
create mode 100644 automation/docker/module.py
create mode 100644 automation/docs/_cm.json
create mode 100644 automation/docs/module.py
create mode 100644 automation/experiment/README-extra.md
create mode 100644 automation/experiment/README.md
create mode 100644 automation/experiment/_cm.json
create mode 100644 automation/experiment/module.py
create mode 100644 automation/experiment/tests/test2.bat
create mode 100644 automation/experiment/tests/test2.sh
create mode 100644 automation/experiment/tests/test3.bat
create mode 100644 automation/experiment/tests/test3.sh
create mode 100644 automation/experiment/tests/test3_input.yaml
create mode 100644 automation/experiment/tests/test__json.bat
create mode 100644 automation/experiment/tests/test__json.sh
create mode 100644 automation/experiment/tests/test__yaml.bat
create mode 100644 automation/experiment/tests/test__yaml.sh
create mode 100644 automation/experiment/tests/test_input.json
create mode 100644 automation/experiment/tests/test_input.yaml
create mode 100644 automation/experiment/tests/test_run.bat
create mode 100644 automation/experiment/tests/test_run.sh
create mode 100644 automation/project/README.md
create mode 100644 automation/project/_cm.json
create mode 100644 automation/project/module.py
create mode 100644 automation/report/README.md
create mode 100644 automation/report/_cm.json
create mode 100644 automation/report/module.py
create mode 100644 automation/script/README-extra.md
create mode 100644 automation/script/README-specs.md
create mode 100644 automation/script/README.md
create mode 100644 automation/script/_cm.json
create mode 100644 automation/script/assets/scripts-workflow.png
create mode 100644 automation/script/module.py
create mode 100644 automation/script/module_help.py
create mode 100644 automation/script/module_misc.py
create mode 100644 automation/script/template-ae-python/README-extra.md
create mode 100644 automation/script/template-ae-python/_cm.yaml
create mode 100644 automation/script/template-ae-python/analyze.bat
create mode 100644 automation/script/template-ae-python/analyze.sh
create mode 100644 automation/script/template-ae-python/customize.py
create mode 100644 automation/script/template-ae-python/install_deps.bat
create mode 100644 automation/script/template-ae-python/install_deps.sh
create mode 100644 automation/script/template-ae-python/main.py
create mode 100644 automation/script/template-ae-python/plot.bat
create mode 100644 automation/script/template-ae-python/plot.sh
create mode 100644 automation/script/template-ae-python/reproduce.bat
create mode 100644 automation/script/template-ae-python/reproduce.sh
create mode 100644 automation/script/template-ae-python/run.bat
create mode 100644 automation/script/template-ae-python/run.sh
create mode 100644 automation/script/template-ae-python/validate.bat
create mode 100644 automation/script/template-ae-python/validate.sh
create mode 100644 automation/script/template-python/README-extra.md
create mode 100644 automation/script/template-python/_cm.yaml
create mode 100644 automation/script/template-python/customize.py
create mode 100644 automation/script/template-python/main.py
create mode 100644 automation/script/template-python/requirements.txt
create mode 100644 automation/script/template-python/run.bat
create mode 100644 automation/script/template-python/run.sh
create mode 100644 automation/script/template-pytorch/README-extra.md
create mode 100644 automation/script/template-pytorch/_cm.yaml
create mode 100644 automation/script/template-pytorch/customize.py
create mode 100644 automation/script/template-pytorch/main.py
create mode 100644 automation/script/template-pytorch/requirements.txt
create mode 100644 automation/script/template-pytorch/run.bat
create mode 100644 automation/script/template-pytorch/run.sh
create mode 100644 automation/script/template/README-extra.md
create mode 100644 automation/script/template/customize.py
create mode 100644 automation/script/template/run.bat
create mode 100644 automation/script/template/run.sh
create mode 100644 automation/script/template_list_of_scripts.md
create mode 100644 automation/utils/README.md
create mode 100644 automation/utils/_cm.json
create mode 100644 automation/utils/module.py
create mode 100644 automation/utils/module_cfg.py
create mode 100644 cfg/benchmark-hardware-compute/_cm.json
create mode 100644 cfg/benchmark-hardware-compute/amd-cpu-x64.json
create mode 100644 cfg/benchmark-hardware-compute/amd-gpu.json
create mode 100644 cfg/benchmark-hardware-compute/generic-cpu-arm64.json
create mode 100644 cfg/benchmark-hardware-compute/google-tpu.json
create mode 100644 cfg/benchmark-hardware-compute/habana-gaudi.json
create mode 100644 cfg/benchmark-hardware-compute/intel-cpu-x64.json
create mode 100644 cfg/benchmark-hardware-compute/nvidia-gpu-jetson-orin.yaml
create mode 100644 cfg/benchmark-hardware-compute/nvidia-gpu.json
create mode 100644 cfg/benchmark-hardware-compute/qualcomm-ai100.json
create mode 100644 cfg/benchmark-hardware-compute/stm-32L4R5ZIT6U-NUCLEO-L4R5ZI.yaml
create mode 100644 cfg/benchmark-list/_cm.json
create mode 100644 cfg/benchmark-list/loadgen-cpp.yaml
create mode 100644 cfg/benchmark-list/loadgen-python.yaml
create mode 100644 cfg/benchmark-list/mlperf-abtf.yaml
create mode 100644 cfg/benchmark-list/mlperf-inference.yaml
create mode 100644 cfg/benchmark-list/mlperf-mobile.yaml
create mode 100644 cfg/benchmark-list/mlperf-tiny.yaml
create mode 100644 cfg/benchmark-list/mlperf-training.yaml
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-input.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-meta.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-output.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-input.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-meta.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-output.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-input.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-meta.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-output.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-input.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-meta.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-input.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-meta.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-input.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-meta.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-output.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-input.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-meta.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-output.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985.md
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-input.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-meta.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-input.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-meta.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-output.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-input.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-meta.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-output.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf.md
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-input.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-meta.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-output.json
create mode 100644 cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml
create mode 100644 script/README.md
create mode 100644 script/activate-python-venv/README-extra.md
create mode 100644 script/activate-python-venv/README.md
create mode 100644 script/activate-python-venv/_cm.json
create mode 100644 script/activate-python-venv/customize.py
create mode 100644 script/activate-python-venv/run.bat
create mode 100644 script/activate-python-venv/run.sh
create mode 100644 script/add-custom-nvidia-system/README-extra.md
create mode 100644 script/add-custom-nvidia-system/README.md
create mode 100644 script/add-custom-nvidia-system/_cm.yaml
create mode 100644 script/add-custom-nvidia-system/customize.py
create mode 100644 script/add-custom-nvidia-system/run.sh
create mode 100644 script/app-image-classification-onnx-py/README-extra.md
create mode 100644 script/app-image-classification-onnx-py/README.md
create mode 100644 script/app-image-classification-onnx-py/_cm.yaml
create mode 100644 script/app-image-classification-onnx-py/customize.py
create mode 100644 script/app-image-classification-onnx-py/img/computer_mouse.jpg
create mode 100644 script/app-image-classification-onnx-py/requirements.txt
create mode 100644 script/app-image-classification-onnx-py/run.bat
create mode 100644 script/app-image-classification-onnx-py/run.sh
create mode 100644 script/app-image-classification-onnx-py/src/onnx_classify.py
create mode 100644 script/app-image-classification-onnx-py/tests/README.md
create mode 100644 script/app-image-classification-tf-onnx-cpp/README-extra.md
create mode 100644 script/app-image-classification-tf-onnx-cpp/README.md
create mode 100644 script/app-image-classification-tf-onnx-cpp/_cm.json
create mode 100644 script/app-image-classification-tf-onnx-cpp/include/benchmark.h
create mode 100644 script/app-image-classification-tf-onnx-cpp/run.sh
create mode 100644 script/app-image-classification-tf-onnx-cpp/src/classification.cpp
create mode 100644 script/app-image-classification-torch-py/README-extra.md
create mode 100644 script/app-image-classification-torch-py/README.md
create mode 100644 script/app-image-classification-torch-py/_cm.json
create mode 100644 script/app-image-classification-torch-py/img/computer_mouse.jpg
create mode 100644 script/app-image-classification-torch-py/requirements.txt
create mode 100644 script/app-image-classification-torch-py/run.bat
create mode 100644 script/app-image-classification-torch-py/run.sh
create mode 100644 script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py
create mode 100644 script/app-image-classification-tvm-onnx-py/README-extra.md
create mode 100644 script/app-image-classification-tvm-onnx-py/README.md
create mode 100644 script/app-image-classification-tvm-onnx-py/_cm.json
create mode 100644 script/app-image-classification-tvm-onnx-py/img/computer_mouse.jpg
create mode 100644 script/app-image-classification-tvm-onnx-py/requirements.txt
create mode 100644 script/app-image-classification-tvm-onnx-py/run.sh
create mode 100644 script/app-image-classification-tvm-onnx-py/src/classify.py
create mode 100644 script/app-image-corner-detection/README-extra.md
create mode 100644 script/app-image-corner-detection/README.md
create mode 100644 script/app-image-corner-detection/_cm.json
create mode 100644 script/app-image-corner-detection/customize.py
create mode 100644 script/app-image-corner-detection/run.sh
create mode 100644 script/app-image-corner-detection/susan.c
create mode 100644 script/app-loadgen-generic-python/README-extra.md
create mode 100644 script/app-loadgen-generic-python/README.md
create mode 100644 script/app-loadgen-generic-python/_cm.yaml
create mode 100644 script/app-loadgen-generic-python/customize.py
create mode 100644 script/app-loadgen-generic-python/run.bat
create mode 100644 script/app-loadgen-generic-python/run.sh
create mode 100644 script/app-loadgen-generic-python/src/backend_onnxruntime.py
create mode 100644 script/app-loadgen-generic-python/src/backend_pytorch.py
create mode 100644 script/app-loadgen-generic-python/src/loadgen/harness.py
create mode 100644 script/app-loadgen-generic-python/src/loadgen/model.py
create mode 100644 script/app-loadgen-generic-python/src/loadgen/runners.py
create mode 100644 script/app-loadgen-generic-python/src/main.py
create mode 100644 script/app-loadgen-generic-python/src/utils.py
create mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat
create mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh
create mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat
create mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh
create mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile
create mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile
create mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat
create mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh
create mode 100644 script/app-mlperf-inference-ctuning-cpp-tflite/README.md
create mode 100644 script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json
create mode 100644 script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp
create mode 100644 script/app-mlperf-inference-ctuning-cpp-tflite/customize.py
create mode 100644 script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h
create mode 100644 script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp
create mode 100644 script/app-mlperf-inference-dummy/README.md
create mode 100644 script/app-mlperf-inference-dummy/_cm.yaml
create mode 100644 script/app-mlperf-inference-dummy/customize.py
create mode 100644 script/app-mlperf-inference-dummy/run.sh
create mode 100644 script/app-mlperf-inference-intel/README.md
create mode 100644 script/app-mlperf-inference-intel/_cm.yaml
create mode 100644 script/app-mlperf-inference-intel/build_bert_harness.sh
create mode 100644 script/app-mlperf-inference-intel/build_gptj_harness.sh
create mode 100644 script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh
create mode 100644 script/app-mlperf-inference-intel/customize.py
create mode 100644 script/app-mlperf-inference-intel/run_bert_harness.sh
create mode 100644 script/app-mlperf-inference-intel/run_gptj_harness.sh
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/CONTRIBUTING.md
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/README-extra.md
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/README.md
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/_cm.yaml
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/customize.py
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/dockerfiles/ubuntu_22.04.Dockerfile
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/inc/backend.h
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/inc/common.h
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/inc/device.h
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/inc/gpu_device.h
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/inc/model.h
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/inc/npy.h
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/inc/onnxruntime_backend.h
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/inc/sample_library.h
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/inc/system.h
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/inc/tflite_backend.h
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/src/main.cpp
create mode 100644 script/app-mlperf-inference-mlcommons-cpp/tests/win.bat
create mode 100644 script/app-mlperf-inference-mlcommons-python/README-about.md
create mode 100644 script/app-mlperf-inference-mlcommons-python/README-extra.md
create mode 100644 script/app-mlperf-inference-mlcommons-python/README.md
create mode 100644 script/app-mlperf-inference-mlcommons-python/_cm.yaml
create mode 100644 script/app-mlperf-inference-mlcommons-python/customize.py
create mode 100644 script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py
create mode 100644 script/app-mlperf-inference-nvidia/README-about.md
create mode 100644 script/app-mlperf-inference-nvidia/README.md
create mode 100644 script/app-mlperf-inference-nvidia/_cm.yaml
create mode 100644 script/app-mlperf-inference-nvidia/customize.py
create mode 100644 script/app-mlperf-inference-nvidia/run.sh
create mode 100644 script/app-mlperf-inference-qualcomm/README.md
create mode 100644 script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md
create mode 100644 script/app-mlperf-inference-qualcomm/_cm.yaml
create mode 100644 script/app-mlperf-inference-qualcomm/customize.py
create mode 100644 script/app-mlperf-inference-qualcomm/run.sh
create mode 100644 script/app-mlperf-inference/README-about.md
create mode 100644 script/app-mlperf-inference/README-extra.md
create mode 100644 script/app-mlperf-inference/README.md
create mode 100644 script/app-mlperf-inference/_cm.yaml
create mode 100644 script/app-mlperf-inference/build_dockerfiles.py
create mode 100644 script/app-mlperf-inference/customize.py
create mode 100644 script/app-mlperf-inference/dockerfiles/README.md
create mode 100644 script/app-mlperf-inference/dockerfiles/bert-99.9/rhel_9_python_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/bert-99.9/rhel_9_python_tensorflow_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_18.04_python_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_18.04_python_tensorflow_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_20.04_python_tensorflow_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_22.04_python_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_22.04_python_tensorflow_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/_info.md
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_cpp_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_python_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_python_pytorch_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_python_tensorflow_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_cpp_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_python_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_python_pytorch_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_python_tensorflow_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_cpp_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_python_pytorch_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_python_tensorflow_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_cpp_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_python_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_python_pytorch_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_python_tensorflow_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/retinanet/_test.sh
create mode 100644 script/app-mlperf-inference/dockerfiles/retinanet/rhel_9_cpp_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/retinanet/rhel_9_python_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/retinanet/rhel_9_python_pytorch_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_18.04_cpp_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_18.04_python_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_18.04_python_pytorch_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_20.04_cpp_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_20.04_python_pytorch_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_22.04_cpp_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_22.04_python_onnxruntime_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_22.04_python_pytorch_cpu.Dockerfile
create mode 100644 script/app-mlperf-inference/run.sh
create mode 100644 script/app-mlperf-inference/run_config.yml
create mode 100644 script/app-mlperf-inference/verify_accuracy.sh
create mode 100644 script/app-mlperf-training-nvidia/README.md
create mode 100644 script/app-mlperf-training-nvidia/_cm.yaml
create mode 100644 script/app-mlperf-training-nvidia/customize.py
create mode 100644 script/app-mlperf-training-nvidia/run-bert-training.sh
create mode 100644 script/app-mlperf-training-nvidia/run.sh
create mode 100644 script/app-mlperf-training-reference/README.md
create mode 100644 script/app-mlperf-training-reference/_cm.yaml
create mode 100644 script/app-mlperf-training-reference/customize.py
create mode 100644 script/app-mlperf-training-reference/run-bert-training.sh
create mode 100644 script/app-mlperf-training-reference/run.sh
create mode 100644 script/app-stable-diffusion-onnx-py/README-extra.md
create mode 100644 script/app-stable-diffusion-onnx-py/README.md
create mode 100644 script/app-stable-diffusion-onnx-py/_cm.yaml
create mode 100644 script/app-stable-diffusion-onnx-py/process.py
create mode 100644 script/app-stable-diffusion-onnx-py/run.bat
create mode 100644 script/app-stable-diffusion-onnx-py/run.sh
create mode 100644 script/benchmark-any-mlperf-inference-implementation/README.md
create mode 100644 script/benchmark-any-mlperf-inference-implementation/_cm.yaml
create mode 100644 script/benchmark-any-mlperf-inference-implementation/customize.py
create mode 100644 script/benchmark-any-mlperf-inference-implementation/run-template.sh
create mode 100644 script/benchmark-program-mlperf/README.md
create mode 100644 script/benchmark-program-mlperf/_cm.json
create mode 100644 script/benchmark-program-mlperf/customize.py
create mode 100644 script/benchmark-program/README.md
create mode 100644 script/benchmark-program/_cm.json
create mode 100644 script/benchmark-program/customize.py
create mode 100644 script/benchmark-program/run-ubuntu.sh
create mode 100644 script/benchmark-program/run.bat
create mode 100644 script/benchmark-program/run.sh
create mode 100644 script/build-docker-image/README-extra.md
create mode 100644 script/build-docker-image/README.md
create mode 100644 script/build-docker-image/_cm.json
create mode 100644 script/build-docker-image/customize.py
create mode 100644 script/build-docker-image/examples/0-common.bat
create mode 100644 script/build-docker-image/examples/0-generate.bat
create mode 100644 script/build-docker-image/examples/1-build.bat
create mode 100644 script/build-docker-image/examples/2-run-cm-command1.bat
create mode 100644 script/build-docker-image/examples/2-run-cm-command2.bat
create mode 100644 script/build-docker-image/examples/2-run-cm-command3.bat
create mode 100644 script/build-docker-image/examples/2-run-cm-command4.bat
create mode 100644 script/build-docker-image/examples/2-run-cm-command5.bat
create mode 100644 script/build-docker-image/examples/2-run-interactive1.bat
create mode 100644 script/build-docker-image/examples/2-run-interactive2.bat
create mode 100644 script/build-docker-image/examples/3-push-to-docker-hub.bat
create mode 100644 script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804
create mode 100644 script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804
create mode 100644 script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest
create mode 100644 script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest
create mode 100644 script/build-docker-image/examples/README.md
create mode 100644 script/build-docker-image/examples/computer_mouse.jpg
create mode 100644 script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py
create mode 100644 script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat
create mode 100644 script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh
create mode 100644 script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat
create mode 100644 script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh
create mode 100644 script/build-docker-image/run.bat
create mode 100644 script/build-docker-image/run.sh
create mode 100644 script/build-dockerfile/README-extra.md
create mode 100644 script/build-dockerfile/README.md
create mode 100644 script/build-dockerfile/_cm.json
create mode 100644 script/build-dockerfile/customize.py
create mode 100644 script/build-dockerfile/dockerfiles/rhel_9.Dockerfile
create mode 100644 script/build-dockerfile/dockerfiles/ubuntu_18.04.Dockerfile
create mode 100644 script/build-dockerfile/dockerfiles/ubuntu_20.04.Dockerfile
create mode 100644 script/build-dockerfile/dockerfiles/ubuntu_22.04.Dockerfile
create mode 100644 script/build-dockerfile/dockerinfo.json
create mode 100644 script/build-mlperf-inference-server-nvidia/README-extra.md
create mode 100644 script/build-mlperf-inference-server-nvidia/README.md
create mode 100644 script/build-mlperf-inference-server-nvidia/_cm.yaml
create mode 100644 script/build-mlperf-inference-server-nvidia/customize.py
create mode 100644 script/build-mlperf-inference-server-nvidia/run.sh
create mode 100644 script/calibrate-model-for.qaic/README.md
create mode 100644 script/calibrate-model-for.qaic/_cm.json
create mode 100644 script/calibrate-model-for.qaic/customize.py
create mode 100644 script/calibrate-model-for.qaic/run.sh
create mode 100644 script/compile-model-for.qaic/README.md
create mode 100644 script/compile-model-for.qaic/_cm.json
create mode 100644 script/compile-model-for.qaic/customize.py
create mode 100644 script/compile-model-for.qaic/run.sh
create mode 100644 script/compile-program/README-extra.md
create mode 100644 script/compile-program/README.md
create mode 100644 script/compile-program/_cm.json
create mode 100644 script/compile-program/customize.py
create mode 100644 script/compile-program/run.bat
create mode 100644 script/compile-program/run.sh
create mode 100644 script/convert-csv-to-md/README.md
create mode 100644 script/convert-csv-to-md/_cm.json
create mode 100644 script/convert-csv-to-md/customize.py
create mode 100644 script/convert-csv-to-md/process.py
create mode 100644 script/convert-csv-to-md/run.bat
create mode 100644 script/convert-csv-to-md/run.sh
create mode 100644 script/convert-ml-model-huggingface-to-onnx/README.md
create mode 100644 script/convert-ml-model-huggingface-to-onnx/_cm.json
create mode 100644 script/convert-ml-model-huggingface-to-onnx/customize.py
create mode 100644 script/convert-ml-model-huggingface-to-onnx/run.sh
create mode 100644 script/copy-to-clipboard/README.md
create mode 100644 script/copy-to-clipboard/_cm.yaml
create mode 100644 script/copy-to-clipboard/code.py
create mode 100644 script/copy-to-clipboard/run.bat
create mode 100644 script/copy-to-clipboard/run.sh
create mode 100644 script/create-conda-env/README.md
create mode 100644 script/create-conda-env/_cm.json
create mode 100644 script/create-conda-env/customize.py
create mode 100644 script/create-conda-env/run.sh
create mode 100644 script/create-fpgaconvnet-app-tinyml/README.md
create mode 100644 script/create-fpgaconvnet-app-tinyml/_cm.json
create mode 100644 script/create-fpgaconvnet-app-tinyml/customize.py
create mode 100644 script/create-fpgaconvnet-app-tinyml/run.sh
create mode 100644 script/create-fpgaconvnet-config-tinyml/README.md
create mode 100644 script/create-fpgaconvnet-config-tinyml/_cm.json
create mode 100644 script/create-fpgaconvnet-config-tinyml/customize.py
create mode 100644 script/create-fpgaconvnet-config-tinyml/run.sh
create mode 100644 script/create-patch/README-extra.md
create mode 100644 script/create-patch/README.md
create mode 100644 script/create-patch/_cm.yaml
create mode 100644 script/create-patch/customize.py
create mode 100644 script/destroy-terraform/README-extra.md
create mode 100644 script/destroy-terraform/README.md
create mode 100644 script/destroy-terraform/_cm.json
create mode 100644 script/destroy-terraform/customize.py
create mode 100644 script/destroy-terraform/run.bat
create mode 100644 script/destroy-terraform/run.sh
create mode 100644 script/detect-cpu/README-extra.md
create mode 100644 script/detect-cpu/README.md
create mode 100644 script/detect-cpu/_cm.json
create mode 100644 script/detect-cpu/customize.py
create mode 100644 script/detect-cpu/run.bat
create mode 100644 script/detect-cpu/run.sh
create mode 100644 script/detect-os/README.md
create mode 100644 script/detect-os/_cm.json
create mode 100644 script/detect-os/customize.py
create mode 100644 script/detect-os/run.bat
create mode 100644 script/detect-os/run.sh
create mode 100644 script/detect-os/run_config.yml
create mode 100644 script/detect-sudo/README.md
create mode 100644 script/detect-sudo/_cm.yaml
create mode 100644 script/detect-sudo/customize.py
create mode 100644 script/detect-sudo/run.sh
create mode 100644 script/download-and-extract/README-extra.md
create mode 100644 script/download-and-extract/README.md
create mode 100644 script/download-and-extract/_cm.json
create mode 100644 script/download-and-extract/customize.py
create mode 100644 script/download-and-extract/tests/download-and-extract-file.bat
create mode 100644 script/download-and-extract/tests/download-and-extract-file2.bat
create mode 100644 script/download-file/README-extra.md
create mode 100644 script/download-file/README.md
create mode 100644 script/download-file/_cm.json
create mode 100644 script/download-file/customize.py
create mode 100644 script/download-file/run.bat
create mode 100644 script/download-file/run.sh
create mode 100644 script/download-file/tests/download-file.bat
create mode 100644 script/download-file/tests/download-file2.bat
create mode 100644 script/download-torrent/README.md
create mode 100644 script/download-torrent/_cm.json
create mode 100644 script/download-torrent/customize.py
create mode 100644 script/download-torrent/run.sh
create mode 100644 script/dump-pip-freeze/README.md
create mode 100644 script/dump-pip-freeze/_cm.yaml
create mode 100644 script/dump-pip-freeze/customize.py
create mode 100644 script/dump-pip-freeze/dump.py
create mode 100644 script/dump-pip-freeze/run.bat
create mode 100644 script/dump-pip-freeze/run.sh
create mode 100644 script/extract-file/README-extra.md
create mode 100644 script/extract-file/README.md
create mode 100644 script/extract-file/_cm.json
create mode 100644 script/extract-file/customize.py
create mode 100644 script/extract-file/run.bat
create mode 100644 script/extract-file/run.sh
create mode 100644 script/fail/README-extra.md
create mode 100644 script/fail/README.md
create mode 100644 script/fail/_cm.yaml
create mode 100644 script/fail/customize.py
create mode 100644 script/flash-tinyml-binary/README-extra.md
create mode 100644 script/flash-tinyml-binary/README.md
create mode 100644 script/flash-tinyml-binary/_cm.json
create mode 100644 script/flash-tinyml-binary/customize.py
create mode 100644 script/flash-tinyml-binary/run.sh
create mode 100644 script/generate-mlperf-inference-submission/README-extra.md
create mode 100644 script/generate-mlperf-inference-submission/README.md
create mode 100644 script/generate-mlperf-inference-submission/_cm.json
create mode 100644 script/generate-mlperf-inference-submission/customize.py
create mode 100644 script/generate-mlperf-inference-submission/default_files/analyzer_table.md
create mode 100644 script/generate-mlperf-inference-submission/default_files/power_settings.md
create mode 100644 script/generate-mlperf-inference-user-conf/README.md
create mode 100644 script/generate-mlperf-inference-user-conf/_cm.yaml
create mode 100644 script/generate-mlperf-inference-user-conf/customize.py
create mode 100644 script/generate-mlperf-tiny-report/README-extra.md
create mode 100644 script/generate-mlperf-tiny-report/README.md
create mode 100644 script/generate-mlperf-tiny-report/_cm.yaml
create mode 100644 script/generate-mlperf-tiny-report/customize.py
create mode 100644 script/generate-mlperf-tiny-report/run_submission_checker.bat
create mode 100644 script/generate-mlperf-tiny-report/run_submission_checker.sh
create mode 100644 script/generate-mlperf-tiny-submission/README-extra.md
create mode 100644 script/generate-mlperf-tiny-submission/README.md
create mode 100644 script/generate-mlperf-tiny-submission/_cm.json
create mode 100644 script/generate-mlperf-tiny-submission/customize.py
create mode 100644 script/generate-nvidia-engine/README-about.md
create mode 100644 script/generate-nvidia-engine/README.md
create mode 100644 script/generate-nvidia-engine/_cm.yaml
create mode 100644 script/generate-nvidia-engine/customize.py
create mode 100644 script/generate-nvidia-engine/run.sh
create mode 100644 script/get-android-sdk/README-extra.md
create mode 100644 script/get-android-sdk/README.md
create mode 100644 script/get-android-sdk/_cm.json
create mode 100644 script/get-android-sdk/customize.py
create mode 100644 script/get-android-sdk/prepare-sdk-manager.bat
create mode 100644 script/get-android-sdk/prepare-sdk-manager.sh
create mode 100644 script/get-aocl/README-extra.md
create mode 100644 script/get-aocl/README.md
create mode 100644 script/get-aocl/_cm.json
create mode 100644 script/get-aocl/customize.py
create mode 100644 script/get-aocl/run.sh
create mode 100644 script/get-aria2/README-extra.md
create mode 100644 script/get-aria2/README.md
create mode 100644 script/get-aria2/_cm.yaml
create mode 100644 script/get-aria2/customize.py
create mode 100644 script/get-aria2/install.bat
create mode 100644 script/get-aria2/install.sh
create mode 100644 script/get-aria2/run.bat
create mode 100644 script/get-aria2/run.sh
create mode 100644 script/get-aws-cli/README-extra.md
create mode 100644 script/get-aws-cli/README.md
create mode 100644 script/get-aws-cli/_cm.json
create mode 100644 script/get-aws-cli/customize.py
create mode 100644 script/get-aws-cli/run.sh
create mode 100644 script/get-bazel/README-extra.md
create mode 100644 script/get-bazel/README.md
create mode 100644 script/get-bazel/_cm.json
create mode 100644 script/get-bazel/customize.py
create mode 100644 script/get-bazel/run.bat
create mode 100644 script/get-bazel/run.sh
create mode 100644 script/get-bert-squad-vocab/README.md
create mode 100644 script/get-bert-squad-vocab/_cm.json
create mode 100644 script/get-blis/README-extra.md
create mode 100644 script/get-blis/README.md
create mode 100644 script/get-blis/_cm.json
create mode 100644 script/get-blis/customize.py
create mode 100644 script/get-blis/run.bat
create mode 100644 script/get-blis/run.sh
create mode 100644 script/get-brew/README.md
create mode 100644 script/get-brew/_cm.json
create mode 100644 script/get-brew/dockerfiles/ubuntu_22.04.Dockerfile
create mode 100644 script/get-brew/run.sh
create mode 100644 script/get-ck-repo-mlops/README.md
create mode 100644 script/get-ck-repo-mlops/_cm.json
create mode 100644 script/get-ck-repo-mlops/run.bat
create mode 100644 script/get-ck-repo-mlops/run.sh
create mode 100644 script/get-ck/README.md
create mode 100644 script/get-ck/_cm.json
create mode 100644 script/get-ck/run.bat
create mode 100644 script/get-ck/run.sh
create mode 100644 script/get-cl/README-extra.md
create mode 100644 script/get-cl/README.md
create mode 100644 script/get-cl/_cm.json
create mode 100644 script/get-cl/customize.py
create mode 100644 script/get-cl/run.bat
create mode 100644 script/get-cmake/README.md
create mode 100644 script/get-cmake/_cm.json
create mode 100644 script/get-cmake/customize.py
create mode 100644 script/get-cmake/run.bat
create mode 100644 script/get-cmake/run.sh
create mode 100644 script/get-cmsis_5/README-extra.md
create mode 100644 script/get-cmsis_5/README.md
create mode 100644 script/get-cmsis_5/_cm.json
create mode 100644 script/get-cmsis_5/customize.py
create mode 100644 script/get-cmsis_5/run.sh
create mode 100644 script/get-compiler-flags/README.md
create mode 100644 script/get-compiler-flags/_cm.json
create mode 100644 script/get-compiler-flags/customize.py
create mode 100644 script/get-compiler-rust/README.md
create mode 100644 script/get-compiler-rust/_cm.yaml
create mode 100644 script/get-compiler-rust/customize.py
create mode 100644 script/get-compiler-rust/run.sh
create mode 100644 script/get-conda/README.md
create mode 100644 script/get-conda/_cm.json
create mode 100644 script/get-conda/customize.py
create mode 100644 script/get-conda/install.bat
create mode 100644 script/get-conda/install.sh
create mode 100644 script/get-conda/run.bat
create mode 100644 script/get-conda/run.sh
create mode 100644 script/get-croissant/README.md
create mode 100644 script/get-croissant/_cm.yaml
create mode 100644 script/get-croissant/customize.py
create mode 100644 script/get-croissant/run.bat
create mode 100644 script/get-croissant/run.sh
create mode 100644 script/get-cuda-devices/README.md
create mode 100644 script/get-cuda-devices/_cm.json
create mode 100644 script/get-cuda-devices/customize.py
create mode 100644 script/get-cuda-devices/print_cuda_devices.cu
create mode 100644 script/get-cuda-devices/run.bat
create mode 100644 script/get-cuda-devices/run.sh
create mode 100644 script/get-cuda/README-about.md
create mode 100644 script/get-cuda/README-extra.md
create mode 100644 script/get-cuda/README.md
create mode 100644 script/get-cuda/_cm.json
create mode 100644 script/get-cuda/customize.py
create mode 100644 script/get-cuda/run.bat
create mode 100644 script/get-cuda/run.sh
create mode 100644 script/get-cudnn/README-extra.md
create mode 100644 script/get-cudnn/README.md
create mode 100644 script/get-cudnn/_cm.json
create mode 100644 script/get-cudnn/customize.py
create mode 100644 script/get-cudnn/run.sh
create mode 100644 script/get-dataset-cifar10/README.md
create mode 100644 script/get-dataset-cifar10/_cm.json
create mode 100644 script/get-dataset-cifar10/customize.py
create mode 100644 script/get-dataset-cifar10/requirements.txt
create mode 100644 script/get-dataset-cifar10/run.bat
create mode 100644 script/get-dataset-cifar10/run.sh
create mode 100644 script/get-dataset-cnndm/README.md
create mode 100644 script/get-dataset-cnndm/_cm.json
create mode 100644 script/get-dataset-cnndm/customize.py
create mode 100644 script/get-dataset-cnndm/run-intel.sh
create mode 100644 script/get-dataset-cnndm/run.sh
create mode 100644 script/get-dataset-coco/README-extra.md
create mode 100644 script/get-dataset-coco/README.md
create mode 100644 script/get-dataset-coco/_cm.json
create mode 100644 script/get-dataset-coco/customize.py
create mode 100644 script/get-dataset-coco2014/README.md
create mode 100644 script/get-dataset-coco2014/_cm.yaml
create mode 100644 script/get-dataset-coco2014/customize.py
create mode 100644 script/get-dataset-coco2014/run.bat
create mode 100644 script/get-dataset-coco2014/run.sh
create mode 100644 script/get-dataset-criteo/README-extra.md
create mode 100644 script/get-dataset-criteo/README.md
create mode 100644 script/get-dataset-criteo/_cm.json
create mode 100644 script/get-dataset-criteo/run.sh
create mode 100644 script/get-dataset-imagenet-aux/README.md
create mode 100644 script/get-dataset-imagenet-aux/_cm.json
create mode 100644 script/get-dataset-imagenet-aux/run.bat
create mode 100644 script/get-dataset-imagenet-aux/run.sh
create mode 100644 script/get-dataset-imagenet-calibration/README.md
create mode 100644 script/get-dataset-imagenet-calibration/_cm.yaml
create mode 100644 script/get-dataset-imagenet-helper/README.md
create mode 100644 script/get-dataset-imagenet-helper/_cm.json
create mode 100644 script/get-dataset-imagenet-helper/customize.py
create mode 100644 script/get-dataset-imagenet-helper/imagenet_helper/__init__.py
create mode 100644 script/get-dataset-imagenet-train/README.md
create mode 100644 script/get-dataset-imagenet-train/_cm.json
create mode 100644 script/get-dataset-imagenet-train/customize.py
create mode 100644 script/get-dataset-imagenet-train/run.sh
create mode 100644 script/get-dataset-imagenet-val/README-extra.md
create mode 100644 script/get-dataset-imagenet-val/README.md
create mode 100644 script/get-dataset-imagenet-val/_cm.json
create mode 100644 script/get-dataset-imagenet-val/customize.py
create mode 100644 script/get-dataset-imagenet-val/run.bat
create mode 100644 script/get-dataset-kits19/README.md
create mode 100644 script/get-dataset-kits19/_cm.json
create mode 100644 script/get-dataset-kits19/customize.py
create mode 100644 script/get-dataset-kits19/run.sh
create mode 100644 script/get-dataset-librispeech/README-extra.md
create mode 100644 script/get-dataset-librispeech/README.md
create mode 100644 script/get-dataset-librispeech/_cm.json
create mode 100644 script/get-dataset-librispeech/customize.py
create mode 100644 script/get-dataset-librispeech/run.sh
create mode 100644 script/get-dataset-openimages-annotations/README.md
create mode 100644 script/get-dataset-openimages-annotations/_cm.json
create mode 100644 script/get-dataset-openimages-annotations/customize.py
create mode 100644 script/get-dataset-openimages-annotations/run.sh
create mode 100644 script/get-dataset-openimages-calibration/README.md
create mode 100644 script/get-dataset-openimages-calibration/_cm.yaml
create mode 100644 script/get-dataset-openimages-calibration/customize.py
create mode 100644 script/get-dataset-openimages-calibration/filter.py
create mode 100644 script/get-dataset-openimages-calibration/run-filter.sh
create mode 100644 script/get-dataset-openimages/README-extra.md
create mode 100644 script/get-dataset-openimages/README.md
create mode 100644 script/get-dataset-openimages/_cm.json
create mode 100644 script/get-dataset-openimages/customize.py
create mode 100644 script/get-dataset-openimages/run.bat
create mode 100644 script/get-dataset-openimages/run.sh
create mode 100644 script/get-dataset-openorca/README.md
create mode 100644 script/get-dataset-openorca/_cm.json
create mode 100644 script/get-dataset-openorca/customize.py
create mode 100644 script/get-dataset-squad-vocab/README.md
create mode 100644 script/get-dataset-squad-vocab/_cm.json
create mode 100644 script/get-dataset-squad-vocab/customize.py
create mode 100644 script/get-dataset-squad-vocab/run.sh
create mode 100644 script/get-dataset-squad/README-extra.md
create mode 100644 script/get-dataset-squad/README.md
create mode 100644 script/get-dataset-squad/_cm.json
create mode 100644 script/get-dataset-squad/customize.py
create mode 100644 script/get-dataset-squad/run.sh
create mode 100644 script/get-dlrm-data-mlperf-inference/README.md
create mode 100644 script/get-dlrm-data-mlperf-inference/_cm.yaml
create mode 100644 script/get-dlrm-data-mlperf-inference/customize.py
create mode 100644 script/get-dlrm-data-mlperf-inference/run.sh
create mode 100644 script/get-dlrm/README-extra.md
create mode 100644 script/get-dlrm/README.md
create mode 100644 script/get-dlrm/_cm.json
create mode 100644 script/get-dlrm/customize.py
create mode 100644 script/get-dlrm/run.sh
create mode 100644 script/get-docker/README.md
create mode 100644 script/get-docker/_cm.json
create mode 100644 script/get-docker/run-ubuntu.sh
create mode 100644 script/get-gcc/README-extra.md
create mode 100644 script/get-gcc/README.md
create mode 100644 script/get-gcc/_cm.json
create mode 100644 script/get-gcc/customize.py
create mode 100644 script/get-gcc/run.bat
create mode 100644 script/get-gcc/run.sh
create mode 100644 script/get-generic-python-lib/README-extra.md
create mode 100644 script/get-generic-python-lib/README.md
create mode 100644 script/get-generic-python-lib/_cm.json
create mode 100644 script/get-generic-python-lib/customize.py
create mode 100644 script/get-generic-python-lib/detect-version.py
create mode 100644 script/get-generic-python-lib/install.bat
create mode 100644 script/get-generic-python-lib/install.sh
create mode 100644 script/get-generic-python-lib/run.bat
create mode 100644 script/get-generic-python-lib/run.sh
create mode 100644 script/get-generic-python-lib/tensorflow/run-aarch64.sh
create mode 100644 script/get-generic-python-lib/tensorflow/run-macos.sh
create mode 100644 script/get-generic-python-lib/uninstall_deps.sh
create mode 100644 script/get-generic-sys-util/README.md
create mode 100644 script/get-generic-sys-util/_cm.json
create mode 100644 script/get-generic-sys-util/customize.py
create mode 100644 script/get-generic-sys-util/run.sh
create mode 100644 script/get-git-repo/README-extra.md
create mode 100644 script/get-git-repo/README.md
create mode 100644 script/get-git-repo/_cm.json
create mode 100644 script/get-git-repo/customize.py
create mode 100644 script/get-git-repo/run.bat
create mode 100644 script/get-git-repo/run.sh
create mode 100644 script/get-github-cli/README.md
create mode 100644 script/get-github-cli/_cm.json
create mode 100644 script/get-github-cli/customize.py
create mode 100644 script/get-github-cli/run.bat
create mode 100644 script/get-github-cli/run.sh
create mode 100644 script/get-go/README-extra.md
create mode 100644 script/get-go/README.md
create mode 100644 script/get-go/_cm.json
create mode 100644 script/get-go/customize.py
create mode 100644 script/get-go/run.sh
create mode 100644 script/get-google-saxml/README.md
create mode 100644 script/get-google-saxml/_cm.yaml
create mode 100644 script/get-google-saxml/customize.py
create mode 100644 script/get-google-saxml/run.bat
create mode 100644 script/get-google-saxml/run.sh
create mode 100644 script/get-google-test/README.md
create mode 100644 script/get-google-test/_cm.json
create mode 100644 script/get-google-test/customize.py
create mode 100644 script/get-google-test/run.sh
create mode 100644 script/get-ipol-src/README-extra.md
create mode 100644 script/get-ipol-src/README.md
create mode 100644 script/get-ipol-src/_cm.json
create mode 100644 script/get-ipol-src/customize.py
create mode 100644 script/get-ipol-src/patch/20240127.patch
create mode 100644 script/get-java/README-extra.md
create mode 100644 script/get-java/README.md
create mode 100644 script/get-java/_cm.json
create mode 100644 script/get-java/customize.py
create mode 100644 script/get-java/install-prebuilt.bat
create mode 100644 script/get-java/install-prebuilt.sh
create mode 100644 script/get-java/run.bat
create mode 100644 script/get-java/run.sh
create mode 100644 script/get-javac/README-extra.md
create mode 100644 script/get-javac/README.md
create mode 100644 script/get-javac/_cm.json
create mode 100644 script/get-javac/customize.py
create mode 100644 script/get-javac/install-prebuilt.bat
create mode 100644 script/get-javac/install-prebuilt.sh
create mode 100644 script/get-javac/run.bat
create mode 100644 script/get-javac/run.sh
create mode 100644 script/get-lib-armnn/README.md
create mode 100644 script/get-lib-armnn/_cm.json
create mode 100644 script/get-lib-armnn/customize.py
create mode 100644 script/get-lib-armnn/run.sh
create mode 100644 script/get-lib-dnnl/README.md
create mode 100644 script/get-lib-dnnl/_cm.json
create mode 100644 script/get-lib-dnnl/customize.py
create mode 100644 script/get-lib-dnnl/run.sh
create mode 100644 script/get-lib-protobuf/README.md
create mode 100644 script/get-lib-protobuf/_cm.json
create mode 100644 script/get-lib-protobuf/customize.py
create mode 100644 script/get-lib-protobuf/run.sh
create mode 100644 script/get-lib-qaic-api/README.md
create mode 100644 script/get-lib-qaic-api/_cm.json
create mode 100644 script/get-lib-qaic-api/customize.py
create mode 100644 script/get-lib-qaic-api/master/QAicInfApi.cpp
create mode 100644 script/get-lib-qaic-api/master/QAicInfApi.h
create mode 100644 script/get-lib-qaic-api/run.sh
create mode 100644 script/get-llvm/README-extra.md
create mode 100644 script/get-llvm/README.md
create mode 100644 script/get-llvm/_cm.json
create mode 100644 script/get-llvm/customize.py
create mode 100644 script/get-llvm/run.bat
create mode 100644 script/get-llvm/run.sh
create mode 100644 script/get-microtvm/README-extra.md
create mode 100644 script/get-microtvm/README.md
create mode 100644 script/get-microtvm/_cm.json
create mode 100644 script/get-microtvm/customize.py
create mode 100644 script/get-microtvm/run.sh
create mode 100644 script/get-ml-model-3d-unet-kits19/README.md
create mode 100644 script/get-ml-model-3d-unet-kits19/_cm.json
create mode 100644 script/get-ml-model-3d-unet-kits19/customize.py
create mode 100644 script/get-ml-model-bert-base-squad/README.md
create mode 100644 script/get-ml-model-bert-base-squad/_cm.json
create mode 100644 script/get-ml-model-bert-large-squad/README.md
create mode 100644 script/get-ml-model-bert-large-squad/_cm.json
create mode 100644 script/get-ml-model-bert-large-squad/customize.py
create mode 100644 script/get-ml-model-bert-large-squad/run-packed.sh
create mode 100644 script/get-ml-model-dlrm-terabyte/README.md
create mode 100644 script/get-ml-model-dlrm-terabyte/_cm.json
create mode 100644 script/get-ml-model-dlrm-terabyte/run.sh
create mode 100644 script/get-ml-model-efficientnet-lite/README.md
create mode 100644 script/get-ml-model-efficientnet-lite/_cm.json
create mode 100644 script/get-ml-model-efficientnet-lite/customize.py
create mode 100644 script/get-ml-model-gptj/README.md
create mode 100644 script/get-ml-model-gptj/_cm.json
create mode 100644 script/get-ml-model-gptj/convert_gptj_ckpt.py
create mode 100644 script/get-ml-model-gptj/customize.py
create mode 100644 script/get-ml-model-gptj/run-int4-calibration.sh
create mode 100644 script/get-ml-model-gptj/run-intel.sh
create mode 100644 script/get-ml-model-gptj/run-saxml-quantized.sh
create mode 100644 script/get-ml-model-gptj/run-saxml.sh
create mode 100644 script/get-ml-model-huggingface-zoo/README-extra.md
create mode 100644 script/get-ml-model-huggingface-zoo/README.md
create mode 100644 script/get-ml-model-huggingface-zoo/_cm.json
create mode 100644 script/get-ml-model-huggingface-zoo/customize.py
create mode 100644 script/get-ml-model-huggingface-zoo/download_model.py
create mode 100644 script/get-ml-model-huggingface-zoo/run.bat
create mode 100644 script/get-ml-model-huggingface-zoo/run.sh
create mode 100644 script/get-ml-model-llama2/README.md
create mode 100644 script/get-ml-model-llama2/_cm.json
create mode 100644 script/get-ml-model-llama2/customize.py
create mode 100644 script/get-ml-model-mobilenet/README-extra.md
create mode 100644 script/get-ml-model-mobilenet/README.md
create mode 100644 script/get-ml-model-mobilenet/_cm.json
create mode 100644 script/get-ml-model-mobilenet/customize.py
create mode 100644 script/get-ml-model-neuralmagic-zoo/README.md
create mode 100644 script/get-ml-model-neuralmagic-zoo/_cm.json
create mode 100644 script/get-ml-model-neuralmagic-zoo/customize.py
create mode 100644 script/get-ml-model-neuralmagic-zoo/download_sparse.py
create mode 100644 script/get-ml-model-neuralmagic-zoo/run.bat
create mode 100644 script/get-ml-model-neuralmagic-zoo/run.sh
create mode 100644 script/get-ml-model-resnet50/README-extra.md
create mode 100644 script/get-ml-model-resnet50/README.md
create mode 100644 script/get-ml-model-resnet50/_cm.json
create mode 100644 script/get-ml-model-resnet50/customize.py
create mode 100644 script/get-ml-model-resnet50/run-fix-input.sh
create mode 100644 script/get-ml-model-resnet50/run_config.yml
create mode 100644 script/get-ml-model-retinanet-nvidia/README.md
create mode 100644 script/get-ml-model-retinanet-nvidia/_cm.json
create mode 100644 script/get-ml-model-retinanet-nvidia/customize.py
create mode 100644 script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py
create mode 100644 script/get-ml-model-retinanet-nvidia/polygraphy_script.sh
create mode 100644 script/get-ml-model-retinanet-nvidia/run.sh
create mode 100644 script/get-ml-model-retinanet/README-extra.md
create mode 100644 script/get-ml-model-retinanet/README.md
create mode 100644 script/get-ml-model-retinanet/_cm.json
create mode 100644 script/get-ml-model-retinanet/customize.py
create mode 100644 script/get-ml-model-retinanet/node-precision-info.py
create mode 100644 script/get-ml-model-retinanet/run-no-nms.sh
create mode 100644 script/get-ml-model-rnnt/README.md
create mode 100644 script/get-ml-model-rnnt/_cm.json
create mode 100644 script/get-ml-model-rnnt/customize.py
create mode 100644 script/get-ml-model-stable-diffusion/README.md
create mode 100644 script/get-ml-model-stable-diffusion/_cm.json
create mode 100644 script/get-ml-model-stable-diffusion/customize.py
create mode 100644 script/get-ml-model-tiny-resnet/README.md
create mode 100644 script/get-ml-model-tiny-resnet/_cm.json
create mode 100644 script/get-ml-model-tiny-resnet/customize.py
create mode 100644 script/get-ml-model-tiny-resnet/run.sh
create mode 100644 script/get-ml-model-tiny-resnet/run_config.yml
create mode 100644 script/get-ml-model-using-imagenet-from-model-zoo/README.md
create mode 100644 script/get-ml-model-using-imagenet-from-model-zoo/_cm.json
create mode 100644 script/get-ml-model-using-imagenet-from-model-zoo/customize.py
create mode 100644 script/get-mlperf-inference-intel-scratch-space/README.md
create mode 100644 script/get-mlperf-inference-intel-scratch-space/_cm.json
create mode 100644 script/get-mlperf-inference-intel-scratch-space/customize.py
create mode 100644 script/get-mlperf-inference-intel-scratch-space/run.bat
create mode 100644 script/get-mlperf-inference-intel-scratch-space/run.sh
create mode 100644 script/get-mlperf-inference-loadgen/README-extra.md
create mode 100644 script/get-mlperf-inference-loadgen/README.md
create mode 100644 script/get-mlperf-inference-loadgen/_cm.yaml
create mode 100644 script/get-mlperf-inference-loadgen/customize.py
create mode 100644 script/get-mlperf-inference-loadgen/run.bat
create mode 100644 script/get-mlperf-inference-loadgen/run.sh
create mode 100644 script/get-mlperf-inference-loadgen/tests/download-and-install.bat
create mode 100644 script/get-mlperf-inference-nvidia-common-code/README-extra.md
create mode 100644 script/get-mlperf-inference-nvidia-common-code/README.md
create mode 100644 script/get-mlperf-inference-nvidia-common-code/_cm.json create mode 100644 script/get-mlperf-inference-nvidia-common-code/customize.py create mode 100644 script/get-mlperf-inference-nvidia-scratch-space/README-extra.md create mode 100644 script/get-mlperf-inference-nvidia-scratch-space/README.md create mode 100644 script/get-mlperf-inference-nvidia-scratch-space/_cm.json create mode 100644 script/get-mlperf-inference-nvidia-scratch-space/customize.py create mode 100644 script/get-mlperf-inference-nvidia-scratch-space/run.bat create mode 100644 script/get-mlperf-inference-nvidia-scratch-space/run.sh create mode 100644 script/get-mlperf-inference-results-dir/README.md create mode 100644 script/get-mlperf-inference-results-dir/_cm.json create mode 100644 script/get-mlperf-inference-results-dir/customize.py create mode 100644 script/get-mlperf-inference-results/README-extra.md create mode 100644 script/get-mlperf-inference-results/README.md create mode 100644 script/get-mlperf-inference-results/_cm.json create mode 100644 script/get-mlperf-inference-results/customize.py create mode 100644 script/get-mlperf-inference-src/README-extra.md create mode 100644 script/get-mlperf-inference-src/README.md create mode 100644 script/get-mlperf-inference-src/_cm.json create mode 100644 script/get-mlperf-inference-src/customize.py create mode 100644 script/get-mlperf-inference-src/patch/coco.patch create mode 100644 script/get-mlperf-inference-src/patch/git.patch create mode 100644 script/get-mlperf-inference-src/patch/openimages-pycocotools.patch create mode 100644 script/get-mlperf-inference-src/patch/windows-openimages.patch create mode 100644 script/get-mlperf-inference-src/patch/windows-openimages2.patch create mode 100644 script/get-mlperf-inference-submission-dir/README.md create mode 100644 script/get-mlperf-inference-submission-dir/_cm.json create mode 100644 script/get-mlperf-inference-submission-dir/customize.py create mode 100644 script/get-mlperf-inference-sut-configs/README-extra.md create mode 100644 script/get-mlperf-inference-sut-configs/README.md create mode 100644 script/get-mlperf-inference-sut-configs/_cm.json create mode 100644 script/get-mlperf-inference-sut-configs/configs/default/config.yaml create mode 100644 script/get-mlperf-inference-sut-configs/configs/default/default/default-config.yaml create mode 100644 script/get-mlperf-inference-sut-configs/configs/phoenix/nvidia_original-implementation/gpu-device/tensorrt-framework/default-config.yaml create mode 100644 script/get-mlperf-inference-sut-configs/configs/phoenix/nvidia_original-implementation/gpu-device/tensorrt-framework/framework-version-default/default-config.yaml create mode 100644 script/get-mlperf-inference-sut-configs/customize.py create mode 100644 script/get-mlperf-inference-sut-description/README.md create mode 100644 script/get-mlperf-inference-sut-description/_cm.json create mode 100644 script/get-mlperf-inference-sut-description/customize.py create mode 100644 script/get-mlperf-inference-sut-description/detect_memory.sh create mode 100644 script/get-mlperf-inference-sut-description/get_memory_info.py create mode 100644 script/get-mlperf-inference-sut-description/hardware/default.json create mode 100644 script/get-mlperf-inference-utils/README.md create mode 100644 script/get-mlperf-inference-utils/_cm.yaml create mode 100644 script/get-mlperf-inference-utils/customize.py create mode 100644 script/get-mlperf-inference-utils/mlperf_utils.py create mode 100644 
 create mode 100644 script/get-mlperf-logging/README-extra.md
 create mode 100644 script/get-mlperf-logging/README.md
 create mode 100644 script/get-mlperf-logging/_cm.json
 create mode 100644 script/get-mlperf-logging/customize.py
 create mode 100644 script/get-mlperf-power-dev/README.md
 create mode 100644 script/get-mlperf-power-dev/_cm.json
 create mode 100644 script/get-mlperf-power-dev/customize.py
 create mode 100644 script/get-mlperf-tiny-eembc-energy-runner-src/README.md
 create mode 100644 script/get-mlperf-tiny-eembc-energy-runner-src/_cm.json
 create mode 100644 script/get-mlperf-tiny-eembc-energy-runner-src/customize.py
 create mode 100644 script/get-mlperf-tiny-eembc-energy-runner-src/run.bat
 create mode 100644 script/get-mlperf-tiny-eembc-energy-runner-src/run.sh
 create mode 100644 script/get-mlperf-tiny-src/README.md
 create mode 100644 script/get-mlperf-tiny-src/_cm.json
 create mode 100644 script/get-mlperf-tiny-src/customize.py
 create mode 100644 script/get-mlperf-tiny-src/run.bat
 create mode 100644 script/get-mlperf-tiny-src/run.sh
 create mode 100644 script/get-mlperf-training-nvidia-code/README.md
 create mode 100644 script/get-mlperf-training-nvidia-code/_cm.json
 create mode 100644 script/get-mlperf-training-nvidia-code/customize.py
 create mode 100644 script/get-mlperf-training-src/README-extra.md
 create mode 100644 script/get-mlperf-training-src/README.md
 create mode 100644 script/get-mlperf-training-src/_cm.json
 create mode 100644 script/get-mlperf-training-src/customize.py
 create mode 100644 script/get-mlperf-training-src/patch/cpu_load.patch
 create mode 100644 script/get-mlperf-training-src/patch/nvidia-retinanet.patch
 create mode 100644 script/get-nvidia-docker/README.md
 create mode 100644 script/get-nvidia-docker/_cm.json
 create mode 100644 script/get-nvidia-docker/run-ubuntu.sh
 create mode 100644 script/get-nvidia-mitten/README-extra.md
 create mode 100644 script/get-nvidia-mitten/README.md
 create mode 100644 script/get-nvidia-mitten/_cm.json
 create mode 100644 script/get-nvidia-mitten/customize.py
 create mode 100644 script/get-nvidia-mitten/run.bat
 create mode 100644 script/get-nvidia-mitten/run.sh
 create mode 100644 script/get-onnxruntime-prebuilt/README.md
 create mode 100644 script/get-onnxruntime-prebuilt/_cm.json
 create mode 100644 script/get-onnxruntime-prebuilt/customize.py
 create mode 100644 script/get-onnxruntime-prebuilt/run.bat
 create mode 100644 script/get-onnxruntime-prebuilt/run.sh
 create mode 100644 script/get-openssl/README-extra.md
 create mode 100644 script/get-openssl/README.md
 create mode 100644 script/get-openssl/_cm.json
 create mode 100644 script/get-openssl/customize.py
 create mode 100644 script/get-openssl/run.sh
 create mode 100644 script/get-preprocessed-dataset-criteo/README-extra.md
 create mode 100644 script/get-preprocessed-dataset-criteo/README.md
 create mode 100644 script/get-preprocessed-dataset-criteo/_cm.json
 create mode 100644 script/get-preprocessed-dataset-criteo/customize.py
 create mode 100644 script/get-preprocessed-dataset-criteo/preprocess.py
 create mode 100644 script/get-preprocessed-dataset-criteo/run-multihot.sh
 create mode 100644 script/get-preprocessed-dataset-criteo/run.sh
 create mode 100644 script/get-preprocessed-dataset-generic/README.md
 create mode 100644 script/get-preprocessed-dataset-generic/_cm.json
 create mode 100644 script/get-preprocessed-dataset-generic/customize.py
 create mode 100644 script/get-preprocessed-dataset-generic/src/generic_preprocess.py
 create mode 100644 script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py
 create mode 100644 script/get-preprocessed-dataset-imagenet/README-extra.md
 create mode 100644 script/get-preprocessed-dataset-imagenet/README.md
 create mode 100644 script/get-preprocessed-dataset-imagenet/_cm.json
 create mode 100644 script/get-preprocessed-dataset-imagenet/customize.py
 create mode 100644 script/get-preprocessed-dataset-imagenet/preprocess.py
 create mode 100644 script/get-preprocessed-dataset-imagenet/run.bat
 create mode 100644 script/get-preprocessed-dataset-imagenet/run.sh
 create mode 100644 script/get-preprocessed-dataset-kits19/README.md
 create mode 100644 script/get-preprocessed-dataset-kits19/_cm.json
 create mode 100644 script/get-preprocessed-dataset-kits19/customize.py
 create mode 100644 script/get-preprocessed-dataset-kits19/run.sh
 create mode 100644 script/get-preprocessed-dataset-librispeech/README.md
 create mode 100644 script/get-preprocessed-dataset-librispeech/_cm.json
 create mode 100644 script/get-preprocessed-dataset-librispeech/customize.py
 create mode 100644 script/get-preprocessed-dataset-librispeech/run.sh
 create mode 100644 script/get-preprocessed-dataset-openimages/README-extra.md
 create mode 100644 script/get-preprocessed-dataset-openimages/README.md
 create mode 100644 script/get-preprocessed-dataset-openimages/_cm.json
 create mode 100644 script/get-preprocessed-dataset-openimages/customize.py
 create mode 100644 script/get-preprocessed-dataset-openimages/nvidia_preprocess.py
 create mode 100644 script/get-preprocessed-dataset-openimages/preprocess.py
 create mode 100644 script/get-preprocessed-dataset-openimages/run.bat
 create mode 100644 script/get-preprocessed-dataset-openimages/run.sh
 create mode 100644 script/get-preprocessed-dataset-openorca/README.md
 create mode 100644 script/get-preprocessed-dataset-openorca/_cm.json
 create mode 100644 script/get-preprocessed-dataset-openorca/customize.py
 create mode 100644 script/get-preprocessed-dataset-openorca/run.sh
 create mode 100644 script/get-preprocessed-dataset-squad/README.md
 create mode 100644 script/get-preprocessed-dataset-squad/_cm.yaml
 create mode 100644 script/get-preprocessed-dataset-squad/customize.py
 create mode 100644 script/get-preprocessed-dataset-squad/run-packed.sh
 create mode 100644 script/get-preprocessed-dataset-squad/run.sh
 create mode 100644 script/get-python3/README-extra.md
 create mode 100644 script/get-python3/README.md
 create mode 100644 script/get-python3/_cm.json
 create mode 100644 script/get-python3/customize.py
 create mode 100644 script/get-python3/run.bat
 create mode 100644 script/get-python3/run.sh
 create mode 100644 script/get-qaic-apps-sdk/README.md
 create mode 100644 script/get-qaic-apps-sdk/_cm.json
 create mode 100644 script/get-qaic-apps-sdk/customize.py
 create mode 100644 script/get-qaic-platform-sdk/README.md
 create mode 100644 script/get-qaic-platform-sdk/_cm.json
 create mode 100644 script/get-qaic-platform-sdk/customize.py
 create mode 100644 script/get-qaic-software-kit/README.md
 create mode 100644 script/get-qaic-software-kit/_cm.json
 create mode 100644 script/get-qaic-software-kit/customize.py
 create mode 100644 script/get-qaic-software-kit/run.sh
 create mode 100644 script/get-rclone/README.md
 create mode 100644 script/get-rclone/_cm.json
 create mode 100644 script/get-rclone/configs/rclone.conf
 create mode 100644 script/get-rclone/customize.py
 create mode 100644 script/get-rclone/install-system-macos.sh
 create mode 100644 script/get-rclone/install-system.sh
 create mode 100644 script/get-rclone/install.bat
 create mode 100644 script/get-rclone/install.sh
 create mode 100644 script/get-rclone/run.bat
 create mode 100644 script/get-rclone/run.sh
 create mode 100644 script/get-rocm/README.md
 create mode 100644 script/get-rocm/_cm.json
 create mode 100644 script/get-rocm/customize.py
 create mode 100644 script/get-rocm/run.sh
 create mode 100644 script/get-spec-ptd/README-extra.md
 create mode 100644 script/get-spec-ptd/README.md
 create mode 100644 script/get-spec-ptd/_cm.json
 create mode 100644 script/get-spec-ptd/customize.py
 create mode 100644 script/get-spec-ptd/run.sh
 create mode 100644 script/get-sys-utils-cm/README.md
 create mode 100644 script/get-sys-utils-cm/_cm.json
 create mode 100644 script/get-sys-utils-cm/customize.py
 create mode 100644 script/get-sys-utils-cm/do_pip_installs.sh
 create mode 100644 script/get-sys-utils-cm/do_pip_installs.sh.old
 create mode 100644 script/get-sys-utils-cm/requirements.txt
 create mode 100644 script/get-sys-utils-cm/run-arch.sh
 create mode 100644 script/get-sys-utils-cm/run-debian.sh
 create mode 100644 script/get-sys-utils-cm/run-macos.sh
 create mode 100644 script/get-sys-utils-cm/run-rhel.sh
 create mode 100644 script/get-sys-utils-cm/run-sles.sh
 create mode 100644 script/get-sys-utils-cm/run-ubuntu.sh
 create mode 100644 script/get-sys-utils-min/README.md
 create mode 100644 script/get-sys-utils-min/_cm.json
 create mode 100644 script/get-sys-utils-min/customize.py
 create mode 100644 script/get-tensorrt/README-extra.md
 create mode 100644 script/get-tensorrt/README.md
 create mode 100644 script/get-tensorrt/_cm.json
 create mode 100644 script/get-tensorrt/customize.py
 create mode 100644 script/get-tensorrt/run.sh
 create mode 100644 script/get-terraform/README-extra.md
 create mode 100644 script/get-terraform/README.md
 create mode 100644 script/get-terraform/_cm.json
 create mode 100644 script/get-terraform/customize.py
 create mode 100644 script/get-terraform/run.sh
 create mode 100644 script/get-tvm-model/README-extra.md
 create mode 100644 script/get-tvm-model/README.md
 create mode 100644 script/get-tvm-model/_cm.json
 create mode 100644 script/get-tvm-model/customize.py
 create mode 100644 script/get-tvm-model/process.py
 create mode 100644 script/get-tvm-model/run.sh
 create mode 100644 script/get-tvm/README-extra.md
 create mode 100644 script/get-tvm/README.md
 create mode 100644 script/get-tvm/_cm.json
 create mode 100644 script/get-tvm/customize.py
 create mode 100644 script/get-tvm/run.sh
 create mode 100644 script/get-xilinx-sdk/README.md
 create mode 100644 script/get-xilinx-sdk/_cm.json
 create mode 100644 script/get-xilinx-sdk/customize.py
 create mode 100644 script/get-xilinx-sdk/run.sh
 create mode 100644 script/get-zendnn/README.md
 create mode 100644 script/get-zendnn/_cm.json
 create mode 100644 script/get-zendnn/customize.py
 create mode 100644 script/get-zendnn/run.bat
 create mode 100644 script/get-zendnn/run.sh
 create mode 100644 script/get-zephyr-sdk/README-extra.md
 create mode 100644 script/get-zephyr-sdk/README.md
 create mode 100644 script/get-zephyr-sdk/_cm.json
 create mode 100644 script/get-zephyr-sdk/customize.py
 create mode 100644 script/get-zephyr-sdk/run.sh
 create mode 100644 script/get-zephyr/README-extra.md
 create mode 100644 script/get-zephyr/README.md
 create mode 100644 script/get-zephyr/_cm.json
 create mode 100644 script/get-zephyr/customize.py
 create mode 100644 script/get-zephyr/run-ubuntu.sh
 create mode 100644 script/get-zephyr/run.sh
 create mode 100644 script/gui/README-about.md
 create mode 100644 script/gui/README.md
 create mode 100644 script/gui/_cm.yaml
 create mode 100644 script/gui/app.py
 create mode 100644 script/gui/customize.py
 create mode 100644 script/gui/graph.py
 create mode 100644 script/gui/install/linux.md
 create mode 100644 script/gui/install/macos.md
 create mode 100644 script/gui/install/redhat.md
 create mode 100644 script/gui/install/windows.md
 create mode 100644 script/gui/misc.py
 create mode 100644 script/gui/playground.py
 create mode 100644 script/gui/playground_apps.py
 create mode 100644 script/gui/playground_beta.py
 create mode 100644 script/gui/playground_beta_README.md
 create mode 100644 script/gui/playground_challenges.py
 create mode 100644 script/gui/playground_challenges_with_prizes.py
 create mode 100644 script/gui/playground_contributors.py
 create mode 100644 script/gui/playground_howtorun.py
 create mode 100644 script/gui/playground_install.py
 create mode 100644 script/gui/playground_reports.py
 create mode 100644 script/gui/playground_reproduce.py
 create mode 100644 script/gui/playground_scripts.py
 create mode 100644 script/gui/run.bat
 create mode 100644 script/gui/run.sh
 create mode 100644 script/gui/script.py
 create mode 100644 script/gui/tests/README.md
 create mode 100644 script/gui/tests/generate_password.py
 create mode 100644 script/gui/tests/test.cmd
 create mode 100644 script/gui/tests/test2.cmd
 create mode 100644 script/gui/tests/test3.cmd
 create mode 100644 script/gui/tests/test4.cmd
 create mode 100644 script/gui/tests/test4a.cmd
 create mode 100644 script/gui/tests/test4b.cmd
 create mode 100644 script/gui/tests/test5.cmd
 create mode 100644 script/import-experiment-to-sqlite/README.md
 create mode 100644 script/import-mlperf-inference-to-experiment/README-extra.md
 create mode 100644 script/import-mlperf-inference-to-experiment/README.md
 create mode 100644 script/import-mlperf-inference-to-experiment/_cm.yaml
 create mode 100644 script/import-mlperf-inference-to-experiment/customize.py
 create mode 100644 script/import-mlperf-tiny-to-experiment/README-extra.md
 create mode 100644 script/import-mlperf-tiny-to-experiment/README.md
 create mode 100644 script/import-mlperf-tiny-to-experiment/_cm.yaml
 create mode 100644 script/import-mlperf-tiny-to-experiment/assets/cm-visualization-and-customization-of-tinymlperf-results2.png
 create mode 100644 script/import-mlperf-tiny-to-experiment/customize.py
 create mode 100644 script/import-mlperf-training-to-experiment/README-extra.md
 create mode 100644 script/import-mlperf-training-to-experiment/README.md
 create mode 100644 script/import-mlperf-training-to-experiment/_cm.yaml
 create mode 100644 script/import-mlperf-training-to-experiment/customize.py
 create mode 100644 script/import-mlperf-training-to-experiment/run_mlperf_logger.sh
 create mode 100644 script/install-aws-cli/README.md
 create mode 100644 script/install-aws-cli/_cm.json
 create mode 100644 script/install-aws-cli/customize.py
 create mode 100644 script/install-aws-cli/run.sh
 create mode 100644 script/install-bazel/README.md
 create mode 100644 script/install-bazel/_cm.json
 create mode 100644 script/install-bazel/customize.py
 create mode 100644 script/install-bazel/run-aarch64.sh
 create mode 100644 script/install-bazel/run.bat
 create mode 100644 script/install-bazel/run.sh
 create mode 100644 script/install-cmake-prebuilt/README.md
 create mode 100644 script/install-cmake-prebuilt/_cm.json
 create mode 100644 script/install-cmake-prebuilt/customize.py
 create mode 100644 script/install-cmake-prebuilt/run.sh
 create mode 100644 script/install-cuda-package-manager/README.md
 create mode 100644 script/install-cuda-package-manager/_cm.json
 create mode 100644 script/install-cuda-package-manager/customize.py
 create mode 100644 script/install-cuda-package-manager/run-ubuntu.sh
 create mode 100644 script/install-cuda-package-manager/run.sh
 create mode 100644 script/install-cuda-prebuilt/README-extra.md
 create mode 100644 script/install-cuda-prebuilt/README.md
 create mode 100644 script/install-cuda-prebuilt/_cm.json
 create mode 100644 script/install-cuda-prebuilt/customize.py
 create mode 100644 script/install-cuda-prebuilt/run.sh
 create mode 100644 script/install-gcc-src/README.md
 create mode 100644 script/install-gcc-src/_cm.json
 create mode 100644 script/install-gcc-src/customize.py
 create mode 100644 script/install-gcc-src/run.sh
 create mode 100644 script/install-generic-conda-package/README.md
 create mode 100644 script/install-generic-conda-package/_cm.json
 create mode 100644 script/install-generic-conda-package/customize.py
 create mode 100644 script/install-generic-conda-package/run.sh
 create mode 100644 script/install-gflags/README.md
 create mode 100644 script/install-gflags/_cm.json
 create mode 100644 script/install-gflags/customize.py
 create mode 100644 script/install-gflags/run.sh
 create mode 100644 script/install-github-cli/README.md
 create mode 100644 script/install-github-cli/_cm.json
 create mode 100644 script/install-github-cli/customize.py
 create mode 100644 script/install-github-cli/run-macos.sh
 create mode 100644 script/install-github-cli/run-rhel.sh
 create mode 100644 script/install-github-cli/run.sh
 create mode 100644 script/install-ipex-from-src/README.md
 create mode 100644 script/install-ipex-from-src/_cm.json
 create mode 100644 script/install-ipex-from-src/customize.py
 create mode 100644 script/install-ipex-from-src/run.sh
 create mode 100644 script/install-llvm-prebuilt/README-extra.md
 create mode 100644 script/install-llvm-prebuilt/README.md
 create mode 100644 script/install-llvm-prebuilt/_cm.json
 create mode 100644 script/install-llvm-prebuilt/customize.py
 create mode 100644 script/install-llvm-prebuilt/run.bat
 create mode 100644 script/install-llvm-prebuilt/run.sh
 create mode 100644 script/install-llvm-src/README.md
 create mode 100644 script/install-llvm-src/_cm.json
 create mode 100644 script/install-llvm-src/customize.py
 create mode 100644 script/install-llvm-src/install-llvm-16-intel-mlperf-inference.sh
 create mode 100644 script/install-llvm-src/run.sh
 create mode 100644 script/install-mlperf-logging-from-src/README.md
 create mode 100644 script/install-mlperf-logging-from-src/_cm.yaml
 create mode 100644 script/install-mlperf-logging-from-src/customize.py
 create mode 100644 script/install-mlperf-logging-from-src/run.sh
 create mode 100644 script/install-nccl-libs/README.md
 create mode 100644 script/install-nccl-libs/_cm.yaml
 create mode 100644 script/install-nccl-libs/customize.py
 create mode 100644 script/install-nccl-libs/run-ubuntu.sh
 create mode 100644 script/install-nccl-libs/run.sh
 create mode 100644 script/install-numactl-from-src/README.md
 create mode 100644 script/install-numactl-from-src/_cm.json
 create mode 100644 script/install-numactl-from-src/customize.py
 create mode 100644 script/install-numactl-from-src/run.sh
 create mode 100644 script/install-onednn-from-src/README.md
 create mode 100644 script/install-onednn-from-src/_cm.json
 create mode 100644 script/install-onednn-from-src/customize.py
 create mode 100644 script/install-onednn-from-src/run-intel-mlperf-inference.sh
 create mode 100644 script/install-onnxruntime-from-src/README.md
 create mode 100644 script/install-onnxruntime-from-src/_cm.json
 create mode 100644 script/install-onnxruntime-from-src/customize.py
 create mode 100644 script/install-onnxruntime-from-src/run.sh
 create mode 100644 script/install-openssl/README.md
 create mode 100644 script/install-openssl/_cm.json
 create mode 100644 script/install-openssl/customize.py
 create mode 100644 script/install-openssl/run.sh
 create mode 100644 script/install-pip-package-for-cmind-python/README.md
 create mode 100644 script/install-pip-package-for-cmind-python/_cm.yaml
 create mode 100644 script/install-pip-package-for-cmind-python/customize.py
 create mode 100644 script/install-python-src/README.md
 create mode 100644 script/install-python-src/_cm.json
 create mode 100644 script/install-python-src/customize.py
 create mode 100644 script/install-python-src/run.sh
 create mode 100644 script/install-python-venv/README.md
 create mode 100644 script/install-python-venv/_cm.json
 create mode 100644 script/install-python-venv/customize.py
 create mode 100644 script/install-python-venv/run.bat
 create mode 100644 script/install-python-venv/run.sh
 create mode 100644 script/install-pytorch-from-src/README.md
 create mode 100644 script/install-pytorch-from-src/_cm.json
 create mode 100644 script/install-pytorch-from-src/customize.py
 create mode 100644 script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh
 create mode 100644 script/install-pytorch-from-src/run.sh
 create mode 100644 script/install-pytorch-kineto-from-src/README.md
 create mode 100644 script/install-pytorch-kineto-from-src/_cm.json
 create mode 100644 script/install-pytorch-kineto-from-src/customize.py
 create mode 100644 script/install-pytorch-kineto-from-src/run.sh
 create mode 100644 script/install-qaic-compute-sdk-from-src/README.md
 create mode 100644 script/install-qaic-compute-sdk-from-src/_cm.json
 create mode 100644 script/install-qaic-compute-sdk-from-src/customize.py
 create mode 100644 script/install-qaic-compute-sdk-from-src/run.sh
 create mode 100644 script/install-rocm/README.md
 create mode 100644 script/install-rocm/_cm.json
 create mode 100644 script/install-rocm/customize.py
 create mode 100644 script/install-rocm/run-rhel.sh
 create mode 100644 script/install-rocm/run-ubuntu.sh
 create mode 100644 script/install-rocm/run.sh
 create mode 100644 script/install-tensorflow-for-c/README.md
 create mode 100644 script/install-tensorflow-for-c/_cm.json
 create mode 100644 script/install-tensorflow-for-c/customize.py
 create mode 100644 script/install-tensorflow-for-c/run.sh
 create mode 100644 script/install-tensorflow-from-src/README.md
 create mode 100644 script/install-tensorflow-from-src/_cm.json
 create mode 100644 script/install-tensorflow-from-src/customize.py
 create mode 100644 script/install-tensorflow-from-src/run.sh
 create mode 100644 script/install-terraform-from-src/README.md
 create mode 100644 script/install-terraform-from-src/_cm.json
 create mode 100644 script/install-terraform-from-src/customize.py
 create mode 100644 script/install-terraform-from-src/run.sh
 create mode 100644 script/install-tflite-from-src/README.md
 create mode 100644 script/install-tflite-from-src/_cm.json
 create mode 100644 script/install-tflite-from-src/customize.py
 create mode 100644 script/install-tflite-from-src/run.sh
 create mode 100644 script/install-torchvision-from-src/README.md
 create mode 100644 script/install-torchvision-from-src/_cm.json
 create mode 100644 script/install-torchvision-from-src/customize.py
 create mode 100644 script/install-torchvision-from-src/run.sh
 create mode 100644 script/install-tpp-pytorch-extension/README.md
 create mode 100644 script/install-tpp-pytorch-extension/_cm.json
 create mode 100644 script/install-tpp-pytorch-extension/customize.py
 create mode 100644 script/install-tpp-pytorch-extension/run.sh
 create mode 100644 script/install-transformers-from-src/README.md
 create mode 100644 script/install-transformers-from-src/_cm.json
 create mode 100644 script/install-transformers-from-src/customize.py
 create mode 100644 script/install-transformers-from-src/run.sh
 create mode 100644 script/launch-benchmark/README-extra.md
 create mode 100644 script/launch-benchmark/README.md
 create mode 100644 script/launch-benchmark/_cm.yaml
 create mode 100644 script/launch-benchmark/customize.py
 create mode 100644 script/launch-benchmark/tests/debug.py
 create mode 100644 script/prepare-training-data-bert/README.md
 create mode 100644 script/prepare-training-data-bert/_cm.json
 create mode 100644 script/prepare-training-data-bert/customize.py
 create mode 100644 script/prepare-training-data-bert/run-nvidia.sh
 create mode 100644 script/prepare-training-data-bert/run-reference.sh
 create mode 100644 script/prepare-training-data-bert/run.sh
 create mode 100644 script/prepare-training-data-bert/run_config.yml
 create mode 100644 script/prepare-training-data-resnet/README.md
 create mode 100644 script/prepare-training-data-resnet/_cm.json
 create mode 100644 script/prepare-training-data-resnet/customize.py
 create mode 100644 script/prepare-training-data-resnet/run-nvidia.sh
 create mode 100644 script/prepare-training-data-resnet/run-reference.sh
 create mode 100644 script/prepare-training-data-resnet/run_config.yml
 create mode 100644 script/preprocess-mlperf-inference-submission/README.md
 create mode 100644 script/preprocess-mlperf-inference-submission/_cm.json
 create mode 100644 script/preprocess-mlperf-inference-submission/customize.py
 create mode 100644 script/preprocess-mlperf-inference-submission/run.sh
 create mode 100644 script/print-croissant-desc/README-extra.md
 create mode 100644 script/print-croissant-desc/README.md
 create mode 100644 script/print-croissant-desc/_cm.yaml
 create mode 100644 script/print-croissant-desc/code.py
 create mode 100644 script/print-croissant-desc/run.bat
 create mode 100644 script/print-croissant-desc/run.sh
 create mode 100644 script/print-hello-world-java/README.md
 create mode 100644 script/print-hello-world-java/_cm.json
 create mode 100644 script/print-hello-world-java/code.java
 create mode 100644 script/print-hello-world-java/run.bat
 create mode 100644 script/print-hello-world-java/run.sh
 create mode 100644 script/print-hello-world-javac/README.md
 create mode 100644 script/print-hello-world-javac/_cm.json
 create mode 100644 script/print-hello-world-javac/code.java
 create mode 100644 script/print-hello-world-javac/run.bat
 create mode 100644 script/print-hello-world-javac/run.sh
 create mode 100644 script/print-hello-world-py/README.md
 create mode 100644 script/print-hello-world-py/_cm.json
 create mode 100644 script/print-hello-world-py/code.py
 create mode 100644 script/print-hello-world-py/run.bat
 create mode 100644 script/print-hello-world-py/run.sh
 create mode 100644 script/print-hello-world/README.md
 create mode 100644 script/print-hello-world/_cm.json
 create mode 100644 script/print-hello-world/run.bat
 create mode 100644 script/print-hello-world/run.sh
 create mode 100644 script/print-python-version/README.md
 create mode 100644 script/print-python-version/_cm.json
 create mode 100644 script/print-python-version/run.bat
 create mode 100644 script/print-python-version/run.sh
 create mode 100644 script/process-ae-users/README.md
 create mode 100644 script/process-ae-users/_cm.json
 create mode 100644 script/process-ae-users/code.py
 create mode 100644 script/process-ae-users/customize.py
 create mode 100644 script/process-ae-users/run.bat
 create mode 100644 script/process-ae-users/run.sh
 create mode 100644 script/process-mlperf-accuracy/README.md
 create mode 100644 script/process-mlperf-accuracy/_cm.json
 create mode 100644 script/process-mlperf-accuracy/customize.py
 create mode 100644 script/process-mlperf-accuracy/run.bat
 create mode 100644 script/process-mlperf-accuracy/run.sh
 create mode 100644 script/prune-bert-models/README-extra.md
 create mode 100644 script/prune-bert-models/README.md
 create mode 100644 script/prune-bert-models/_cm.json
 create mode 100644 script/prune-bert-models/customize.py
 create mode 100644 script/prune-bert-models/run.sh
 create mode 100644 script/prune-docker/README.md
 create mode 100644 script/prune-docker/_cm.json
 create mode 100644 script/prune-docker/run.bat
 create mode 100644 script/prune-docker/run.sh
 create mode 100644 script/publish-results-to-dashboard/README.md
 create mode 100644 script/publish-results-to-dashboard/_cm.json
 create mode 100644 script/publish-results-to-dashboard/code.py
 create mode 100644 script/publish-results-to-dashboard/run.bat
 create mode 100644 script/publish-results-to-dashboard/run.sh
 create mode 100644 script/pull-git-repo/README.md
 create mode 100644 script/pull-git-repo/_cm.json
 create mode 100644 script/pull-git-repo/customize.py
 create mode 100644 script/pull-git-repo/run.sh
 create mode 100644 script/push-csv-to-spreadsheet/README.md
 create mode 100644 script/push-csv-to-spreadsheet/_cm.json
 create mode 100644 script/push-csv-to-spreadsheet/customize.py
 create mode 100644 script/push-csv-to-spreadsheet/google_api.py
 create mode 100644 script/push-csv-to-spreadsheet/run.sh
 create mode 100644 script/push-mlperf-inference-results-to-github/README.md
 create mode 100644 script/push-mlperf-inference-results-to-github/_cm.json
 create mode 100644 script/push-mlperf-inference-results-to-github/customize.py
 create mode 100644 script/push-mlperf-inference-results-to-github/run.sh
 create mode 100644 script/remote-run-commands/README-extra.md
 create mode 100644 script/remote-run-commands/README.md
 create mode 100644 script/remote-run-commands/_cm.json
 create mode 100644 script/remote-run-commands/customize.py
 create mode 100644 script/remote-run-commands/run.bat
 create mode 100644 script/remote-run-commands/run.sh
 create mode 100644 script/reproduce-ipol-paper-2022-439/README-extra.md
 create mode 100644 script/reproduce-ipol-paper-2022-439/README.md
 create mode 100644 script/reproduce-ipol-paper-2022-439/_cm.yaml
 create mode 100644 script/reproduce-ipol-paper-2022-439/customize.py
 create mode 100644 script/reproduce-ipol-paper-2022-439/requirements.txt
 create mode 100644 script/reproduce-ipol-paper-2022-439/run.bat
 create mode 100644 script/reproduce-ipol-paper-2022-439/run.sh
 create mode 100644 script/reproduce-micro-paper-2023-victima/README-extra.md
 create mode 100644 script/reproduce-micro-paper-2023-victima/README.md
 create mode 100644 script/reproduce-micro-paper-2023-victima/_cm.yaml
 create mode 100644 script/reproduce-micro-paper-2023-victima/customize.py
 create mode 100644 script/reproduce-micro-paper-2023-victima/install_deps.sh
 create mode 100644 script/reproduce-micro-paper-2023-victima/main.py
 create mode 100644 script/reproduce-micro-paper-2023-victima/plot.sh
 create mode 100644 script/reproduce-micro-paper-2023-victima/run.sh
 create mode 100644 script/reproduce-micro-paper-2023-xyz/README.md
 create mode 100644 script/reproduce-mlperf-inference-dummy/README.md
 create mode 100644 script/reproduce-mlperf-octoml-tinyml-results/README-extra.md
 create mode 100644 script/reproduce-mlperf-octoml-tinyml-results/README.md
 create mode 100644 script/reproduce-mlperf-octoml-tinyml-results/_cm.json
 create mode 100644 script/reproduce-mlperf-octoml-tinyml-results/customize.py
 create mode 100644 script/reproduce-mlperf-octoml-tinyml-results/dockerfiles/ubuntu_20.04.Dockerfile
 create mode 100644 script/reproduce-mlperf-octoml-tinyml-results/run.sh
 create mode 100644 script/reproduce-mlperf-training-nvidia/README.md
 create mode 100644 script/reproduce-mlperf-training-nvidia/_cm.yaml
 create mode 100644 script/reproduce-mlperf-training-nvidia/customize.py
 create mode 100644 script/reproduce-mlperf-training-nvidia/run-resnet.sh
 create mode 100644 script/reproduce-mlperf-training-nvidia/run.sh
 create mode 100644 script/run-all-mlperf-models/README.md
 create mode 100644 script/run-all-mlperf-models/_cm.yaml
 create mode 100644 script/run-all-mlperf-models/customize.py
 create mode 100644 script/run-all-mlperf-models/run-bert-macos.sh
 create mode 100644 script/run-all-mlperf-models/run-bert.sh
 create mode 100644 script/run-all-mlperf-models/run-cpp-implementation.sh
 create mode 100644 script/run-all-mlperf-models/run-mobilenet-models.sh
 create mode 100644 script/run-all-mlperf-models/run-nvidia-4090.sh
 create mode 100644 script/run-all-mlperf-models/run-nvidia-a100.sh
 create mode 100644 script/run-all-mlperf-models/run-nvidia-t4.sh
 create mode 100644 script/run-all-mlperf-models/run-pruned-bert.sh
 create mode 100644 script/run-all-mlperf-models/run-reference-models.sh
 create mode 100644 script/run-all-mlperf-models/run-resnet50-macos.sh
 create mode 100644 script/run-all-mlperf-models/run-resnet50.sh
 create mode 100644 script/run-all-mlperf-models/run-retinanet-sh
 create mode 100644 script/run-all-mlperf-models/template.sh
 create mode 100644 script/run-docker-container/README-extra.md
 create mode 100644 script/run-docker-container/README.md
 create mode 100644 script/run-docker-container/_cm.json
 create mode 100644 script/run-docker-container/customize.py
 create mode 100644 script/run-mlperf-inference-app/README-extra.md
 create mode 100644 script/run-mlperf-inference-app/README.md
 create mode 100644 script/run-mlperf-inference-app/_cm.yaml
 create mode 100644 script/run-mlperf-inference-app/customize.py
 create mode 100644 script/run-mlperf-inference-app/faq/ctuning-cpp-tflite.md
 create mode 100644 script/run-mlperf-inference-app/faq/deepsparse.md
 create mode 100644 script/run-mlperf-inference-app/faq/intel.md
 create mode 100644 script/run-mlperf-inference-app/faq/mlcommons-cpp.md
 create mode 100644 script/run-mlperf-inference-app/faq/mlcommons-python.md
 create mode 100644 script/run-mlperf-inference-app/faq/nvidia.md
 create mode 100644 script/run-mlperf-inference-app/faq/qualcomm.md
 create mode 100644 script/run-mlperf-inference-app/modular-cm-containers/README.md
 create mode 100644 script/run-mlperf-inference-app/modular-cm-containers/_common.bat
 create mode 100644 script/run-mlperf-inference-app/modular-cm-containers/_common.sh
 create mode 100644 script/run-mlperf-inference-app/modular-cm-containers/build.bat
 create mode 100644 script/run-mlperf-inference-app/modular-cm-containers/build.sh
 create mode 100644 script/run-mlperf-inference-app/modular-cm-containers/mlperf-inference--ubuntu-cpu.Dockerfile
 create mode 100644 script/run-mlperf-inference-app/modular-cm-containers/run.bat
 create mode 100644 script/run-mlperf-inference-app/modular-cm-containers/run.sh
 create mode 100644 script/run-mlperf-inference-app/run_mobilenet.py
 create mode 100644 script/run-mlperf-inference-app/setup/b-deepsparse.md
 create mode 100644 script/run-mlperf-inference-app/setup/i-intel.md
 create mode 100644 script/run-mlperf-inference-app/setup/i-nvidia.md
 create mode 100644 script/run-mlperf-inference-app/setup/i-qualcomm.md
 create mode 100644 script/run-mlperf-inference-mobilenet-models/README-about.md
 create mode 100644 script/run-mlperf-inference-mobilenet-models/README.md
 create mode 100644 script/run-mlperf-inference-mobilenet-models/_cm.json
 create mode 100644 script/run-mlperf-inference-mobilenet-models/customize.py
 create mode 100644 script/run-mlperf-inference-mobilenet-models/run.sh
 create mode 100644 script/run-mlperf-inference-submission-checker/README-extra.md
 create mode 100644 script/run-mlperf-inference-submission-checker/README.md
 create mode 100644 script/run-mlperf-inference-submission-checker/_cm.json
 create mode 100644 script/run-mlperf-inference-submission-checker/code.py
 create mode 100644 script/run-mlperf-inference-submission-checker/customize.py
 create mode 100644 script/run-mlperf-inference-submission-checker/run.bat
 create mode 100644 script/run-mlperf-inference-submission-checker/run.sh
 create mode 100644 script/run-mlperf-power-client/README-extra.md
 create mode 100644 script/run-mlperf-power-client/README.md
 create mode 100644 script/run-mlperf-power-client/_cm.json
 create mode 100644 script/run-mlperf-power-client/customize.py
 create mode 100644 script/run-mlperf-power-client/dummy.sh
 create mode 100644 script/run-mlperf-power-client/run.sh
 create mode 100644 script/run-mlperf-power-server/README-extra.md
 create mode 100644 script/run-mlperf-power-server/README.md
 create mode 100644 script/run-mlperf-power-server/_cm.json
 create mode 100644 script/run-mlperf-power-server/customize.py
 create mode 100644 script/run-mlperf-power-server/run.bat
 create mode 100644 script/run-mlperf-power-server/run.sh
 create mode 100644 script/run-mlperf-training-submission-checker/README.md
 create mode 100644 script/run-mlperf-training-submission-checker/_cm.json
 create mode 100644 script/run-mlperf-training-submission-checker/customize.py
 create mode 100644 script/run-mlperf-training-submission-checker/run.sh
 create mode 100644 script/run-python/README.md
 create mode 100644 script/run-python/_cm.json
 create mode 100644 script/run-python/run.bat
 create mode 100644 script/run-python/run.sh
 create mode 100644 script/run-terraform/README-about.md
 create mode 100644 script/run-terraform/README-extra.md
 create mode 100644 script/run-terraform/README.md
 create mode 100644 script/run-terraform/_cm.json
 create mode 100644 script/run-terraform/aws/apply_credentials.sh
 create mode 100644 script/run-terraform/aws/credentials.example
 create mode 100644 script/run-terraform/aws/main.tf
 create mode 100644 script/run-terraform/customize.py
 create mode 100644 script/run-terraform/gcp/apply_credentials.sh
 create mode 100644 script/run-terraform/gcp/main.tf
 create mode 100644 script/run-terraform/run.sh
 create mode 100644 script/save-mlperf-inference-implementation-state/README.md
 create mode 100644 script/save-mlperf-inference-implementation-state/_cm.yaml
 create mode 100644 script/save-mlperf-inference-implementation-state/customize.py
 create mode 100644 script/set-device-settings-qaic/README.md
 create mode 100644 script/set-device-settings-qaic/_cm.json
 create mode 100644 script/set-device-settings-qaic/customize.py
 create mode 100644 script/set-device-settings-qaic/run.sh
 create mode 100644 script/set-echo-off-win/README.md
 create mode 100644 script/set-echo-off-win/_cm.json
 create mode 100644 script/set-echo-off-win/customize.py
 create mode 100644 script/set-performance-mode/README.md
 create mode 100644 script/set-performance-mode/_cm.json
 create mode 100644 script/set-performance-mode/customize.py
 create mode 100644 script/set-performance-mode/run-ubuntu.sh
 create mode 100644 script/set-performance-mode/run.bat
 create mode 100644 script/set-performance-mode/run.sh
 create mode 100644 script/set-sqlite-dir/README.md
 create mode 100644 script/set-sqlite-dir/_cm.json
 create mode 100644 script/set-sqlite-dir/code.py
 create mode 100644 script/set-sqlite-dir/customize.py
 create mode 100644 script/set-sqlite-dir/run.bat
 create mode 100644 script/set-sqlite-dir/run.sh
 create mode 100644 script/set-venv/README-extra.md
 create mode 100644 script/set-venv/README.md
 create mode 100644 script/set-venv/_cm.yaml
 create mode 100644 script/set-venv/customize.py
 create mode 100644 script/tar-my-folder/README-extra.md
 create mode 100644 script/tar-my-folder/README.md
 create mode 100644 script/tar-my-folder/_cm.json
 create mode 100644 script/tar-my-folder/customize.py
 create mode 100644 script/test-download-and-extract-artifacts/README-extra.md
 create mode 100644 script/test-download-and-extract-artifacts/README.md
 create mode 100644 script/test-download-and-extract-artifacts/_cm.yaml
 create mode 100644 script/test-download-and-extract-artifacts/customize.py
 create mode 100644 script/test-download-and-extract-artifacts/run.bat
 create mode 100644 script/test-download-and-extract-artifacts/run.sh
 create mode 100644 script/test-mlperf-inference-retinanet/README.md
 create mode 100644 script/test-mlperf-inference-retinanet/_cm.json
 create mode 100644 script/test-mlperf-inference-retinanet/customize.py
 create mode 100644 script/test-mlperf-inference-retinanet/run.bat
 create mode 100644 script/test-mlperf-inference-retinanet/run.sh
 create mode 100644 script/test-set-sys-user-cm/README.md
 create mode 100644 script/test-set-sys-user-cm/_cm.json
 create mode 100644 script/test-set-sys-user-cm/run.sh
 create mode 100644 script/truncate-mlperf-inference-accuracy-log/README-extra.md
 create mode 100644 script/truncate-mlperf-inference-accuracy-log/README.md
 create mode 100644 script/truncate-mlperf-inference-accuracy-log/_cm.json
 create mode 100644 script/truncate-mlperf-inference-accuracy-log/customize.py
 create mode 100644 script/truncate-mlperf-inference-accuracy-log/run.sh
 create mode 100644 script/upgrade-python-pip/README.md
 create mode 100644 script/upgrade-python-pip/_cm.json
 create mode 100644 script/upgrade-python-pip/run.bat
 create mode 100644 script/upgrade-python-pip/run.sh
 create mode 100644 script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md
 create mode 100644 script/wrapper-reproduce-octoml-tinyml-submission/README.md
 create mode 100644 script/wrapper-reproduce-octoml-tinyml-submission/_cm.json
 create mode 100644 script/wrapper-reproduce-octoml-tinyml-submission/customize.py
 create mode 100644 script/wrapper-reproduce-octoml-tinyml-submission/run.sh

diff --git a/.github/workflows/check-all-broken-links.md b/.github/workflows/check-all-broken-links.md
new file mode 100644
index 0000000000..99f57b56df
--- /dev/null
+++ b/.github/workflows/check-all-broken-links.md
@@ -0,0 +1,17 @@
+name: Check .md README files for broken links
+
+on:
+  push: [master]
+
+jobs:
+  markdown-link-check:
+    runs-on: ubuntu-latest
+    # check out the latest version of the code
+    steps:
+    - uses: actions/checkout@v3
+
+    # Checks the status of hyperlinks in .md files in verbose mode
+    - name: Check links
+      uses: gaurav-nelson/github-action-markdown-link-check@v1
+      with:
+        use-quiet-mode: 'yes'
diff --git a/.github/workflows/check-broken-links.md b/.github/workflows/check-broken-links.md
new file mode 100644
index 0000000000..a753ec75ba
--- /dev/null
+++ b/.github/workflows/check-broken-links.md
@@ -0,0 +1,17 @@
+name: Check .md README files for broken links
+
+on: [pull_request]
+
+jobs:
+  markdown-link-check:
+    runs-on: ubuntu-latest
+    # check out the latest version of the code
+    steps:
+    - uses: actions/checkout@v3
+
+    # Checks the status of hyperlinks in .md files in verbose mode
+    - name: Check links
+      uses: gaurav-nelson/github-action-markdown-link-check@v1
+      with:
+        use-quiet-mode: 'yes'
+        check-modified-files-only: 'yes'
diff --git a/.github/workflows/test-cm-script-features.yml b/.github/workflows/test-cm-script-features.yml
new file mode 100644
index 0000000000..026c79e74d
--- /dev/null
+++ b/.github/workflows/test-cm-script-features.yml
@@ -0,0 +1,38 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: CM script automation features test
+
+on:
+  pull_request:
+    branches: [ "master", "dev" ]
+    paths:
+      - '.github/workflows/test-cm-script-features.yml'
+      - 'cm-mlops/**'
+      - '!cm-mlops/**.md'
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.12", "3.11", "3.10", "3.9", "3.8"]
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v3
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python -m pip install cmind
+        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
+        cm run script --quiet --tags=get,sys-utils-cm
+    - name: Test CM Script Features
+      run: |
+        python tests/script/test_install.py
+        python tests/script/test_docker.py
+        python tests/script/test_features.py
diff --git a/.github/workflows/test-cm-scripts.yml b/.github/workflows/test-cm-scripts.yml
new file mode 100644
index 0000000000..fc00a22f5a
--- /dev/null
+++ b/.github/workflows/test-cm-scripts.yml
@@ -0,0 +1,36 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: CM script automation test
+
+on:
+  pull_request:
+    branches: [ "master", "dev" ]
+    paths:
+      - '.github/workflows/test-cm-scripts.yml'
+      - 'cm-mlops/**'
+      - '!cm-mlops/**.md'
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.12", "3.9"]
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v3
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python -m pip install cmind
+        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
+        cm run script --quiet --tags=get,sys-utils-cm
+    - name: Test CM Script Automation
+      run: |
+        python tests/script/test_deps.py
diff --git a/.github/workflows/test-cm-tutorial-retinanet.yml b/.github/workflows/test-cm-tutorial-retinanet.yml
new file mode 100644
index 0000000000..8125920821
--- /dev/null
+++ b/.github/workflows/test-cm-tutorial-retinanet.yml
@@ -0,0 +1,35 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: CM tutorial retinanet
+
+on:
+  pull_request:
+    branches: [ "master", "dev" ]
+    paths:
+      - '.github/workflows/test-cm-tutorial-retinanet.yml'
+      - 'cm-mlops/**'
+      - '!cm-mlops/**.md'
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.9"]
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v3
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python -m pip install cmind
+        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
+        cm run script --quiet --tags=get,sys-utils-cm
+    - name: Test CM Tutorial Retinanet
+      run: |
+        python tests/tutorials/test_tutorial_retinanet.py
diff --git a/.github/workflows/test-cm-tutorial-tvm-pip.yml b/.github/workflows/test-cm-tutorial-tvm-pip.yml
new file mode 100644
index 0000000000..f4fbbe8215
--- /dev/null
+++ b/.github/workflows/test-cm-tutorial-tvm-pip.yml
@@ -0,0 +1,57 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: CM tutorial tvm pip install
+
+on:
+  pull_request:
+    branches: [ "master", "test" ]
+    paths:
+      - '.github/workflows/test-cm-tutorial-tvm-pip.yml'
+      - 'cm-mlops/**'
+      - '!cm-mlops/**.md'
+
+jobs:
+  test_vm_runtime:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.9"]
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v3
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python -m pip install cmind
+        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
+        cm run script --quiet --tags=get,sys-utils-cm
+    - name: Test CM Tutorial TVM pip install with VirtualMachine Runtime
+      run: |
+        python tests/tutorials/test_tutorial_tvm_pip_vm.py
+
+  test_ge_runtime:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.9"]
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v3
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python -m pip install cmind
+        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
+        cm run script --quiet --tags=get,sys-utils-cm
+    - name: Test CM Tutorial TVM pip install with GraphExecutor Runtime
+      run: |
+        python tests/tutorials/test_tutorial_tvm_pip_ge.py
diff --git a/.github/workflows/test-cm-tutorial-tvm.yml b/.github/workflows/test-cm-tutorial-tvm.yml
new file mode 100644
index 0000000000..5a08e3fc5a
--- /dev/null
+++ b/.github/workflows/test-cm-tutorial-tvm.yml
@@ -0,0 +1,36 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: CM tutorial tvm
+
+on:
+  pull_request:
+    branches: [ "test" ]
+    paths:
+      - '.github/workflows/test-cm-tutorial-tvm.yml'
+      - 'cm-mlops/**'
+      - '!cm-mlops/**.md'
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.9"]
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v3
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python -m pip install cmind
+        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
+        cm run script --quiet --tags=get,sys-utils-cm
+    - name: Test CM Tutorial TVM
+      run: |
+        python tests/tutorials/test_tutorial_tvm.py
diff --git a/.github/workflows/test-cm.yml b/.github/workflows/test-cm.yml
new file mode 100644
index 0000000000..ce3ee706d7
--- /dev/null
+++ b/.github/workflows/test-cm.yml
@@ -0,0 +1,69 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: CM test
+
+on:
+  pull_request:
+    branches: [ "master" ]
+    paths:
+      - '.github/workflows/test-cm.yml'
+      - 'cm/**'
+      - '!cm/**.md'
+
+jobs:
+  build:
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
+        on: [ubuntu-latest]
+    runs-on: "${{ matrix.on }}"
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v3
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        python -m pip install flake8 pytest
+        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+        python -m pip install --ignore-installed --verbose pip setuptools
+        cd cm
+        python setup.py install
+        python -m cmind
+        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
+    - name: Lint with flake8
+      run: |
+        # stop the build if there are Python syntax errors or undefined names
+        flake8 cm/cmind --count --select=E9,F63,F7,F82 --show-source --statistics
+        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+        flake8 cm/cmind --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+    - name: Test
+      run: |
+        python tests/test_cm.py
+
+  test_cm:
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
+        on: [ubuntu-latest, windows-latest]
+    runs-on: "${{ matrix.on }}"
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v3
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        python -m pip install cmind
+        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
+    - name: Test CM
+      run: |
+        python tests/test_cm.py
+
diff --git a/.github/workflows/test-image-classification-onnx.yml b/.github/workflows/test-image-classification-onnx.yml
new file mode 100644
index 0000000000..62049d1c51
--- /dev/null
+++ b/.github/workflows/test-image-classification-onnx.yml
@@ -0,0 +1,36 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: image classification with ONNX
+
+on:
+  pull_request:
+    branches: [ "master", "dev" ]
+    paths:
+      - '.github/workflows/test-image-classification-onnx.yml'
+      - 'cm-mlops/**'
+      - '!cm-mlops/**.md'
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: [ "3.12", "3.9"]
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v3
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python3 -m pip install cmind
+        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
+        cm run script --quiet --tags=get,sys-utils-cm
+    - name: Test image classification with ONNX
+      run: |
+        cmr "python app image-classification onnx" --quiet
diff --git a/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml b/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml
new file mode 100644
index 0000000000..89760294a6
--- /dev/null
+++ b/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml
@@ -0,0 +1,44 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: MLPerf inference bert (deepsparse, tf, onnxruntime, pytorch)
+
+on:
+  pull_request:
+    branches: [ "master", "dev" ]
+    paths:
+      - '.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml'
+      - 'cm-mlops/**'
+      - '!cm-mlops/**.md'
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        # 3.12 didn't work on 20240305 - need to check
+        python-version: [ "3.11", "3.9" ]
+        backend: [ "deepsparse", "tf", "onnxruntime", "pytorch" ]
+        precision: [ "int8", "fp32" ]
+        exclude:
+        - backend: tf
+        - backend: pytorch
+        - backend: onnxruntime
+        - precision: fp32
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v3
+      with:
matrix.python-version }} + - name: Install dependencies + run: | + python3 -m pip install cmind + cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }} + cm run script --quiet --tags=get,sys-utils-cm + - name: Test MLPerf Inference Bert (DeepSparse, TF, ONNX, PyTorch) + run: | + cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="cTuning" --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=1 -v --quiet diff --git a/.github/workflows/test-mlperf-inference-gptj.yml b/.github/workflows/test-mlperf-inference-gptj.yml new file mode 100644 index 0000000000..ea1a70fa86 --- /dev/null +++ b/.github/workflows/test-mlperf-inference-gptj.yml @@ -0,0 +1,38 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: MLPerf inference GPT-J + +on: + pull_request: + branches: [ "master1", "dev1" ] + paths: + - '.github/workflows/test-mlperf-inference-gptj.yml' + - 'cm-mlops/**' + - '!cm-mlops/**.md' + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: [ "3.12", "3.9" ] + backend: [ "pytorch" ] + precision: [ "bfloat16" ] + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python3 -m pip install cmind + cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }} + cm run script --quiet --tags=get,sys-utils-cm + - name: Test MLPerf Inference GPTJ + run: | + cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="cTuning" --model=gptj --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet diff --git a/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml b/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml new file mode 100644 index 0000000000..a28e86825f --- /dev/null +++ b/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml @@ -0,0 +1,38 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: MLPerf inference MLCommons C++ ResNet50 + +on: + pull_request: + branches: [ "master", "dev" ] + paths: + - '.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml' + - 'cm-mlops/**' + - '!cm-mlops/**.md' + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: [ "3.12", "3.9" ] + llvm-version: [ "15.0.6", "16.0.4", "17.0.6" ] + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python3 -m pip install cmind + cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }} + cm run script --quiet --tags=get,sys-utils-cm + cm run 
script --quiet --tags=install,prebuilt,llvm --version=${{ matrix.llvm-version }} + - name: Test MLPerf Inference MLCommons C++ ResNet50 + run: | + cmr "app mlperf inference mlcommons cpp" -v --quiet diff --git a/.github/workflows/test-mlperf-inference-resnet50.yml b/.github/workflows/test-mlperf-inference-resnet50.yml new file mode 100644 index 0000000000..29b7b3cb59 --- /dev/null +++ b/.github/workflows/test-mlperf-inference-resnet50.yml @@ -0,0 +1,43 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: MLPerf inference resnet50 + +on: + pull_request: + branches: [ "master", "dev" ] + paths: + - '.github/workflows/test-mlperf-inference-resnet50.yml' + - 'cm-mlops/**' + - '!cm-mlops/**.md' + +jobs: + build: + + runs-on: ubuntu-latest + env: + CM_INDEX: "on" + strategy: + fail-fast: false + matrix: + python-version: [ "3.12", "3.9" ] + backend: [ "onnxruntime", "tf" ] + implementation: [ "python", "cpp" ] + exclude: + - backend: tf + implementation: cpp + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python3 -m pip install cmind + cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }} + cm run script --quiet --tags=get,sys-utils-cm + - name: Test MLPerf Inference ResNet50 + run: | + cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="cTuning" --hw_name=default --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet diff --git a/.github/workflows/test-mlperf-inference-retinanet.yml b/.github/workflows/test-mlperf-inference-retinanet.yml new file mode 100644 index 0000000000..4846aa2bd1 --- /dev/null +++ b/.github/workflows/test-mlperf-inference-retinanet.yml @@ -0,0 +1,41 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: MLPerf inference retinanet + +on: + pull_request: + branches: [ "master", "dev" ] + paths: + - 'cm-mlops/**' + - '.github/workflows/test-mlperf-inference-retinanet.yml' + - '!cm-mlops/**.md' + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: [ "3.12", "3.9" ] + backend: [ "onnxruntime", "pytorch" ] + implementation: [ "python", "cpp" ] + exclude: + - backend: pytorch + implementation: cpp + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python3 -m pip install cmind + cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }} + cm run script --quiet --tags=get,sys-utils-cm + - name: Test MLPerf Inference Retinanet using ${{ matrix.backend }} + run: | + cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="cTuning" --hw_name=default --model=retinanet --implementation=${{ 
matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --adr.compiler.tags=gcc --quiet -v --target_qps=1
diff --git a/.github/workflows/test-mlperf-inference-rnnt.yml b/.github/workflows/test-mlperf-inference-rnnt.yml
new file mode 100644
index 0000000000..d6c1ae6a2e
--- /dev/null
+++ b/.github/workflows/test-mlperf-inference-rnnt.yml
@@ -0,0 +1,38 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: MLPerf inference rnnt
+
+on:
+  pull_request:
+    branches: [ "master", "dev" ]
+    paths:
+      - '.github/workflows/test-mlperf-inference-rnnt.yml'
+      - 'cm-mlops/**'
+      - '!cm-mlops/**.md'
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: [ "3.12", "3.9" ]
+        backend: [ "pytorch" ]
+        precision: [ "fp32" ]
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v3
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python3 -m pip install cmind
+        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
+        cm run script --quiet --tags=get,sys-utils-cm
+    - name: Test MLPerf Inference RNNT
+      run: |
+        cm run script --tags=run,mlperf,inference,generate-run-cmds,_performance-only --submitter="cTuning" --model=rnnt --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=5 -v --quiet
diff --git a/.github/workflows/test-mlperf-inference-tvm.yml b/.github/workflows/test-mlperf-inference-tvm.yml
new file mode 100644
index 0000000000..04c624513b
--- /dev/null
+++ b/.github/workflows/test-mlperf-inference-tvm.yml
@@ -0,0 +1,38 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+# We already run a similar test in our TVM tutorial test, so this workflow is not strictly necessary
+name: MLPerf inference resnet50 using TVM
+ +on: + pull_request: + branches: [ "tvm-more" ] + paths: + - '.github/workflows/test-mlperf-inference-tvm.yml' + - 'cm-mlops/**' + - '!cm-mlops/**.md' + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: [ "3.12", "3.10" ] + backend: [ "tvm-onnx" ] + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python3 -m pip install cmind + cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }} + cm run script --quiet --tags=get,sys-utils-cm + - name: MLPerf Inference ResNet50 using TVM + run: | + cm run script --tags=run,mlperf,inference,generate-run-cmds --hw_name=default --model=resnet50 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --target_qps=1 -v --quiet diff --git a/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml b/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml new file mode 100644 index 0000000000..a66e40981c --- /dev/null +++ b/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml @@ -0,0 +1,36 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: MLPerf loadgen with HuggingFace bert onnx fp32 squad model + +on: + pull_request: + branches: [ "master", "dev" ] + paths: + - '.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml' + - 'cm-mlops/**' + - '!cm-mlops/**.md' + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: [ "3.12", "3.9" ] + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python3 -m pip install cmind + cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }} + cm run script --quiet --tags=get,sys-utils-cm + - name: Test MLPerf loadgen with HuggingFace bert onnx fp32 squad model + run: | + cmr "python app loadgen-generic _onnxruntime _custom _huggingface _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --adr.hf-downloader.model_filename=model.onnx --quiet diff --git a/.github/workflows/test-qaic-compute-sdk-build.yml b/.github/workflows/test-qaic-compute-sdk-build.yml new file mode 100644 index 0000000000..b2e774a440 --- /dev/null +++ b/.github/workflows/test-qaic-compute-sdk-build.yml @@ -0,0 +1,35 @@ +name: Test Compilation of QAIC Compute SDK (build LLVM from src) + +on: + schedule: + - cron: "1 1 * * 2" + +jobs: + build: + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + llvm-version: [ "12.0.1", "13.0.1", "14.0.0", "15.0.6", "16.0.4", "17.0.6" ] + exclude: + - llvm-version: "13.0.1" + - llvm-version: "14.0.0" + - llvm-version: "15.0.6" + - llvm-version: "16.0.4" + - llvm-version: "17.0.6" + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python3 -m pip install cmind + cm pull repo 
mlcommons@ck + cm run script --tags=get,sys-utils-cm --quiet + + - name: Test QAIC Compute SDK for compilation + run: | + cm run script --tags=get,qaic,compute,sdk --adr.llvm.version=${{ matrix.llvm-version }} --quiet diff --git a/.github/workflows/test-qaic-software-kit.yml b/.github/workflows/test-qaic-software-kit.yml new file mode 100644 index 0000000000..e3a186daae --- /dev/null +++ b/.github/workflows/test-qaic-software-kit.yml @@ -0,0 +1,41 @@ +name: Test QAIC Software kit Compilation + +on: + schedule: + - cron: "1 1 * * 1" + +jobs: + build_ubuntu_20_04: + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + compiler: [ "gcc", "llvm" ] + llvm-version: [ "12.0.1", "13.0.1", "14.0.0", "15.0.6" ] + exclude: + - llvm-version: "12.0.1" + - llvm-version: "13.0.1" + - llvm-version: "14.0.0" + compiler: "gcc" + - llvm-version: "15.0.6" + compiler: "gcc" + include: + - llvm-version: " " + compiler: "gcc" + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python3 -m pip install cmind + cm pull repo mlcommons@ck + cm run script --tags=get,sys-utils-cm --quiet + + - name: Test Software Kit for compilation on Ubuntu 20.04 + run: | + cm run script --tags=get,qaic,software,kit --adr.compiler.tags=${{ matrix.compiler }} --adr.compiler.version=${{ matrix.llvm-version }} --quiet + cm run script --tags=get,qaic,software,kit --adr.compiler.tags=${{ matrix.compiler }} --adr.compiler.version=${{ matrix.llvm-version }} --quiet diff --git a/.github/workflows/update-script-dockerfiles.yml b/.github/workflows/update-script-dockerfiles.yml new file mode 100644 index 0000000000..4c626b137e --- /dev/null +++ b/.github/workflows/update-script-dockerfiles.yml @@ -0,0 +1,41 @@ +# This workflow will add/update the default dockerfile for any updated CM scripts +name: Dockerfile update for CM scripts + +on: + push: + branches: [ "master", "dev" ] + paths: + - 'cm-mlops/script/**_cm.json' + +jobs: + dockerfile: + if: github.repository == 'mlcommons/ck' + runs-on: ubuntu-latest + steps: + - name: 'Checkout' + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Get changed files + id: getfile + run: | + echo "files=$(git diff --name-only ${{ github.event.before }} | xargs)" >> $GITHUB_OUTPUT + - name: Update dockerfile + run: | + for file in ${{ steps.getfile.outputs.files }}; do + echo $file + done + python3 -m pip install cmind + cm pull repo --url=https://github.com/${{ github.repository }} --checkout=${{ github.ref_name }} + python3 tests/script/process_dockerfile.py ${{ steps.getfile.outputs.files }} + + FOLDER=`cm find repo mlcommons@ck | cut -d' ' -f3` + + USER=ctuning-admin + EMAIL=admin@ctuning.org + + git config --global user.name "$USER" + git config --global user.email "$EMAIL" + git remote set-url origin https://x-access-token:${{ secrets.ACCESS_TOKEN }}@github.com/${{ github.repository }} + git add *.Dockerfile + git diff-index --quiet HEAD || (git commit -am "Updated dockerfile" && git push) diff --git a/.github/workflows/update-script-readme.yml b/.github/workflows/update-script-readme.yml new file mode 100644 index 0000000000..8e795e4bb2 --- /dev/null +++ b/.github/workflows/update-script-readme.yml @@ -0,0 +1,46 @@ +# This workflow will add/update the README.md files for any updated CM scripts +name: Readme update for CM scripts + +on: + push: + branches: [ "master", "dev" ] + paths: + - 
'cm-mlops/script/**_cm.json' + +jobs: + doreadme: + runs-on: ubuntu-latest + if: github.repository == 'mlcommons/ck' + steps: + - name: 'Checkout' + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Get changed files + id: getfile + run: | + echo "files=$(git diff --name-only ${{ github.event.before }} | xargs)" >> $GITHUB_OUTPUT + - name: Update readme + run: | + echo ${{ steps.getfile.outputs.files }} + for file in ${{ steps.getfile.outputs.files }}; do + echo $file + done + python3 -m pip install cmind + cm pull repo --url=https://github.com/${{ github.repository }} --checkout=${{ github.ref_name }} + python3 tests/script/process_readme.py ${{ steps.getfile.outputs.files }} + #REPO=${{ github.repository }} + #CM_REPO=${REPO/\//@} + #FOLDER=`cm find repo ${CM_REPO} | cut -d' ' -f3` + FOLDER=`cm find repo mlcommons@ck | cut -d' ' -f3` + cd $FOLDER + echo "Changed to $FOLDER" + + USER=ctuning-admin + EMAIL=admin@ctuning.org + + git config --global user.name "$USER" + git config --global user.email "$EMAIL" + git remote set-url origin https://x-access-token:${{ secrets.ACCESS_TOKEN }}@github.com/${{ github.repository }} + git add *.md + git diff-index --quiet HEAD || (git commit -am "Updated docs" && git push && echo "Changes pushed") diff --git a/CHANGES.md b/CHANGES.md new file mode 100644 index 0000000000..a24a477b5e --- /dev/null +++ b/CHANGES.md @@ -0,0 +1,213 @@ +Since March 2023, all updates to CM automations are submitted via PRs. +You can follow our PRs at +* https://github.com/ctuning/mlcommons-ck/commits/master +* https://github.com/mlcommons/ck/pulls?q=is%3Apr+is%3Aclosed . + +--- + +### 20230214 + * experiment and graph gui are working now + +### 20230206: + * started prototyping cm run experiment + +### 20230123: + * added simple GUI to CM scripts + +### 20221206: + * added "script_name" to the CM "script" meta to specify any native script name + * added "--script_name" to "cm add script {alias} --script_name=my-native-script.sh" + +### 20221206: + * added CM_SCRIPT_EXTRA_CMD to force some flags to all scripts + +### 20221202: + * major updates for Windows (CL, CUDA, etc) + +### 20221111: + * various fixes for Student Cluster Competition at SuperComputing'22 + +### 20221110: + * added support to push MLPerf results to W&B dashboard + +### 20221103: + * added "cm json2yaml utils" and "cm yaml2json utils" + +### 20221024: + * added --verbose and --time to "cm run script" + +### 20221017: + * removed the need for echo-off script + +### 20221010: + * added cm run script --debug-script-tags to run cmd/bash before native script + * added cm run script --shell to set env and run shell after script execution + +* 20221007: + * added script template (used when adding new scripts) + * major clean up of all scripts + +### 20220916: + * treat alias as tags if spaces: + cm run script "get compiler" is converted to cm run script --tags=get,compiler + * improved gcc detection + * refactored "cm run script" to skip deps in cache if needed + +### 20220906 + * added --print_env flag to "cm run script" to print aggregated env + before running native scripts + * various fixes to support MLPerf automation + +### 20220823 + * various fixes for universal MLPerf inference submission automation + +### 20220803 + * various fixes for TVM and image classification + +### 20220802 + * added "run_script_after_post_deps" to script meta to run script after post deps + (useful to activate python virtual env) + * added "activate-python-venv" script to make it easier to debug Python deps installation 
+ +### 20220722 + * added --accept-license and --skip-system-deps + (converted to env CM_ACCEPT_LICENSE ("True") and CM_SKIP_SYSTEM_DEPS ("True")) + +### 20220719 + * moved relatively stable MLOps automation scripts here + +### 20220718 + * fixed local_env_keys in get-python3 + * added new_env_only_keys to meta to specify which env to keep + * fixed problem with adding tags from the selected script during caching + * added --skip-compile and --skip-run to script (converted to env CM_SKIP_COMPILE and CM_SKIP_RUN) + * fixed local_env_keys in get-python3 + * added new_env_only_keys to get-python3 + +### 20220713 + * added local_env_keys to meta + * added "env" dict to os_info + +### 20220712 + * major script refactoring to support cache tags update from deps + * fixed version min/max propagations in deps + * improvements to support tags from deps + * added tags from deps (python, llvm) + +### 20220708 + * various fixes to handle versions (min/max/default) + * various fixes to avoid contamination of ENV from other scripts + * various fixes to handle versions (min/max/default) + +### 20220705 + * fixes for remembered selections + * added --skip-remembered-selections to "cm run script" + +### 20220704 + * fixed a bug with searching for scripts with variations + * added the possibility to update deps from pre/post processing + * added --extra-cache-tags and --name for "cm run script" + * added prototype of selection caching + * fixed get-python-venv + +### 20220701 + * added dummy "cm test script" + * added "--env" to "cm show cache" to show env and state + * added "cm show cache" + +### 20220629 + * added "detect_version_using_script" in script used to detect python packages + * major fix to properly support multiple scripts with the same tags, caching, selection, etc + * fixed a bug in version comparison (converting string to int) + * added recording of "version" to cache meta + +### 20220628 + * fixed local_env with deps + +### 20220623 + * important update of versions logic + +### 20220621 + * added support for --quiet + * changed CM_NEED_VERSION to CM_VERSION + * added CM_VERSION_MIN, CM_VERSION_MAX + * added cm compare_versions utils --version1=... --version2=... + * added support to detect min/max/correct versions + +### 20220617 + * fixed logic to handle variations (-_): https://github.com/mlcommons/ck/issues/243 + +### 20220616 + * changed "cached" to "cache" automation + +### 20220615 + * major update of script (remove parallel env/new_env and state/new_state). + keep global env & state and detect changes automatically + * major simplification of "script" + * removed "installed" to be more understandable + * added "cached" to be more understandable + +### 20220609 + * added "versions" key to the CM script meta + it works similar to "variations" and is forced by --version + * changed "ic" to "script" in "experiment" automation + +### 20220608 + * updated "variations" logic in "script"! + meta['default_variation'] (str): only one of many + meta['default_variations'] (list): multiple choices + * deprecated "ic" automation. Use "script" instead! 
+ +### 20220607 + * added strip_folders to utils/unzip_file + * fixed minor bugs in CM script + +### 20220606 + * added "name" key to deps (list of names and UIDs) + * added "add_deps_tags" in variations and in CMD ({"name":"tag(s)"}) + * added "deps" to variations to be merged with the list of current deps + * added --input and --output for cm run script converted to env CM_INPUT and CM_OUTPUT + useful to create interactive CM scripts to process files + * Added prototype-test-deps-variations-tags to play with deps, variations, tags + +### 20220605 + * clean tmp files in "script" automation by default and keep them using --dirty flag + +### 20220603 + * added "skip" and "deps" to postprocess to call other scripts. + For example call install LLVM if detect LLVM fails... + * added "script" automation to substitute less intuitive "ic" + * Improved LLVM detection and installation + * Added example of image corner detection + * Added updated script entries + +### 20220601 + * added version, path, skip_install and post_deps to IC + * added --new to IC to detect new components + * Updating mechanisms to install and/or detect LLVM + * added support to install prebuilt LLVM for Linux, MacOs, Windows + +### 20220530 + * updated ic automation to read tmp-run-state.json + and merge it with the "new_state" dict + +### 20220524 + * changed directory ck2-repo-mlops to cm-devops + +### 20220517 + * Changed CM_PATH_LIST to +PATH + * Added general support for +ENV that is expanded to ENV=val1;val2;...:${ENV} + +### 20220511 + * Better handle exceptions in utils.download_file + * Added support for variations in intelligent components (ic) + * Fixed bugs in IC + * Added "_" prefix in tags to specify variation of IC + * Record env.sh in "installed artifacts even if bat file is not executed + * Fixed experiment directory naming on Windows + * Added "cm version ic" (#233) + * Added prototype of ic::prototype-get-ml-model-resnet50-onnx with variations + * Added prototype of ic::prototype-get-imagenet-val with variations + * Added prototype of ic::prototype-get-imagenet-aux with variations + * Added prototype of ic::prototype-get-llvm + * Added prototype of ic::prototype-get-tvm diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6e05ae89d7..5219a7fc94 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,3 +1,5 @@ +# Collective Mind project aka CM + ## Contributing The best way to contribute to the MLCommons is to get involved with one of our many project communities. You find more information about getting involved with MLCommons [here](https://mlcommons.org/en/get-involved/#getting-started). @@ -6,4 +8,70 @@ Generally we encourage people to become a MLCommons member if they wish to contr Regardless of if you are a member, your organization needs to sign the MLCommons CLA. Please fill out this [CLA sign up form](https://forms.gle/Ew1KkBVpyeJDuRw67) form to get started. -MLCommons project work is tracked with issue trackers and pull requests. Modify the project in your own fork and issue a pull request once you want other developers to take a look at what you have done and discuss the proposed changes. Ensure that cla-bot and other checks pass for your Pull requests. \ No newline at end of file +MLCommons project work is tracked with issue trackers and pull requests. Modify the project in your own fork and issue a pull request once you want other developers to take a look at what you have done and discuss the proposed changes. Ensure that cla-bot and other checks pass for your Pull requests. 
+
+## CM project coordinator
+
+* [Grigori Fursin](https://cKnowledge.org/gfursin)
+
+## CM authors
+
+* [Grigori Fursin](https://cKnowledge.org/gfursin) (CM core and CM scripts for MLOps)
+* [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh) (CM scripts for MLPerf and MLOps)
+
+## CM contributors in alphabetical order (suggestions and/or scripts)
+
+* Resmi Arjun
+* Alice Cheng (Nvidia)
+* Jiahao Chen (MIT)
+* Ramesh N Chukka (Intel)
+* Ray DeMoss (One Stop Systems)
+* Ryan T DeRue (Purdue University)
+* Himanshu Dutta (Indian Institute of Technology)
+* Justin Faust (One Stop Systems)
+* Diane Feddema (Red Hat)
+* Leonid Fursin (United Silicon Carbide)
+* Anirban Ghosh (Nvidia)
+* Michael Goin (Neural Magic)
+* Jose Armando Hernandez (Paris Saclay University)
+* Mehrdad Hessar (OctoML)
+* Miro Hodak (AMD)
+* Sachin Idgunji (Nvidia)
+* Tom Jablin (Google)
+* Nino Jacob
+* David Kanter (MLCommons)
+* Jason Knight (OctoML)
+* Ilya Kozulin (Deelvin)
+* @makaveli10 (Collabora)
+* Steve Leak (NERSC)
+* Amija Maji (Purdue University)
+* Peter Mattson (Google, MLCommons)
+* Kasper Mecklenburg (Arm)
+* Pablo Gonzalez Mesa
+* Thierry Moreau (OctoML)
+* Sachin Mudaliyar
+* Stanley Mwangi (Microsoft)
+* Ashwin Nanjappa (Nvidia)
+* Hai Ah Nam (NERSC)
+* Nandeeka Nayak (UIUC)
+* Datta Nimmaturi (Nutanix)
+* Lakshman Patel
+* Arun Tejusve Raghunath Rajan (Cruise)
+* Vijay Janapa Reddi (Harvard University)
+* Andrew Reusch (OctoML)
+* Anandhu S (Kerala Technical University)
+* Sergey Serebryakov (HPE)
+* Warren Schultz (Principled Technologies)
+* Amrutha Sheleenderan (Kerala Technical University)
+* Byoungjun Seo (TTA)
+* Aditya Kumar Shaw (Indian Institute of Science)
+* Ilya Slavutin (Deelvin)
+* Jinho Suh (Nvidia)
+* Badhri Narayanan Suresh (Intel)
+* David Tafur (MLCommons)
+* Chloe Tessier
+* Gaurav Verma (Stony Brook University)
+* Scott Wasson (MLCommons)
+* Haoyang Zhang (UIUC)
+* Bojian Zheng (University of Toronto)
+* Thomas Zhu (Oxford University)
diff --git a/LICENSE.third-party.md b/LICENSE.third-party.md
new file mode 100644
index 0000000000..faa0084585
--- /dev/null
+++ b/LICENSE.third-party.md
@@ -0,0 +1 @@
+This CM repository may contain CM scripts with third-party files licensed under the Apache 2.0, BSD or MIT licenses.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000..cde1576187
--- /dev/null
+++ b/README.md
@@ -0,0 +1,29 @@
+# CM repository with cross-platform scripts for DevOps, MLOps, AIOps and MLPerf
+
+[![License](https://img.shields.io/badge/License-Apache%202.0-green)](LICENSE.md)
+[![Powered by CM](https://img.shields.io/badge/Powered_by-MLCommons%20CM-blue)](https://github.com/mlcommons/ck)
+
+This repository contains reusable and cross-platform automation recipes to run DevOps, MLOps, AIOps and MLPerf
+via a simple and human-readable [Collective Mind interface (CM)](https://github.com/mlcommons/ck)
+while adapting to different operating systems, software and hardware.
+
+All CM scripts have a simple Python API, extensible JSON/YAML meta description
+and unified input/output to make them reusable in different projects either individually
+or by chaining them together into portable automation workflows, applications
+and web services adaptable to continuously changing models, data sets, software and hardware.
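+
+For example, here is a minimal sketch of calling a CM script from Python rather than the CLI
+(it assumes the `cmind` package is installed via `pip install cmind` and a CM repository with
+scripts has been pulled; the `get,sys-utils-cm` tags are only an illustration):
+
+```python
+import cmind
+
+# Equivalent of `cm run script --tags=get,sys-utils-cm --quiet` on the CLI
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,sys-utils-cm',
+                  'quiet': True})
+if r['return'] > 0:
+    print(r['error'])
+```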
+
+These automation recipes are being developed and maintained
+by the [MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+with [great contributions](CONTRIBUTING.md) from the community.
+
+### Catalog
+
+See the automatically generated catalog [online](https://access.cknowledge.org/playground/?action=scripts).
+
+### License
+
+[Apache 2.0](LICENSE.md)
+
+### Copyright
+
+2022-2024 [MLCommons](https://mlcommons.org)
diff --git a/automation/cache/README-extra.md b/automation/cache/README-extra.md
new file mode 100644
index 0000000000..84d2741794
--- /dev/null
+++ b/automation/cache/README-extra.md
@@ -0,0 +1,71 @@
+[ [Back to index](../../../docs/README.md) ]
+
+# CM "cache" automation
+
+*We suggest checking the [CM introduction](https://github.com/mlcommons/ck/blob/master/docs/introduction-cm.md)
+ and the [CM CLI/API](https://github.com/mlcommons/ck/blob/master/docs/interface.md) to understand CM motivation and concepts.*
+
+## CM script CLI
+
+Whenever a [CM script](https://access.cknowledge.org/playground/?action=scripts)
+caches its output (such as a downloaded model, a pre-processed data set or built code),
+you can find it using the CM "cache" automation as follows:
+
+```bash
+cm show cache
+```
+
+You can filter cache entries by tags and variations:
+```bash
+cm show cache --tags=ml-model
+cm show cache --tags=python
+```
+
+You can find the path to a given cache artifact as follows:
+```bash
+cm find cache --tags=ml-model,bert
+```
+
+You can delete one or more cache artifacts as follows:
+```bash
+cm rm cache --tags=ml-model
+```
+
+You can skip the user prompt by adding the `-f` flag as follows:
+```bash
+cm rm cache --tags=ml-model -f
+```
+
+You can clean the whole cache as follows:
+```bash
+cm rm cache -f
+```
+
+## CM Python API
+
+You can access the same functionality via the CM Python API as follows:
+
+```python
+
+import cmind
+
+output = cmind.access({'action':'show',
+                       'automation':'cache,541d6f712a6b464e'})
+
+if output['return']>0:
+    cmind.error(output)
+
+artifacts = output['list']
+
+for artifact in artifacts:
+    print ('')
+    print (artifact.path)
+    print (artifact.meta)
+
+```
+
+## Related
+
+* [CM "script" automation](../script/README-extra.md)
diff --git a/automation/cache/README.md b/automation/cache/README.md
new file mode 100644
index 0000000000..0a3114d3b5
--- /dev/null
+++ b/automation/cache/README.md
@@ -0,0 +1,87 @@
+*This README is automatically generated - don't edit! See [extra README](README-extra.md) for extra notes!*
+
+### Automation actions
+
+#### test
+
+  * CM CLI: ```cm test cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L15))
+  * CM CLI with UID: ```cm test cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L15))
+  * CM Python API:
+    ```python
+    import cmind
+
+    r=cmind.access({
+                    'action':'test',
+                    'automation':'cache,541d6f712a6b464e',
+                    'out':'con',
+    ```
+    [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L15)
+    ```python
+                   })
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+#### show
+
+  * CM CLI: ```cm show cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L54))
+  * CM CLI with UID: ```cm show cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L54))
+  * CM Python API:
+    ```python
+    import cmind
+
+    r=cmind.access({
+                    'action':'show',
+                    'automation':'cache,541d6f712a6b464e',
+                    'out':'con',
+    ```
+    [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L54)
+    ```python
+                   })
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+#### search
+
+  * CM CLI: ```cm search cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L153))
+  * CM CLI with UID: ```cm search cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L153))
+  * CM Python API:
+    ```python
+    import cmind
+
+    r=cmind.access({
+                    'action':'search',
+                    'automation':'cache,541d6f712a6b464e',
+                    'out':'con',
+    ```
+    [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L153)
+    ```python
+                   })
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+#### copy_to_remote
+
+  * CM CLI: ```cm copy_to_remote cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L186))
+  * CM CLI with UID: ```cm copy_to_remote cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L186))
+  * CM Python API:
+    ```python
+    import cmind
+
+    r=cmind.access({
+                    'action':'copy_to_remote',
+                    'automation':'cache,541d6f712a6b464e',
+                    'out':'con',
+    ```
+    [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L186)
+    ```python
+                   })
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/automation/cache/_cm.json b/automation/cache/_cm.json
new file mode 100644
index 0000000000..ac383f937c
--- /dev/null
+++ b/automation/cache/_cm.json
@@ -0,0 +1,12 @@
+{
+  "alias": "cache",
+  "automation_alias": "automation",
+  "automation_uid": "bbeb15d8f0a944a4",
+  "desc": "Caching cross-platform CM scripts",
+  "developers": "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)",
+  "sort": 900,
+  "tags": [
+    "automation"
+  ],
+  "uid": "541d6f712a6b464e"
+}
diff --git a/automation/cache/module.py
b/automation/cache/module.py new file mode 100644 index 0000000000..e162d85a75 --- /dev/null +++ b/automation/cache/module.py @@ -0,0 +1,212 @@ +import os + +from cmind.automation import Automation +from cmind import utils + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print (json.dumps(i, indent=2)) + + return {'return':0} + + ############################################################ + def show(self, i): + """ + Show cache + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + (env) (bool): if True, show env from cm-cached-state.json + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + import json + + # Check parsed automation + if 'parsed_automation' not in i: + return {'return':1, 'error':'automation is not specified'} + + console = i.get('out') == 'con' + + show_env = i.get('env', False) + +# Moved to search function +# # Check simplified CMD: cm show cache "get python" +# # If artifact has spaces, treat them as tags! +# artifact = i.get('artifact','') +# tags = i.get('tags','').strip() +# if ' ' in artifact or ',' in artifact: +# del(i['artifact']) +# if 'parsed_artifact' in i: del(i['parsed_artifact']) +# +# new_tags = artifact.replace(' ',',') +# tags = new_tags if tags=='' else new_tags+','+tags +# +# i['tags'] = tags + + # Find CM artifact(s) + i['out'] = None + r = self.search(i) + + if r['return']>0: return r + + lst = r['list'] + for artifact in sorted(lst, key = lambda x: sorted(x.meta['tags'])): +# for artifact in lst: + path = artifact.path + meta = artifact.meta + original_meta = artifact.original_meta + + alias = meta.get('alias','') + uid = meta.get('uid','') + + tags = meta.get('tags',[]) + tags1 = sorted([x for x in tags if not x.startswith('_')]) + tags2 = sorted([x for x in tags if x.startswith('_')]) + tags = tags1 + tags2 + + version = meta.get('version','') + + if console: + print ('') +# print ('* UID: {}'.format(uid)) + print ('* Tags: {}'.format(','.join(tags))) + print (' Path: {}'.format(path)) + if version!='': + print (' Version: {}'.format(version)) + + if show_env and console: + path_to_cached_state_file = os.path.join(path, 'cm-cached-state.json') + + if os.path.isfile(path_to_cached_state_file): + r = utils.load_json(file_name = path_to_cached_state_file) + if r['return']>0: return r + + # Update env and state from cache! 
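+                    # cm-cached-state.json is written when a script run is cached:
+                    # "new_env" records the environment variables exported by that run
+                    # and "new_state" records the state dict it produced (printed below).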
+                    cached_state = r['meta']
+
+                    new_env = cached_state.get('new_env', {})
+                    if len(new_env)>0:
+                        print ('      New env:')
+                        print (json.dumps(new_env, indent=6, sort_keys=True).replace('{','').replace('}',''))
+
+                    new_state = cached_state.get('new_state', {})
+                    if len(new_state)>0:
+                        print ('      New state:')
+                        print (json.dumps(new_state, indent=6, sort_keys=True))
+
+        return {'return':0, 'list': lst}
+
+    ############################################################
+    def search(self, i):
+        """
+        Overriding the automation search function to support a simplified CMD where tags can be passed with spaces
+
+        TBD: add input/output description
+        """
+
+        # Check simplified CMD: cm show cache "get python"
+        # If artifact has spaces, treat them as tags!
+        artifact = i.get('artifact','')
+        tags = i.get('tags','')
+
+        # Tags may be a list (if passed internally from CM scripts) or a string (if passed from the CMD)
+        if type(tags)!=list:
+            tags = tags.strip()
+
+            if ' ' in artifact:# or ',' in artifact:
+                del(i['artifact'])
+                if 'parsed_artifact' in i: del(i['parsed_artifact'])
+
+                new_tags = artifact.replace(' ',',')
+                tags = new_tags if tags=='' else new_tags+','+tags
+
+                i['tags'] = tags
+
+        # Force automation when rerunning access with processed input
+        i['automation']='cache,541d6f712a6b464e'
+        i['action']='search'
+        i['common'] = True # Avoid recursion - use the internal CM search function directly
+
+        # Find CM artifact(s)
+        return self.cmind.access(i)
+
+    ############################################################
+    def copy_to_remote(self, i):
+        """
+        Copy cache entries to a remote host.
+
+        Args:
+          (CM input dict):
+
+            (out) (str): if 'con', output to console
+
+            parsed_artifact (list): prepared in CM CLI or CM access function
+                                      [ (artifact alias, artifact UID) ] or
+                                      [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+            (repos) (str): list of repositories to search for automations (internal & mlcommons@ck by default)
+
+            (output_dir) (str): output directory (./ by default)
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+        """
+
+        return utils.call_internal_module(self, __file__, 'module_misc', 'copy_to_remote', i)
diff --git a/automation/cache/module_misc.py b/automation/cache/module_misc.py
new file mode 100644
index 0000000000..cc4a6ac31b
--- /dev/null
+++ b/automation/cache/module_misc.py
@@ -0,0 +1,98 @@
+import os
+from cmind import utils
+
+
+############################################################
+def copy_to_remote(i):
+    """
+    Copy cache entries to a remote host.
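+    (The copy is performed with rsync over ssh; cache paths recorded in the
+    entry's environment are rewritten to point at the remote cache location.)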
+ + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + parsed_artifact (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + (repos) (str): list of repositories to search for automations (internal & mlcommons@ck by default) + + (output_dir) (str): output directory (./ by default) + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + self_module = i['self_module'] + + remote_host = i.get('remote_host') + if not remote_host: + return {'return':1, 'error': 'Please input remote host_name/IP via --remote_host'} + remote_cm_repos_location = i.get('remote_cm_repos_location', os.path.join("/home", os.getlogin(), "CM", "repos")) + remote_cm_cache_location = os.path.join(remote_cm_repos_location, "local", "cache") + + remote_port = i.get('remote_port', '22') + remote_user = i.get('remote_user', os.getlogin()) + + tag_string = i['tags'] + tag_string += ",-tmp" + + cm_input = {'action': 'show', + 'automation': 'cache', + 'tags': f'{tag_string}', + 'quiet': True + } + r = self_module.cmind.access(cm_input) + if r['return'] > 0: + return r + + if len(r['list']) == 0: + pass #fixme + elif len(r['list']) > 1: + print("Multiple cache entries found: ") + for k in sorted(r['list'], key = lambda x: x.meta.get('alias','')): + print(k.path) + x = input("Would you like to copy them all? Y/n: ") + if x.lower() == 'n': + return {'return': 0} + + import json + + for k in sorted(r['list'], key = lambda x: x.meta.get('alias','')): + path = k.path + cacheid = os.path.basename(path) + + copy_cmd = f"rsync -avz --exclude cm-cached-state.json -e 'ssh -p {remote_port}' {path} {remote_user}@{remote_host}:{remote_cm_cache_location}" + print(copy_cmd) + os.system(copy_cmd) + + cm_cached_state_json_file = os.path.join(path, "cm-cached-state.json") + if not os.path.exists(cm_cached_state_json_file): + return {'return':1, 'error': f'cm-cached-state.json file missing in {path}'} + + with open(cm_cached_state_json_file, "r") as f: + cm_cached_state = json.load(f) + + new_env = cm_cached_state['new_env'] + new_state = cm_cached_state['new_state'] # Todo fix new state + cm_repos_path = os.environ.get('CM_REPOS', os.path.join(os.path.expanduser("~"), "CM", "repos")) + cm_cache_path = os.path.realpath(os.path.join(cm_repos_path, "local", "cache")) + + for key,val in new_env.items(): + if type(val) == str and cm_cache_path in val: + new_env[key] = val.replace(cm_cache_path, remote_cm_cache_location) + + with open("tmp_remote_cached_state.json", "w") as f: + json.dump(cm_cached_state, f, indent=2) + + remote_cached_state_file_location = os.path.join(remote_cm_cache_location, cacheid, "cm-cached-state.json") + copy_cmd = f"rsync -avz -e 'ssh -p {remote_port}' tmp_remote_cached_state.json {remote_user}@{remote_host}:{remote_cached_state_file_location}" + print(copy_cmd) + os.system(copy_cmd) + + return {'return':0} diff --git a/automation/cfg/README.md b/automation/cfg/README.md new file mode 100644 index 0000000000..3c82852c8d --- /dev/null +++ b/automation/cfg/README.md @@ -0,0 +1,27 @@ +*This README is automatically generated - don't edit! 
Use `README-extra.md` for extra notes!* + +### Automation actions + +#### test + + * CM CLI: ```cm test cfg``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15)) + * CM CLI with UID: ```cm test cfg,88dce9c160324c5d``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'test' + 'automation':'cfg,88dce9c160324c5d' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/automation/cfg/_cm.json b/automation/cfg/_cm.json new file mode 100644 index 0000000000..27f80fbd40 --- /dev/null +++ b/automation/cfg/_cm.json @@ -0,0 +1,9 @@ +{ + "alias": "cfg", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "tags": [ + "automation" + ], + "uid": "88dce9c160324c5d" +} diff --git a/automation/cfg/module.py b/automation/cfg/module.py new file mode 100644 index 0000000000..be8d6e7b1d --- /dev/null +++ b/automation/cfg/module.py @@ -0,0 +1,52 @@ +import os + +from cmind.automation import Automation +from cmind import utils + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print (json.dumps(i, indent=2)) + + return {'return':0} diff --git a/automation/challenge/README.md b/automation/challenge/README.md new file mode 100644 index 0000000000..2db03e8b16 --- /dev/null +++ b/automation/challenge/README.md @@ -0,0 +1,27 @@ +*This README is automatically generated - don't edit! 
Use `README-extra.md` for extra notes!* + +### Automation actions + +#### test + + * CM CLI: ```cm test challenge``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/challenge/module.py#L15)) + * CM CLI with UID: ```cm test challenge,3d84abd768f34e08``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/challenge/module.py#L15)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'test' + 'automation':'challenge,3d84abd768f34e08' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/challenge/module.py#L15) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/automation/challenge/_cm.json b/automation/challenge/_cm.json new file mode 100644 index 0000000000..a4f4164527 --- /dev/null +++ b/automation/challenge/_cm.json @@ -0,0 +1,9 @@ +{ + "alias": "challenge", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "tags": [ + "automation" + ], + "uid": "3d84abd768f34e08" +} diff --git a/automation/challenge/module.py b/automation/challenge/module.py new file mode 100644 index 0000000000..be8d6e7b1d --- /dev/null +++ b/automation/challenge/module.py @@ -0,0 +1,52 @@ +import os + +from cmind.automation import Automation +from cmind import utils + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print (json.dumps(i, indent=2)) + + return {'return':0} diff --git a/automation/contributor/README.md b/automation/contributor/README.md new file mode 100644 index 0000000000..df1f4e3d6f --- /dev/null +++ b/automation/contributor/README.md @@ -0,0 +1,47 @@ +*This README is automatically generated - don't edit! 
Use `README-extra.md` for extra notes!* + +### Automation actions + +#### test + + * CM CLI: ```cm test contributor``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L15)) + * CM CLI with UID: ```cm test contributor,68eae17b590d4f8f``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L15)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'test' + 'automation':'contributor,68eae17b590d4f8f' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L15) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### add + + * CM CLI: ```cm add contributor``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L54)) + * CM CLI with UID: ```cm add contributor,68eae17b590d4f8f``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L54)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'add' + 'automation':'contributor,68eae17b590d4f8f' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L54) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/automation/contributor/_cm.json b/automation/contributor/_cm.json new file mode 100644 index 0000000000..008f7d54c9 --- /dev/null +++ b/automation/contributor/_cm.json @@ -0,0 +1,9 @@ +{ + "alias": "contributor", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "tags": [ + "automation" + ], + "uid": "68eae17b590d4f8f" +} diff --git a/automation/contributor/module.py b/automation/contributor/module.py new file mode 100644 index 0000000000..82807638f8 --- /dev/null +++ b/automation/contributor/module.py @@ -0,0 +1,153 @@ +import os + +from cmind.automation import Automation +from cmind import utils + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... 
+ + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print (json.dumps(i, indent=2)) + + return {'return':0} + + ############################################################ + def add(self, i): + """ + Add CM script + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + self_automation = self.meta['alias']+','+self.meta['uid'] + + console = i.get('out') == 'con' + + artifact = i.get('artifact','') + if ':' not in artifact: + artifact = 'mlcommons@ck:'+artifact + + j = artifact.find(':') + name = artifact[j+1:] + + # Check info + if name == '': + name = input('Enter your name: ').strip() + if name == '': + return {'return':1, 'error':'name can\'t be empty'} + + artifact += name + + # Check if doesn't exist + r = self.cmind.access({'action':'find', + 'automation':self_automation, + 'artifact':artifact}) + if r['return']>0: return r + elif r['return']==0 and len(r['list'])>0: + return {'return':1, 'error':'CM artifact with name {} already exists in {}'.format(name, r['list'][0].path)} + + meta = i.get('meta',{}) + + # Prepare meta + org = meta.get('organization','') + if org=='': + org = input('Enter your organization (optional): ').strip() + + url = input('Enter your webpage (optional): ').strip() + + tags = input('Enter tags of your challenges separate by comma (you can add them later): ').strip() + + if meta.get('name','')=='': + meta = {'name':name} + + if org!='': + meta['organization'] = org + + if url!='': + meta['urls'] = [url] + + if tags!='': + meta['ongoing'] = tags.split(',') + + # Add placeholder (use common action) + i['out'] = 'con' + i['common'] = True # Avoid recursion - use internal CM add function to add the script artifact + + i['action'] = 'add' + i['automation'] = self_automation + i['artifact'] = artifact + + i['meta'] = meta + + print ('') + + r = self.cmind.access(i) + if r['return']>0: return r + + path = r['path'] + + path2 = os.path.dirname(path) + + print ('') + print ('Please go to {}, add your directory to Git, commit and create PR:'.format(path2)) + print ('') + print ('cd {}'.format(path2)) + print ('git add "{}"'.format(name)) + print ('git commit "{}"'.format(name)) + print ('') + print ('Please join https://discord.gg/JjWNWXKxwT to discuss challenges!') + print ('Looking forward to your contributions!') + + return r diff --git a/automation/data/_cm.json b/automation/data/_cm.json new file mode 100644 index 0000000000..7dd9a139f3 --- /dev/null +++ b/automation/data/_cm.json @@ -0,0 +1,9 @@ +{ + "alias": "data", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "tags": [ + "automation" + ], + "uid": "84d8ef6914bf4d78" +} diff --git a/automation/data/module.py b/automation/data/module.py new file mode 100644 index 0000000000..be8d6e7b1d --- /dev/null +++ b/automation/data/module.py @@ -0,0 +1,52 @@ +import os + +from cmind.automation import Automation +from cmind import utils + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + 
Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print (json.dumps(i, indent=2)) + + return {'return':0} diff --git a/automation/docker/README.md b/automation/docker/README.md new file mode 100644 index 0000000000..c6ef9a3842 --- /dev/null +++ b/automation/docker/README.md @@ -0,0 +1,27 @@ +*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!* + +### Automation actions + +#### test + + * CM CLI: ```cm test docker``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/docker/module.py#L15)) + * CM CLI with UID: ```cm test docker,2d90be7cab6e4d9f``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/docker/module.py#L15)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'test' + 'automation':'docker,2d90be7cab6e4d9f' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/docker/module.py#L15) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/automation/docker/_cm.json b/automation/docker/_cm.json new file mode 100644 index 0000000000..11a5085d0e --- /dev/null +++ b/automation/docker/_cm.json @@ -0,0 +1,11 @@ +{ + "alias": "docker", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "desc": "Managing modular docker containers (under development)", + "developers": "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)", + "tags": [ + "automation" + ], + "uid": "2d90be7cab6e4d9f" +} diff --git a/automation/docker/module.py b/automation/docker/module.py new file mode 100644 index 0000000000..aaf0f7802c --- /dev/null +++ b/automation/docker/module.py @@ -0,0 +1,51 @@ +import os + +from cmind.automation import Automation +from cmind import utils + +class CAutomation(Automation): + """ + CM "docker" automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + (artifact) (str): artifact as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (parsed_artifact) (list): prepared in CM CLI or 
CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + """ + + import json + print (json.dumps(i, indent=2)) + + return {'return':0} diff --git a/automation/docs/_cm.json b/automation/docs/_cm.json new file mode 100644 index 0000000000..6945baccaf --- /dev/null +++ b/automation/docs/_cm.json @@ -0,0 +1,9 @@ +{ + "alias": "docs", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "tags": [ + "automation" + ], + "uid": "9558c9e6ca124065" +} diff --git a/automation/docs/module.py b/automation/docs/module.py new file mode 100644 index 0000000000..be8d6e7b1d --- /dev/null +++ b/automation/docs/module.py @@ -0,0 +1,52 @@ +import os + +from cmind.automation import Automation +from cmind import utils + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print (json.dumps(i, indent=2)) + + return {'return':0} diff --git a/automation/experiment/README-extra.md b/automation/experiment/README-extra.md new file mode 100644 index 0000000000..c098acc14e --- /dev/null +++ b/automation/experiment/README-extra.md @@ -0,0 +1,315 @@ +[ [Back to index](../../../docs/README.md) ] + +
+Click here to see the table of contents. + +* [CM "experiment" automation](#cm-"experiment"-automation) + * [Introducing CM experiment automation](#introducing-cm-experiment-automation) + * [Installing CM with ResearchOps/DevOps/MLOps automations](#installing-cm-with-researchops/devops/mlops-automations) + * [Understanding CM experiments](#understanding-cm-experiments) + * [Exploring combinations of parameters (autotuning, design space exploration)](#exploring-combinations-of-parameters-autotuning-design-space-exploration) + * [Aggregating and unifying results](#aggregating-and-unifying-results) + * [Visualizing results](#visualizing-results) + * [Sharing experiments with the community](#sharing-experiments-with-the-community) + * [Running CM experiments with CM scripts](#running-cm-experiments-with-cm-scripts) + * [Further community developments](#further-community-developments) + +
+ +# CM "experiment" automation + +*We suggest you to check [CM introduction](https://github.com/mlcommons/ck/blob/master/docs/introduction-cm.md), + [CM CLI/API](https://github.com/mlcommons/ck/blob/master/docs/interface.md) + and [CM scripts](../script/README-extra.md) to understand CM motivation and concepts. + You can also try [CM tutorials](https://github.com/mlcommons/ck/blob/master/docs/tutorials/README.md) + to run some applications and benchmarks on your platform using CM scripts.* + +## Introducing CM experiment automation + + +Researchers, engineers and students spend considerable amount of their time experimenting with +many different settings of applications, tools, compilers, software and hardware +to find the optimal combination suitable for their use cases. + +Based on their feedback, our [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) +started developing a CM automation called "experiment". +The goal is to provide a common interface to run, record, share, visualize and reproduce experiments +on any platform with any software, hardware and data. + +The community helped us test a prototype of our "experiment" automation to record results in a unified CM format +from [several MLPerf benchmarks](https://github.com/mlcommons/cm4mlperf-results) +including [MLPerf inference](https://github.com/mlcommons/inference) and [MLPerf Tiny](https://github.com/mlcommons/tiny), +visualize them at the [MLCommons CM platform](https://access.cknowledge.org/playground/?action=experiments&tags=all), +and improve them by the community via [public benchmarking, optimization and reproducibility challenges](https://access.cknowledge.org/playground/?action=challenges). + + + +## Installing CM with ResearchOps/DevOps/MLOps automations + +This CM automation is available in the most commonly used `mlcommons@ck` repository. + +First, install CM automation language as described [here](https://github.com/mlcommons/ck/blob/master/docs/installation.md). +Then, install or update this repository as follows: +```bash +cm pull repo mlcommons@ck +``` + +You can now test that CM experiment automation is available as follows: +```bash +cm run experiment --help +``` +or using `cme` shortcut in CM V1.4.1+ +```bash +cme --help +``` + + + +## Understanding CM experiments + +CM experiment simply wraps any user command line, creates an associated CM `experiment` artifact with a random ID (16 low case HEX characters) +and some user tags in `_cm.json`, creates extra `{date}{time}` subdirectory with `cm-input.json` file with CM input, +and executes the user command line inside an extra subdirectory with another random ID as shown below. + +The following command will print "Hello World!" while recording all the provenance in CM format in the local CM repository: + +```bash +cme --tags=my,experiment,hello-world -- echo "Hello World!" +``` +or +```bash +cm run experiment --tags=my,experiment,hello-world -- echo "Hello World!" +``` + +You should see the output similar to the following: +```bash + +Path to CM experiment artifact: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945 +Path to experiment: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.09-58-02.863466 +================================================================ +Experiment step: 1 out of 1 + +Path to experiment step: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.09-58-02.863466\7ed0ea0edd6b4dd7 + +"Hello World!" 
+``` + +You can find and explore the newly created CM artifact as follows: +```bash +cm find experiment --tags=my,experiment,hello-world +``` +or using its UID: +```bash +cm find experiment b83a1fb24dbf4945 +``` + +When running the same experiment again, CM will find the existing artifact by tags and create a new `{date}{time}` directory there: +```bash +cme --tags=my,experiment,hello-world -- echo "Hello World!" + +Path to CM experiment artifact: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945 +Path to experiment: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.10-02-08.911210 +================================================================ +Experiment step: 1 out of 1 + +Path to experiment step: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.10-02-08.911210\7ed0ea0edd6b4dd7 + +"Hello World!" +``` + +You can now replay this experiment as follows: +```bash +cm replay experiment --tags=my,experiment,hello-world +``` + +Note that you can obtain the current directory where you called CM +(rather than the CM experiment artifact directory) via the {{CD}} variable as follows: +```bash +cme --tags=my,experiment,hello-world -- echo {{CD}} +``` + +You can also record experiments in another CM repository instead of the `local` one as follows: +```bash +cm list repo +cme {CM repository from above list}: --tags=my,experiment,hello-world -- echo {{CD}} +``` + +Finally, you can force a specific artifact name instead of some random ID as follows: +```bash +cme {my experiment artifact name} --tags=my,experiment,hello-world -- echo {{CD}} +``` +or with a given repository: +```bash +cme {CM repository from above list}:{my experiment artifact name} --tags=my,experiment,hello-world -- echo {{CD}} +``` + +## Exploring combinations of parameters (autotuning, design space exploration) + +One of the most common tasks in computer engineering (and other sciences) +is to explore various combinations of parameters of some applications +and systems to select the optimal ones to trade off performance, accuracy, +power consumption, memory usage and other characteristics. + +As a starting point, we have implemented a very simple explorer as a Cartesian product +of any number of specified variables that are passed to a user command line via double curly braces `{{VAR}}`, similar to GitHub Actions. + +You just need to create a simple JSON file `cm-input.json` to describe sets/ranges for each variable as follows: +```json +{ + "explore": { + "VAR1": [ + 1, + 2, + 3 + ], + "VAR2": [ + "a", + "b" + ], + "VAR3": "[2**i for i in range(0,6)]" + } +} +``` + +or a YAML `cm-input.yaml`: + +```yaml +explore: + VAR1: [1,2,3] + VAR2: ["a","b"] + VAR3: "[2**i for i in range(0,6)]" +``` + +You can then run the following example to see all iterations: +```bash +cm run experiment --tags=my,experiment,hello-world @test_input.yaml \ + -- echo %VAR1% --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-%%VAR3%% +``` + +Note that you can also define a Python list or range expression for other variables +directly in the command line, as demonstrated in the above example for `VAR4` - `{{VAR4{['xx','yy','zz']}}}`. + +CM will create or reuse the experiment artifact with tags `my,experiment,hello-world` +and will then iterate over the Cartesian product of all detected variables. + +For each iteration, CM will create a `{date}{time}` subdirectory in a given experiment artifact +and will then run the user command line with substituted variables there.
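Under the hood, this exploration is essentially a Cartesian product over the declared variables; the `experiment` module later in this patch builds it with `eval()` for string expressions and `itertools.product()`. Here is a minimal standalone Python sketch of the same idea (the command template and variable names are just examples, not CM's actual internals):

```python
import itertools

# Sets/ranges for each variable; string values are Python expressions,
# evaluated the same way the "experiment" automation does with eval()
explore = {
    'VAR1': [1, 2, 3],
    'VAR2': ['a', 'b'],
    'VAR3': '[2**i for i in range(0,6)]',
}

keys = list(explore)
dims = [v if isinstance(v, list) else eval(v) for v in explore.values()]

# Cartesian product of all detected variables: 3 * 2 * 6 = 36 steps
for step, values in enumerate(itertools.product(*dims), start=1):
    env = {k: str(v) for k, v in zip(keys, values)}
    # In CM, {{VAR}} placeholders in the user command line are replaced
    # from this env before each step runs in its own subdirectory
    cmd = 'echo --batch_size={VAR1} {VAR2} {VAR3}'.format(**env)
    print('Experiment step {}: {}'.format(step, cmd))
```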
+ +You can then replay any of the exploration experiments as follows: +```bash +cm replay experiment --tags={tags} --dir={sub directory} +``` + + + +## Aggregating and unifying results + +Users can expose any information such as measured characteristics of their applications and/or systems (performance, +hardware or OS state, accuracy, internal parameters, etc.) to CM for further analysis and visualization +by generating a JSON `cm-result.json` file with any dictionary. + +If this file exists after executing a user command, CM will load it after each experiment or exploration step, +and merge it into a list in a common `cm-result.json` in the `{date}{time}` directory for this experiment. + + + +## Visualizing results + +Users can now visualize multiple experiments using the CM GUI script as follows: +```bash +cm run script "gui _graph" --exp_tags=my,experiment,hello-world +``` + +This script will search for all CM experiment entries with these tags, read all `cm-result.json` files, +detect all keys used in the result dictionaries, let users select these keys for the X and Y axes +to prepare a 2D graph using the popular [Streamlit library](https://streamlit.io), add derived metrics and set constraints, +as shown in the following example for one of the official [Tiny MLPerf submissions](https://github.com/mlcommons/tiny): + +![](../../script/import-mlperf-tiny-to-experiment/assets/cm-visualization-and-customization-of-tinymlperf-results2.png) + + + + + + +## Sharing experiments with the community + +It is possible to share experiments with a common automation interface +in your own GitHub/GitLab repository, container or zip/tar file +in a non-intrusive way. + +You need to go to the root directory of your project and initialize a CM repository there +with a unique name "my-cool-project" as follows: + +```bash +cm init repo my-cool-project --path=. --prefix=cmr +``` + +This command will create a `cmr.yaml` file with a description and unique ID of this repository, +and will register it in CM. Note that all CM automations and artifacts will be located +in the `cmr` sub-directory to avoid contaminating your project. They can be deleted +or moved to another project at any time. + +You can now record new experiments in this repository by adding `my-cool-project:` to the `cm run experiment` command line as follows: +```bash +cm run experiment my-cool-project: --tags=my,experiment,hello-world -- echo "Hello World!" +``` + +You can also move a set of existing experiments from the `local` CM repository to the new one as follows: +```bash +cm move experiment my-cool-project: --tags=my,experiment,hello-world +``` + +You can continue replaying these experiments in the same way no matter which CM repository they are in: +```bash +cm replay experiment --tags=my,experiment,hello-world +``` + +or you can enforce a specific repository as follows: +```bash +cm replay experiment my-cool-project: --tags=my,experiment,hello-world +``` + + + + + +## Running CM experiments with CM scripts + +User scripts and tools may contain hardwired local paths that prevent replaying them on another platform. +In such cases, we suggest using [CM scripts](../script/README-extra.md). + +CM scripts solve this problem by wrapping existing user scripts and tools and detecting/resolving paths +to specific tools and artifacts on a given user platform.
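Since wrapped programs are ordinary processes, they can also feed results back to the `experiment` automation by writing the `cm-result.json` file described in the aggregation section above. A minimal sketch of such a user program follows (the metric names are illustrative, not a required schema):

```python
import json

# Any flat or nested dictionary works; CM loads this file after each step
# and merges it into the common cm-result.json in the {date}{time}
# directory of the current experiment
result = {
    'characteristics': {
        'latency_ms': 12.3,
        'accuracy': 0.761,
    },
    'batch_size': 8,
}

with open('cm-result.json', 'w') as f:
    json.dump(result, f, indent=2)
```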
+ +You can find an example of using CM scripts with CM experiments in [this directory](tests) - see `test3.bat` or `test3.sh`: +```bash +cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}} +``` + +You can use the following environment variables to pass the current path, +different paths to experiment entries and the number of the current experiment step to your CM script: +* {{CD}} +* {{CM_EXPERIMENT_STEP}} +* {{CM_EXPERIMENT_PATH}} +* {{CM_EXPERIMENT_PATH2}} +* {{CM_EXPERIMENT_PATH3}} + + +Feel free to check [this tutorial](../../../docs/tutorials/common-interface-to-reproduce-research-projects.md) +to add CM scripts for your own applications, tools and native scripts. + +We are currently extending CM experiments and CM scripts for MLPerf benchmarks +to automate benchmarking, optimization and design space exploration of ML/AI systems +on any software and hardware - please stay tuned via our [Discord server](https://discord.gg/JjWNWXKxwT). + + + +## Further community developments + +We are developing this experiment automation in CM to help the community share, reproduce and reuse experiments +using a common, simple, human-readable, and portable [automation language](../../../docs/README.md). + +Join our [Discord server](https://discord.gg/JjWNWXKxwT) from the [MLCommons task force on automation and reproducibility](../taskforce.md) +to participate in the unification and extension of this interface and CM scripts for diverse research projects and tools. + diff --git a/automation/experiment/README.md b/automation/experiment/README.md new file mode 100644 index 0000000000..13ea6ec1a5 --- /dev/null +++ b/automation/experiment/README.md @@ -0,0 +1,87 @@ +*This README is automatically generated - don't edit!
See [extra README](README-extra.md) for extra notes!* + +### Automation actions + +#### test + + * CM CLI: ```cm test experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22)) + * CM CLI with UID: ```cm test experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22)) + * CM Python API: + ```python + import cmind + + r=cmind.access({ + 'action':'test', + 'automation':'experiment,a0a2d123ef064bcb', + 'out':'con', + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### run + + * CM CLI: ```cm run experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64)) + * CM CLI with UID: ```cm run experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64)) + * CM Python API: + ```python + import cmind + + r=cmind.access({ + 'action':'run', + 'automation':'experiment,a0a2d123ef064bcb', + 'out':'con', + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### rerun + + * CM CLI: ```cm rerun experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428)) + * CM CLI with UID: ```cm rerun experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428)) + * CM Python API: + ```python + import cmind + + r=cmind.access({ + 'action':'rerun', + 'automation':'experiment,a0a2d123ef064bcb', + 'out':'con', + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### replay + + * CM CLI: ```cm replay experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451)) + * CM CLI with UID: ```cm replay experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451)) + * CM Python API: + ```python + import cmind + + r=cmind.access({ + 'action':'replay', + 'automation':'experiment,a0a2d123ef064bcb', + 'out':'con', + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/automation/experiment/_cm.json b/automation/experiment/_cm.json new file mode 100644 index 0000000000..49bb0e6166 --- /dev/null +++ b/automation/experiment/_cm.json @@ -0,0 +1,11 @@ +{ + "alias": "experiment", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "desc": "Managing and reproducing experiments (under development)", + "developers": "[Grigori Fursin](https://cKnowledge.org/gfursin)", + "tags": [ + "automation" + ], + "uid":
"a0a2d123ef064bcb" +} diff --git a/automation/experiment/module.py b/automation/experiment/module.py new file mode 100644 index 0000000000..3c6490d0d6 --- /dev/null +++ b/automation/experiment/module.py @@ -0,0 +1,804 @@ +import os +import itertools +import copy +import json + +from cmind.automation import Automation +from cmind import utils + +class CAutomation(Automation): + """ + CM "experiment" automation actions + """ + + CM_RESULT_FILE = 'cm-result.json' + CM_INPUT_FILE = 'cm-input.json' + CM_OUTPUT_FILE = 'cm-output.json' + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + """ + + import json + print (json.dumps(i, indent=2)) + + return {'return':0} + + + + + + ############################################################ + def run(self, i): + """ + Run experiment + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + (artifact) (str): experiment artifact name (can include repository separated by :) + (tags) (str): experiment tags separated by comma + + (dir) (str): force recording into a specific directory + + + (script) (str): find and run CM script by name + (s) + + (script_tags) (str): find and run CM script by tags + (stags) + + (rerun) (bool): if True, rerun experiment in a given entry/directory instead of creating a new one... + + (explore) (dict): exploration dictionary + + ... 
+ + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + """ + + # Copy of original input + ii_copy = copy.deepcopy(i) + cur_dir = os.getcwd() + + # Find or add artifact based on repo/alias/tags + r = self._find_or_add_artifact(i) + if r['return']>0: return r + + experiment = r['experiment'] + + console = i.get('out','')=='con' + + # Print experiment folder + experiment_path = experiment.path + + if console: + print ('') + print ('Path to CM experiment artifact: {}'.format(experiment_path)) + + + # Get directory with datetime + datetime = i.get('dir','') + + if datetime == '' and i.get('rerun', False): + # Check if already some dir exist + + directories = os.listdir(experiment_path) + + datetimes = sorted([f for f in directories if os.path.isfile(os.path.join(experiment_path, f, self.CM_RESULT_FILE))], reverse=True) + + if len(datetimes)==1: + datetime = datetimes[0] + elif len(datetimes)>1: + print ('') + print ('Select experiment:') + + datetimes = sorted(datetimes) + + num = 0 + print ('') + for d in datetimes: + print ('{}) {}'.format(num, d.replace('.',' '))) + num += 1 + + if not console: + return {'return':1, 'error':'more than 1 experiment found.\nPlease use "cm rerun experiment --dir={date and time}"'} + + print ('') + x=input('Make your selection or press Enter for 0: ') + + x=x.strip() + if x=='': x='0' + + selection = int(x) + + if selection < 0 or selection >= num: + selection = 0 + + datetime = datetimes[selection] + + + if datetime!='': + experiment_path2 = os.path.join(experiment_path, datetime) + else: + num = 0 + found = False + + while not found: + r = utils.get_current_date_time({}) + if r['return']>0: return r + + datetime = r['iso_datetime'].replace(':','-').replace('T','.') + + if num>0: + datetime+='.'+str(num) + + experiment_path2 = os.path.join(experiment_path, datetime) + + if not os.path.isdir(experiment_path2): + found = True + break + + num+=1 + + # Check/create directory with date_time + if not os.path.isdir(experiment_path2): + os.makedirs(experiment_path2) + + # Change current path + print ('Path to experiment: {}'.format(experiment_path2)) + + os.chdir(experiment_path2) + + # Record experiment input with possible exploration + experiment_input_file = os.path.join(experiment_path2, self.CM_INPUT_FILE) + experiment_result_file = os.path.join(experiment_path2, self.CM_RESULT_FILE) + + # Clean original input + for k in ['parsed_artifact', 'parsed_automation', 'cmd']: + if k in ii_copy: + del(ii_copy[k]) + + r = utils.save_json(file_name=experiment_input_file, meta=ii_copy) + if r['return']>0: return r + + # Prepare run command + cmd = '' + + unparsed = i.get('unparsed_cmd', []) + if len(unparsed)>0: + for u in unparsed: + if ' ' in u: u='"'+u+'"' + cmd+=' '+u + + cmd=cmd.strip() + + # Prepare script run + env = i.get('env', {}) + + ii = {'action':'native-run', + 'automation':'script,5b4e0237da074764', + 'env':env} + + # Prepare exploration + # Note that from Python 3.7, dictionaries are ordered so we can define order for exploration in json/yaml + # ${{XYZ}} ${{ABC(range(1,2,3))}} + + # Extract exploration expressions from {{VAR{expression}}} + explore = i.get('explore', {}) + + j = 1 + k = 0 + while j>=0: + j = cmd.find('}}}', k) + if j>=0: + k = j+1 + + l = cmd.rfind('{{',0, j) + + if l>=0: + l2 = cmd.find('{', l+2, j) + if l2>=0: + k = l2+1 + + var = cmd[l+2:l2] + expr = cmd[l2+1:j] + + explore[var] = expr + + cmd = cmd[:l2]+ 
cmd[j+1:] + + + # Separate Design Space Exploration into var and range + explore_keys=[] + explore_dimensions=[] + + for k in explore: + v=explore[k] + + explore_keys.append(k) + + # String values such as "[2**i for i in range(0,6)]" are Python expressions + if type(v)!=list: + v=eval(v) + + explore_dimensions.append(v) + + # Note: len(list(steps)) consumes the iterator, so it is recreated below + step = 0 + + steps = itertools.product(*explore_dimensions) + + num_steps = len(list(steps)) + + steps = itertools.product(*explore_dimensions) + + ii_copy = copy.deepcopy(ii) + + for dimensions in steps: + + step += 1 + + print ('================================================================') + print ('Experiment step: {} out of {}'.format(step, num_steps)) + + print ('') + + ii = copy.deepcopy(ii_copy) + + env = ii.get('env', {}) + + l_dimensions=len(dimensions) + if l_dimensions>0: + print (' Updating ENV variables during exploration:') + + print ('') + for j in range(l_dimensions): + v = dimensions[j] + k = explore_keys[j] + print (' - Dimension {}: "{}" = {}'.format(j, k, v)) + + env[k] = str(v) + + print ('') + + # Generate UID and prepare extra directory: + r = utils.gen_uid() + if r['return']>0: return r + + uid = r['uid'] + + experiment_path3 = os.path.join(experiment_path2, uid) + if not os.path.isdir(experiment_path3): + os.makedirs(experiment_path3) + + # Get date time of experiment + r = utils.get_current_date_time({}) + if r['return']>0: return r + + current_datetime = r['iso_datetime'] + + # Change current path + print ('Path to experiment step: {}'.format(experiment_path3)) + print ('') + os.chdir(experiment_path3) + + # Prepare and run experiment in a given placeholder directory + os.chdir(experiment_path3) + + ii['env'] = env + + # Change only in CMD + env_local={'CD':cur_dir, + 'CM_EXPERIMENT_STEP':str(step), + 'CM_EXPERIMENT_PATH':experiment_path, + 'CM_EXPERIMENT_PATH2':experiment_path2, + 'CM_EXPERIMENT_PATH3':experiment_path3} + + + # Update {{}} in CMD + cmd_step = cmd + + j = 1 + k = 0 + while j>=0: + j = cmd_step.find('{{', k) + if j>=0: + k = j + l = cmd_step.find('}}',j+2) + if l>=0: + var = cmd_step[j+2:l] + + # Such vars must be in env + if var not in env and var not in env_local: + return {'return':1, 'error':'key "{}" is not in env during exploration'.format(var)} + + if var in env: + value = env[var] + else: + value = env_local[var] + + cmd_step = cmd_step[:j] + str(value) + cmd_step[l+2:] + + ii['command'] = cmd_step + + print ('Generated CMD:') + print ('') + print (cmd_step) + print ('') + + # Prepare experiment step input + experiment_step_input_file = os.path.join(experiment_path3, self.CM_INPUT_FILE) + + r = utils.save_json(file_name=experiment_step_input_file, meta=ii) + if r['return']>0: return r + + experiment_step_output_file = os.path.join(experiment_path3, self.CM_OUTPUT_FILE) + if os.path.isfile(experiment_step_output_file): + os.remove(experiment_step_output_file) + + # Run CMD + rr=self.cmind.access(ii) + if rr['return']>0: return rr + + # Record output + result = {} + + if os.path.isfile(experiment_step_output_file): + r = utils.load_json(file_name=experiment_step_output_file) + if r['return']>0: return r + + result = r['meta'] + + # Try to flatten + try: + flatten_result = flatten_dict(result) + result = flatten_result + except Exception: + pass + + # Add extra info + result['uid'] = uid + result['iso_datetime'] = current_datetime + + # Attempt to append to the main file ...
+ all_results = [] + + if os.path.isfile(experiment_result_file): + r = utils.load_json(file_name=experiment_result_file) + if r['return']>0: return r + + all_results = r['meta'] + + all_results.append(result) + + r = utils.save_json(file_name=experiment_result_file, meta = all_results) + if r['return']>0: return r + + + rr = {'return':0, + 'experiment_path':experiment_path, + 'experiment_path2':experiment_path2} + + return rr + + + + + ############################################################ + def rerun(self, i): + """ + Rerun experiment + + cm run experiment --rerun=True ... + """ + + i['rerun']=True + + return self.run(i) + + + + + + + + + + + + + ############################################################ + def replay(self, i): + """ + Replay experiment + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + (artifact) (str): experiment artifact + + (tags) (str): experiment tags separated by comma + + (dir) (str): experiment directory (often date time) + (uid) (str): unique ID of an experiment + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + """ + + # Find or add artifact based on repo/alias/tags + i['fail_if_not_found']=True + r = self._find_or_add_artifact(i) + if r['return']>0: return r + + experiment = r['experiment'] + + console = i.get('out','')=='con' + + # Print experiment folder + experiment_path = experiment.path + + if console: + print ('') + print ('Path to CM experiment artifact: {}'.format(experiment_path)) + + # Check date and time folder + uid = i.get('uid', '') + datetime = i.get('dir', '') + + if datetime!='': + datetimes = [datetime] + else: + directories = os.listdir(experiment_path) + + datetimes = sorted([f for f in directories if os.path.isfile(os.path.join(experiment_path, f, self.CM_RESULT_FILE))], reverse=True) + + if len(datetimes)==0: + return {'return':1, 'error':'experiment(s) not found in {}'.format(experiment_path)} + + # Check datetime directory + found_result = {} + + if uid!='': + for d in datetimes: + r = self._find_uid({'path':experiment_path, 'datetime':d, 'uid':uid}) + if r['return']>0: return r + + if len(r.get('result',{}))>0: + found_result = r['result'] + datetime = d + experiment_path2 = os.path.join(experiment_path, datetime) + break + + if len(found_result)==0: + return {'return':1, 'error':'couldn\'t find result with UID {} in {}'.format(uid, experiment_path)} + + else: + if len(datetimes)==1: + datetime = datetimes[0] + else: + print ('') + print ('Available experiments:') + + datetimes = sorted(datetimes) + + num = 0 + print ('') + for d in datetimes: + print ('{}) {}'.format(num, d.replace('.',' '))) + num += 1 + + if not console: + return {'return':1, 'error':'more than 1 experiment found.\nPlease use "cm run experiment --dir={date and time}"'} + + print ('') + x=input('Make your selection or press Enter for 0: ') + + x=x.strip() + if x=='': x='0' + + selection = int(x) + + if selection < 0 or selection >= num: + selection = 0 + + datetime = datetimes[selection] + + # Final path to experiment + experiment_path2 = os.path.join(experiment_path, datetime) + + if not os.path.isdir(experiment_path2): + return {'return':1, 'error':'experiment path not found {}'.format(experiment_path2)} + + r = self._find_uid({'path':experiment_path, 'datetime':datetime}) + if r['return']>0: return r + + results = r['meta'] + + if len(results)==0: + return {'return':1, 'error':'results not 
found in {}'.format(experiment_path2)} + + elif len(results)==1: + selection = 0 + + else: + print ('') + print ('Available Unique IDs of results:') + + results = sorted(results, key=lambda x: x.get('uid','')) + + num = 0 + print ('') + for r in results: + print ('{}) {}'.format(num, r.get('uid',''))) + num += 1 + + if not console: + return {'return':1, 'error':'more than 1 result found.\nPlease use "cm run experiment --uid={result UID}"'} + + print ('') + x=input('Make your selection or press Enter for 0: ') + + x=x.strip() + if x=='': x='0' + + selection = int(x) + + if selection < 0 or selection >= num: + selection = 0 + + found_result = results[selection] + uid = found_result['uid'] + + # Final info + if console: + print ('') + print ('Path to experiment: {}'.format(experiment_path2)) + + print ('') + print ('Result UID: {}'.format(uid)) + + # Attempt to load cm-input.json + experiment_input_file = os.path.join(experiment_path2, self.CM_INPUT_FILE) + + if not os.path.isfile(experiment_input_file): + return {'return':1, 'error':'{} not found - can\'t replay'.format(self.CM_INPUT_FILE)} + + r = utils.load_json(experiment_input_file) + if r['return']>0: return r + + cm_input = r['meta'] + + tags = cm_input.get('tags','').strip() + if 'replay' not in tags: + if tags!='': tags+=',' + tags+='replay' + cm_input['tags'] = tags + + if console: + print ('') + print ('Experiment input:') + print ('') + print (json.dumps(cm_input, indent=2)) + print ('') + + # Run experiment again + r = self.cmind.access(cm_input) + if r['return']>0: return r + + # TBA - validate experiment, etc ... + + + return {'return':0} + + + ############################################################ + def _find_or_add_artifact(self, i): + """ + Find or add experiment artifact (reused in run and reply) + + Args: + (CM input dict): + + (fail_if_not_found) (bool) - if True, fail if experiment is not found + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + experiment (CM artifact class): Experiment artifact + + """ + + console = i.get('out','')=='con' + + # Try to find experiment artifact by alias and/or tags + ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'] + ['tags']) + ii['action']='find' + + ii_copy = copy.deepcopy(ii) + + # If artifact is specified, remove tags + artifact = ii.get('artifact','').strip() + if artifact!='' and not artifact.endswith(':') \ + and '*' not in artifact and '?' 
not in artifact: + if 'tags' in ii: del(ii['tags']) + + r = self.cmind.access(ii) + if r['return']>0: return r + + lst = r['list'] + + if len(lst)>1: + print ('More than 1 experiment artifact found:') + + lst = sorted(lst, key=lambda x: x.path) + + num = 0 + print ('') + for e in lst: + print ('{}) {}'.format(num, e.path)) + print (' Tags: {}'.format(','.join(e.meta.get('tags',[])))) + num += 1 + + if not console: + return {'return':1, 'error':'more than 1 experiment artifact found.\nPlease use "cm run experiment {name}" or "cm run experiment --tags={tags separated by comma}"'} + + print ('') + x=input('Make your selection or press Enter for 0: ') + + x=x.strip() + if x=='': x='0' + + selection = int(x) + + if selection < 0 or selection >= num: + selection = 0 + + experiment = lst[selection] + + elif len(lst)==1: + experiment = lst[0] + else: + # Create new entry + if i.get('fail_if_not_found',False): + return {'return':1, 'error':'experiment not found'} + + ii = copy.deepcopy(ii_copy) + ii['action']='add' + r = self.cmind.access(ii) + if r['return']>0: return r + + experiment_uid = r['meta']['uid'] + + r = self.cmind.access({'action':'find', + 'automation':'experiment,a0a2d123ef064bcb', + 'artifact':experiment_uid}) + if r['return']>0: return r + + lst = r['list'] + if len(lst)==0 or len(lst)>1: + return {'return':1, 'error':'created experiment artifact with UID {} but can\'t find it - weird'.format(experiment_uid)} + + experiment = lst[0] + + return {'return':0, 'experiment':experiment} + + ############################################################ + def _find_uid(self, i): + """ + Find experiment result with a given UID + + Args: + (CM input dict): + + path (str): path to experiment artifact + datetime (str): sub-path to experiment + (uid) (str): experiment UID + + ... 
+ + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + path_to_file (str): path to experiment result file + meta (dict): complete list of all results + result (dict): result dictionary with a given UID + + """ + + path = i['path'] + datetime = i['datetime'] + uid = i.get('uid', '').strip() + + path_to_experiment_result_file = os.path.join(path, datetime, self.CM_RESULT_FILE) + + rr={'return':0, 'path_to_file':path_to_experiment_result_file} + + if os.path.isfile(path_to_experiment_result_file): + r = utils.load_json(file_name=path_to_experiment_result_file) + if r['return']>0: return r + + meta = r['meta'] + + rr['meta'] = meta + + # Searching for UID + if uid!='': + for result in meta: + ruid = result.get('uid', '').strip() + if ruid!='' and ruid==uid: + rr['result']=result + break + + return rr + +############################################################################ +def flatten_dict(d, flat_dict = None, prefix = ''): + + # Use None instead of a mutable default argument so that flattened + # results do not leak between calls + if flat_dict is None: + flat_dict = {} + + for k in d: + v = d[k] + + if type(v) is dict: + flatten_dict(v, flat_dict, prefix+k+'.') + else: + flat_dict[prefix+k] = v + + return flat_dict diff --git a/automation/experiment/tests/test2.bat b/automation/experiment/tests/test2.bat new file mode 100644 index 0000000000..5ecb3a0d8d --- /dev/null +++ b/automation/experiment/tests/test2.bat @@ -0,0 +1 @@ +cm run experiment --tags=test @test_input.yaml -- echo %VAR1% --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-%%VAR3%% diff --git a/automation/experiment/tests/test2.sh b/automation/experiment/tests/test2.sh new file mode 100644 index 0000000000..40d60a25a3 --- /dev/null +++ b/automation/experiment/tests/test2.sh @@ -0,0 +1 @@ +cm run experiment --tags=test @test_input.yaml -- echo "\${VAR1} --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-\${VAR3}" \ No newline at end of file diff --git a/automation/experiment/tests/test3.bat b/automation/experiment/tests/test3.bat new file mode 100644 index 0000000000..800e36076d --- /dev/null +++ b/automation/experiment/tests/test3.bat @@ -0,0 +1 @@ +cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}} diff --git a/automation/experiment/tests/test3.sh b/automation/experiment/tests/test3.sh new file mode 100644 index 0000000000..148e564337 --- /dev/null +++ b/automation/experiment/tests/test3.sh @@ -0,0 +1 @@ +cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}} diff --git a/automation/experiment/tests/test3_input.yaml b/automation/experiment/tests/test3_input.yaml new file mode 100644 index 0000000000..1c789f52a5 --- /dev/null +++ b/automation/experiment/tests/test3_input.yaml @@ -0,0 +1,4 @@ +explore: + VAR1: [1,2,3] + VAR2: ["a","b"] + CM_ENV_TEST3: "[2**i for i in range(0,6)]" diff --git a/automation/experiment/tests/test__json.bat b/automation/experiment/tests/test__json.bat new file mode 100644 index 0000000000..16eb9184b8 --- /dev/null +++ b/automation/experiment/tests/test__json.bat @@ -0,0 +1 @@ +cm run experiment --tags=test @test_input.json -- {{CD}}\test_run.bat diff --git a/automation/experiment/tests/test__json.sh b/automation/experiment/tests/test__json.sh new file mode 100644 index 0000000000..a46cb98f5a --- /dev/null +++ b/automation/experiment/tests/test__json.sh @@ -0,0 +1 @@ +cm run experiment --tags=test @test_input.json -- {{CD}}/test_run.sh diff --git
a/automation/experiment/tests/test__yaml.bat b/automation/experiment/tests/test__yaml.bat new file mode 100644 index 0000000000..e583f209bf --- /dev/null +++ b/automation/experiment/tests/test__yaml.bat @@ -0,0 +1 @@ +cm run experiment --tags=test @test_input.yaml -- {{CD}}\test_run.bat diff --git a/automation/experiment/tests/test__yaml.sh b/automation/experiment/tests/test__yaml.sh new file mode 100644 index 0000000000..60c2f7a80c --- /dev/null +++ b/automation/experiment/tests/test__yaml.sh @@ -0,0 +1 @@ +cm run experiment --tags=test @test_input.yaml -- {{CD}}/test_run.sh diff --git a/automation/experiment/tests/test_input.json b/automation/experiment/tests/test_input.json new file mode 100644 index 0000000000..f682f5a344 --- /dev/null +++ b/automation/experiment/tests/test_input.json @@ -0,0 +1,14 @@ +{ + "explore": { + "VAR1": [ + 1, + 2, + 3 + ], + "VAR2": [ + "a", + "b" + ], + "VAR3": "[2**i for i in range(0,6)]" + } +} diff --git a/automation/experiment/tests/test_input.yaml b/automation/experiment/tests/test_input.yaml new file mode 100644 index 0000000000..a621c5ef95 --- /dev/null +++ b/automation/experiment/tests/test_input.yaml @@ -0,0 +1,4 @@ +explore: + VAR1: [1,2,3] + VAR2: ["a","b"] + VAR3: "[2**i for i in range(0,6)]" diff --git a/automation/experiment/tests/test_run.bat b/automation/experiment/tests/test_run.bat new file mode 100644 index 0000000000..b3aa91028e --- /dev/null +++ b/automation/experiment/tests/test_run.bat @@ -0,0 +1,3 @@ +echo %VAR1% --batch_size=%VAR3% %VAR2% + +echo {"x":%VAR1%, "y":"%VAR2%", "z":%VAR3%} > cm-output.json diff --git a/automation/experiment/tests/test_run.sh b/automation/experiment/tests/test_run.sh new file mode 100644 index 0000000000..7ed1b472ed --- /dev/null +++ b/automation/experiment/tests/test_run.sh @@ -0,0 +1 @@ +echo $VAR1 --batch_size=$VAR3 $VAR2 diff --git a/automation/project/README.md b/automation/project/README.md new file mode 100644 index 0000000000..e684ac7ade --- /dev/null +++ b/automation/project/README.md @@ -0,0 +1,27 @@ +*This README is automatically generated - don't edit! 
Use `README-extra.md` for extra notes!* + +### Automation actions + +#### test + + * CM CLI: ```cm test project``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/project/module.py#L15)) + * CM CLI with UID: ```cm test project,6882553224164c56``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/project/module.py#L15)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'test' + 'automation':'project,6882553224164c56' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/project/module.py#L15) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/automation/project/_cm.json b/automation/project/_cm.json new file mode 100644 index 0000000000..68042c4319 --- /dev/null +++ b/automation/project/_cm.json @@ -0,0 +1,10 @@ +{ + "alias": "project", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "developers": "[Grigori Fursin](https://cKnowledge.org/gfursin)", + "tags": [ + "automation" + ], + "uid": "6882553224164c56" +} diff --git a/automation/project/module.py b/automation/project/module.py new file mode 100644 index 0000000000..be8d6e7b1d --- /dev/null +++ b/automation/project/module.py @@ -0,0 +1,52 @@ +import os + +from cmind.automation import Automation +from cmind import utils + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print (json.dumps(i, indent=2)) + + return {'return':0} diff --git a/automation/report/README.md b/automation/report/README.md new file mode 100644 index 0000000000..6f2f966963 --- /dev/null +++ b/automation/report/README.md @@ -0,0 +1,27 @@ +*This README is automatically generated - don't edit! 
Use `README-extra.md` for extra notes!* + +### Automation actions + +#### test + + * CM CLI: ```cm test report``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/report/module.py#L15)) + * CM CLI with UID: ```cm test report,6462ecdba2054467``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/report/module.py#L15)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'test' + 'automation':'report,6462ecdba2054467' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/report/module.py#L15) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/automation/report/_cm.json b/automation/report/_cm.json new file mode 100644 index 0000000000..8808957575 --- /dev/null +++ b/automation/report/_cm.json @@ -0,0 +1,9 @@ +{ + "alias": "report", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "tags": [ + "automation" + ], + "uid": "6462ecdba2054467" +} diff --git a/automation/report/module.py b/automation/report/module.py new file mode 100644 index 0000000000..be8d6e7b1d --- /dev/null +++ b/automation/report/module.py @@ -0,0 +1,52 @@ +import os + +from cmind.automation import Automation +from cmind import utils + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print (json.dumps(i, indent=2)) + + return {'return':0} diff --git a/automation/script/README-extra.md b/automation/script/README-extra.md new file mode 100644 index 0000000000..c5e2607bbc --- /dev/null +++ b/automation/script/README-extra.md @@ -0,0 +1,1023 @@ +[ [Back to index](../../../docs/README.md) ] + +# CM "script" automation + +
+Click here to see the table of contents. + + * [Motivation](#motivation) + * [Obtaining shared CM scripts](#obtaining-shared-cm-scripts) + * [Getting started with CM scripts](#getting-started-with-cm-scripts) + * [Understanding CM scripts](#understanding-cm-scripts) + * [Wrapping native scripts](#wrapping-native-scripts) + * [Modifying environment variables](#modifying-environment-variables) + * [Understanding unified output dictionary](#understanding-unified-output-dictionary) + * [Modifying state dictionary](#modifying-state-dictionary) + * [Running CM scripts via CM Python API](#running-cm-scripts-via-cm-python-api) + * [Assembling pipelines (workflows) of CM scripts](#assembling-pipelines-workflows-of-cm-scripts) + * [Customizing CM script execution flow](#customizing-cm-script-execution-flow) + * [Caching output of CM scripts](#caching-output-of-cm-scripts) + * [Assembling pipeline to compile and run image corner detection](#assembling-pipeline-to-compile-and-run-image-corner-detection) + * [Customizing sub-dependencies in a pipeline](#customizing-sub-dependencies-in-a-pipeline) + * [Using Python virtual environments](#using-python-virtual-environments) + * [Assembling pipelines with other artifacts included](#assembling-pipelines-with-other-artifacts-included) + * [Unifying host OS and CPU detection](#unifying-host-os-and-cpu-detection) + * [Detecting, installing and caching system dependencies](#detecting-installing-and-caching-system-dependencies) + * [Using variations](#using-variations) + * [Running CM scripts inside containers](#running-cm-scripts-inside-containers) + * [Getting help about other script automation flags](#getting-help-about-other-script-automation-flags) + * [Further reading](#further-reading) + +
+ +*We suggest you check [CM introduction](https://github.com/mlcommons/ck/blob/master/docs/introduction-cm.md) + and [CM CLI/API](https://github.com/mlcommons/ck/blob/master/docs/interface.md) to understand CM motivation and concepts. + You can also try [CM tutorials](https://github.com/mlcommons/ck/blob/master/docs/tutorials/README.md) + to run some applications and benchmarks on your platform using CM scripts.* + +## Motivation + +While helping the community reproduce [150+ research papers](https://learning.acm.org/techtalks/reproducibility), +we have noticed that researchers always create their own ad-hoc scripts, environment variables and files +to perform *exactly the same steps (actions) across all papers* to prepare, run and reproduce their experiments +across different software, hardware, models and data. + +![](https://raw.githubusercontent.com/ctuning/ck-guide-images/master/cm-ad-hoc-projects.png) + +This experience motivated us to create a CM automation called "script" to wrap native scripts +from research and industrial projects with a common, simple and unified CM Command Line Interface and Python API. + +Such non-intrusive wrapping helps to make numerous native scripts and tools more reusable, interoperable, portable, findable +and deterministic across different projects with different artifacts based on [FAIR principles](https://www.go-fair.org/fair-principles). + +CM scripts can be embedded into existing projects with minimal or no modifications at all, and they can be connected +into powerful and portable pipelines and workflows using simple JSON or YAML files +to prepare, run and reproduce experiments across continuously changing technology. + +Importantly, CM scripts can be executed in the same way in a native user environment, +Python virtual environments (to avoid messing up the native environment) and containers +while automatically adapting to a given environment! + +![](https://raw.githubusercontent.com/ctuning/ck-guide-images/master/cm-unified-projects.png) + + + + + +## Obtaining shared CM scripts + +In order to reuse some CM scripts embedded into shared projects, +you need to install these projects via the CM interface. + +For example, to use automation scripts developed by the [MLCommons task force on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/list_of_scripts.md) +and shared via GitHub, you just need to pull this repository via CM: + +```bash +cm pull repo --url=https://github.com/mlcommons/ck +``` + +or + +```bash +cm pull repo mlcommons@ck +``` + +You can now see all available CM scripts in your system as follows: + +```bash +cm find script +cm find script install* | sort +``` + + +## Getting started with CM scripts + +You can run any of the above CM scripts on any platform as follows: +```bash +cm run script "tags separated by space" --keys=values --env.KEY=VALUE +cm run script --tags="tags separated by comma" --keys=values --env.KEY=VALUE +``` +or using a shortcut `cmr` available in CM V1.4.0+: +```bash +cmr "tags separated by space" --keys=values --env.KEY=VALUE +``` + +You can also use the `-j` flag to print JSON output at the end of the script execution +and the `-v` flag to show extra debug information during script execution.
+ +For example, you can download a ResNet-50 model in ONNX format from Zenodo using the following script: +```bash +cmr "download file" --url=https://zenodo.org/record/4735647/files/resnet50_v1.onnx +``` + +You can also obtain info about your OS (Linux, Windows, macOS) in a unified way and print JSON output +as well as CM debug info as follows: +```bash +cmr "detect os" -j -v +``` + +## Understanding CM scripts + +CM scripts are treated as standard CM artifacts with the associated CM automation ["script"](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script), +CM action ["run"](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/module.py#L73), +and JSON and/or YAML meta descriptions. + +CM scripts can be invoked by using their alias, unique ID or human-readable tags (the preferred method). + +For example, the [CM "Print Hello World" script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/print-hello-world) +simply wraps two native `run.sh` and `run.bat` scripts to print "Hello World" on Linux, macOS or Windows +together with a few environment variables: + +```bash +ls `cm find script print-hello-world` + +README.md _cm.json run.bat run.sh +``` + +It is described by this [_cm.json meta description file](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world/_cm.json) +with the following alias, UID and tags: + +```json +{ + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + + "alias": "print-hello-world", + "uid": "b9f0acba4aca4baa", + + "default_env": { + "CM_ENV_TEST1": "TEST1" + }, + + "env": { + "CM_ENV_TEST2": "TEST2" + }, + + "input_mapping": { + "test1": "CM_ENV_TEST1" + }, + + "new_env_keys": [ + "CM_ENV_TEST*" + ], + + "new_state_keys": [ + "hello_test*" + ], + + "tags": [ + "print", + "hello-world", + "hello world", + "hello", + "world", + "native-script", + "native", + "script" + ] +} +``` + +The `automation_alias` and `automation_uid` keys tell CM that this artifact can be used with the CM "script" automation. + +Therefore, this script can be executed from the command line in any of the following ways: + +```bash +cm run script print-hello-world +cm run script b9f0acba4aca4baa +cm run script --tags=print,native-script,hello-world +cm run script "print native-script hello-world" +``` + +The same script can also be executed using the CM Python API as follows: +```python +import cmind + +output = cmind.access({'action':'run', 'automation':'script', 'tags':'print,native-script,hello-world'}) +if output['return']>0: + cmind.error(output) + +import json +print (json.dumps(output, indent=2)) +``` + +Normally you should see the following output along with some debug information (that will be removed soon): + +```bash + +... + +CM_ENV_TEST1 = TEST1 +CM_ENV_TEST2 = TEST2 + +HELLO WORLD! +... +``` + +### Wrapping native scripts + +*run.bat* and *run.sh* are native scripts that will be executed by this CM script in a unified way on Linux, macOS and Windows: + +```bash +echo "" +echo "CM_ENV_TEST1 = ${CM_ENV_TEST1}" +echo "CM_ENV_TEST2 = ${CM_ENV_TEST2}" + +echo "" +echo "HELLO WORLD!" +``` + +The idea of using native scripts is to make it easier for researchers and engineers to reuse their existing automation scripts +while providing a common CM wrapper with a unified CLI, Python API and extensible meta descriptions.
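Conceptually, the wrapper only has to pick the right native script for the host OS and run it with a prepared environment. The following simplified Python sketch illustrates this idea only; the real "script" automation additionally handles dependencies, caching, variations and state, and the helper name and paths below are hypothetical:

```python
import os
import platform
import subprocess

def run_native(script_dir, env_updates):
    """Run run.bat on Windows and run.sh elsewhere with extra env vars."""
    is_windows = platform.system() == 'Windows'
    script = os.path.join(script_dir, 'run.bat' if is_windows else 'run.sh')

    env = os.environ.copy()
    env.update(env_updates)  # e.g. values passed via --env.KEY=VALUE

    cmd = [script] if is_windows else ['bash', script]
    return subprocess.run(cmd, env=env).returncode

# Hypothetical usage, mimicking:
#   cm run script "print native-script hello-world" --env.CM_ENV_TEST1=ABC1
# rc = run_native('/path/to/print-hello-world', {'CM_ENV_TEST1': 'ABC1'})
```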
+
+### Modifying environment variables
+
+The CM script automation CLI uses the flag `--env.VAR=VALUE` to set an environment variable and pass it to a native script,
+as shown in this example:
+
+```bash
+cm run script "print native-script hello-world" \
+     --env.CM_ENV_TEST1=ABC1 --env.CM_ENV_TEST2=ABC2
+
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = TEST2
+
+HELLO WORLD!
+```
+
+Note that *CM_ENV_TEST2* did not change. This happens because the `env` dictionary in *_cm.json* forces *CM_ENV_TEST2* to *TEST2*,
+while the `default_env` dictionary allows environment variables to be updated externally.
+
+You can still force an environment variable to a given value externally using the `--const` flag as follows:
+
+```bash
+cm run script "print native-script hello-world" \
+     --env.CM_ENV_TEST1=ABC1 --const.CM_ENV_TEST2=ABC2
+
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = ABC2
+
+HELLO WORLD!
+
+```
+
+You can also use a JSON file instead of flags. Create *input.json* (or any other filename):
+```json
+{
+  "tags":"print,native-script,hello-world",
+  "env":{
+    "CM_ENV_TEST1":"ABC1"
+  }
+}
+```
+
+and run the CM script with this input file as follows:
+```
+cm run script @input.json
+```
+
+
+You can use a YAML file instead of the CLI in the same way. Create *input.yaml* (or any other filename):
+```yaml
+tags: "print,hello-world,script"
+env:
+  CM_ENV_TEST1: "ABC1"
+```
+
+and run the CM script with this input file as follows:
+```
+cm run script @input.yaml
+```
+
+Finally, you can map any other flag from the script CLI to an environment variable
+using the `input_mapping` key in the `_cm.json` meta description of this script:
+
+```bash
+cm run script "print native-script hello-world" --test1=ABC1
+
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = TEST2
+
+HELLO WORLD!
+
+```
+
+
+### Understanding the unified output dictionary
+
+You can see the output of a given CM script in JSON format by adding the `--out=json` flag as follows:
+
+```bash
+cm run script --tags=print,hello-world,script --env.CM_ENV_TEST1=ABC1 --out=json
+
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = TEST2
+
+HELLO WORLD!
+
+{
+  "deps": [],
+  "env": {
+    "CM_ENV_TEST1": "ABC1",
+    "CM_ENV_TEST2": "TEST2"
+  },
+  "new_env": {
+    "CM_ENV_TEST1": "ABC1",
+    "CM_ENV_TEST2": "TEST2"
+  },
+  "new_state": {},
+  "return": 0,
+  "state": {}
+}
+```
+
+Note that `new_env` shows the new environment variables produced and explicitly exposed by this script
+via the `new_env_keys` key in the `_cm.json` meta description of this script.
+
+This is needed to assemble automation pipelines and workflows while avoiding their contamination
+with temporary environment variables. A CM script must explicitly expose the environment variables that will
+go to the next stage of a pipeline.
+
+In the following example, `CM_ENV_TEST3` will be added to `new_env` while `CM_XYZ` will not,
+since only the former is matched by `"new_env_keys":["CM_ENV_TEST*"]`:
+
+```bash
+cm run script --tags=print,hello-world,script --env.CM_ENV_TEST1=ABC1 --out=json --env.CM_ENV_TEST3=ABC3 --env.CM_XYZ=XYZ
+```
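+
+You can check the same filtering from the Python API. This is a minimal sketch
+(it assumes the script from above with `"new_env_keys":["CM_ENV_TEST*"]` in its meta description):
+
+```python
+import cmind
+
+# Pass several environment variables to the script; only those matching
+# new_env_keys (CM_ENV_TEST*) should appear in the resulting new_env
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'print,hello-world,script',
+                  'env':{'CM_ENV_TEST3':'ABC3',
+                         'CM_XYZ':'XYZ'}})
+if r['return']>0:
+    cmind.error(r)
+
+print(r['new_env'])   # expected to contain CM_ENV_TEST3 but not CM_XYZ
+```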
+
+### Modifying the state dictionary
+
+Sometimes, scripts and workflows need more complex structures than environment variables.
+For that, we use a dictionary `state` that can be updated and exposed by a given script via the `new_state_keys` key
+in the `_cm.json` meta description of this script.
+
+In the following example, the `hello_world` key will be updated in the `new_state` dictionary,
+while the `hello` key will not, because it is not matched by the wildcard `"new_state_keys":["hello_world*"]`:
+
+```bash
+cm run script --tags=print,hello-world,script --out=json \
+     --state.hello=xyz1 --state.hello_world=xyz2
+
+...
+
+{
+  "deps": [],
+  "env": {
+    "CM_ENV_TEST1": "TEST1",
+    "CM_ENV_TEST2": "TEST2"
+  },
+  "new_env": {
+    "CM_ENV_TEST1": "TEST1",
+    "CM_ENV_TEST2": "TEST2"
+  },
+  "new_state": {
+    "hello_world": "xyz2"
+  },
+  "return": 0,
+  "state": {
+    "hello": "xyz1",
+    "hello_world": "xyz2"
+  }
+}
+```
+
+### Running CM scripts via the CM Python API
+
+You can run a given CM script from Python or Jupyter notebooks as follows:
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'print,hello-world,script',
+                  'const':{
+                    'CM_ENV_TEST1':'ABC1',
+                  },
+                  'env':{
+                    'CM_ENV_TEST2':'ABC2'
+                  },
+                  'state': {
+                    'hello':'xyz1',
+                    'hello_world':'xyz2'
+                  }
+                 })
+
+print(r)
+
+```
+
+```bash
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = TEST2
+
+HELLO WORLD!
+
+{'return': 0,
+ 'env': {'CM_ENV_TEST2': 'TEST2', 'CM_ENV_TEST1': 'ABC1'},
+ 'new_env': {'CM_ENV_TEST2': 'TEST2', 'CM_ENV_TEST1': 'ABC1'},
+ 'state': {'hello': 'xyz1', 'hello_world': 'xyz2'},
+ 'new_state': {'hello_world': 'xyz2'},
+ 'deps': []}
+
+```
+
+
+
+### Assembling pipelines (workflows) of CM scripts
+
+We've added a simple mechanism to chain reusable CM scripts into complex pipelines
+without the need for specialized workflow frameworks.
+
+Simply add a `"deps"` list to the `_cm.json` or `_cm.yaml` of your script as follows:
+
+```json
+
+{
+  "deps": [
+    {
+      "tags": "a string of comma-separated tags to find and execute the 1st CM script"
+    },
+    {
+      "tags": "a string of comma-separated tags to find and execute the 2nd CM script"
+    },
+    ...
+  ]
+}
+
+```
+
+This CM script will run all dependent scripts in the above sequence, aggregate the environment variables and the `state` dictionary,
+and will then run its native scripts.
+
+You can also turn on specific dependencies in this pipeline based on the values of specific environment variables
+or on a min/max version (if supported) as follows:
+
+```json
+
+{
+  "deps": [
+    {
+      "tags": "a string of comma-separated tags to find and execute the 1st CM script",
+      "enable_if_env": { "USE_CUDA" : ["yes", "YES", "true"] }
+    },
+    {
+      "tags": "a string of comma-separated tags to find and execute the 2nd CM script",
+      "enable_if_env": { "USE_CPU" : ["yes", "YES", "true"] },
+      "version_min": "3.10"
+    },
+    ...
+  ]
+}
+
+```
+
+You can also specify dependencies to be invoked after executing the native scripts
+using a `"post_deps"` dictionary with the same format as `"deps"`.
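+
+The semantics of `enable_if_env` are simple: a dependency is turned on only when every listed variable
+matches one of the allowed values. The following Python sketch illustrates the rule itself
+(it is an illustration, not CM's actual implementation):
+
+```python
+# Illustrative sketch of the enable_if_env rule (not CM's actual code)
+def dep_enabled(env, enable_if_env):
+    # A dependency runs only if every key in enable_if_env is present
+    # in env with one of the allowed values
+    return all(str(env.get(key)) in [str(v) for v in values]
+               for key, values in enable_if_env.items())
+
+print(dep_enabled({'USE_CUDA': 'yes'}, {'USE_CUDA': ['yes', 'YES', 'true']}))  # True
+print(dep_enabled({'USE_CUDA': 'no'},  {'USE_CUDA': ['yes', 'YES', 'true']}))  # False
+```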
+
+
+You can see an example of such dependencies in the [_cm.json](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world-py/_cm.json)
+of the ["print-hello-world-py" CM script](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world-py)
+that detects and unifies OS parameters using the ["detect-os" CM script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os),
+detects or builds Python using the ["get-python3" CM script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+and then runs `code.py` with "Hello World" from `run.sh` or `run.bat`:
+
+```bash
+cm run script "print python hello-world"
+```
+
+
+
+### Customizing the CM script execution flow
+
+If a developer adds a `customize.py` file inside a given CM script,
+it can be used to programmatically update environment variables, prepare input scripts
+and even invoke other scripts using Python.
+
+If a function `preprocess` exists in this file, the CM script automation will call it before
+invoking the native script.
+
+If this function returns `{"skip":True}` in its output,
+further execution of this script will be skipped.
+
+After executing the preprocess function, the CM script automation will record the global state dictionary
+into *tmp-state.json* and the local state dictionary from this CM script into *tmp-state-new.json*.
+
+The CM script automation will then run the native script (run.sh on Linux/macOS or run.bat on Windows)
+with all the environment variables merged from previous scripts.
+
+Note that native scripts can also create two files that will be automatically picked up and processed by the CM script automation:
+* *tmp-run-env.out* - a list of environment variables to update the "new_env" of a given CM script
+* *tmp-run-state.json* - the state dictionary to update the "new_state" of a given CM script
+
+If a `postprocess` function exists in the *customize.py* file, the CM script automation will call it
+to finalize the postprocessing of files, environment variables, and the state dictionary.
+
+You can see an [example of such a `customize.py` module](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-python3/customize.py) in the CM script
+that [detects or installs/builds a Python interpreter](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) in a unified way on any machine.
+
+This script exposes a number of environment variables for the detected Python
+in its [`postprocess` function](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-python3/customize.py#L60):
+
+* `CM_PYTHON_BIN` - python3.10, python.exe or any other name of the Python interpreter on a given system
+* `CM_PYTHON_BIN_PATH` - the path to the detected or installed Python
+* `CM_PYTHON_BIN_WITH_PATH` - the full path to the detected or installed Python
+* `LD_LIBRARY_PATH` - the LD_LIBRARY_PATH updated for this Python
+* `PATH` - the PATH updated for this Python
+
+These environment variables can be reused by other CM scripts or external tools
+while decoupling them from specific Python versions and paths, even allowing
+multiple versions of tools and artifacts to co-exist on the same system
+and be plugged into CM scripts:
+
+```bash
+cm run script "get python3" --out=json
+```
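+
+To make this flow concrete, here is a minimal `customize.py` sketch following the convention above.
+It assumes the interface used by the CM script templates, where each function receives a dictionary `i`
+with the current `env` and returns a dictionary with a `return` code; the variable names are illustrative:
+
+```python
+# Minimal customize.py sketch (interface per the CM script templates; names are illustrative)
+
+def preprocess(i):
+    env = i['env']
+
+    # Skip the native script entirely if a hypothetical flag is set
+    if env.get('CM_SKIP_THIS_SCRIPT', '') == 'yes':
+        return {'return': 0, 'skip': True}
+
+    # Prepare an environment variable for the native run.sh / run.bat
+    env['CM_ENV_PREPARED_BY_PREPROCESS'] = 'yes'
+
+    return {'return': 0}
+
+def postprocess(i):
+    env = i['env']
+
+    # Finalize environment variables after the native script has run
+    env['CM_ENV_SET_BY_POSTPROCESS'] = 'yes'
+
+    return {'return': 0}
+```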
+
+### Caching the output of CM scripts
+
+By default, CM scripts run wrapped scripts and tools, update environment variables and produce new files in the current directory.
+
+In many cases, we want to cache the output and the environment variables when we run the same CM script with the same input again,
+to avoid potentially lengthy detections, downloads, builds and data pre/post-processing.
+
+That's why we have developed another CM automation called ["cache"](../cache/README-extra.md)
+to cache the output of scripts in "cache" artifacts in the "local" CM repository
+that can be found by tags or unique IDs like any other CM artifact.
+
+Our convention is to use the name *get-{tool or artifact}* for CM scripts that detect already installed artifacts,
+prepare their environment and cache them in the *local* CM repository using the "cache" automation.
+
+If the artifact is not installed, we either enhance the above scripts to include the download, installation and even building
+of a given artifact (if it's a tool), or we create extra CM scripts *install-{tool or artifact}*
+that download and prepare tools and artifacts (install, build, preprocess, etc).
+
+For example, the CM script [*get-python3*](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+has a *customize.py* with a *preprocess* function that implements the search for python3 on Linux
+or python.exe on Windows, two native scripts *run.sh* and *run.bat* to obtain the version of the detected Python installation,
+and a *postprocess* function to prepare the environment variables *CM_PYTHON_BIN* and *CM_PYTHON_BIN_WITH_PATH*
+that can be used by other CM scripts:
+
+```bash
+cm run script "get python" --out=json
+```
+
+If you run it for the first time and CM detects multiple versions of Python co-existing on your system,
+it will ask you to select one. CM will then cache the output in a *cache* artifact of the CM repository.
+You can see all *cache* CM entries for other tools and artifacts as follows:
+
+```bash
+cm show cache
+```
+or
+```bash
+cm show cache --tags=get,python
+```
+
+You can see the cached files as follows:
+```bash
+ls `cm find cache --tags=get,python`
+```
+
+* _cm.json - the CM meta description of this "cache" artifact with its unique ID, tags and other meta information
+* cm-cached-state.json - a dictionary with the new environment variables and the new state dictionary
+* tmp-env-all.sh - all environment variables used during CM script execution
+* tmp-env.sh - only the new environment variables produced after CM script execution (it can be used directly by external tools)
+* tmp-run.sh - all environment variables and a call to the native script (useful for reproducibility)
+* tmp-state.json - the state before running the native script - it can be loaded and used by native scripts and tools instead of using environment variables
+* tmp-ver.out - the output of the --version command parsed by the `postprocess` and `detect_version` functions in `customize.py`
+
+
+If you (or another CM script) run this CM script to get the Python tool a second time, CM will reuse the cached output:
+```bash
+cm run script "get python" --out=json
+```
+
+This also allows us to install multiple tool versions into different CM cache entries (Python virtual environments,
+LLVM compilers, etc) and use them separately without the need to change higher-level CM scripts - these tools
+will be automatically plugged in:
+
+```bash
+cm run script "install prebuilt llvm" --version=14.0.0
+cm run script "install prebuilt llvm" --version=16.0.0
+cm run script "install src llvm"
+```
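+
+You can locate such cache entries from Python as well. This is a minimal sketch using the `find` action
+of the `cache` automation; it assumes the same `cmind.access` interface as above and that returned artifacts expose a `path` attribute:
+
+```python
+import cmind
+
+# Python equivalent of `cm find cache --tags=get,python`
+r = cmind.access({'action':'find',
+                  'automation':'cache',
+                  'tags':'get,python'})
+if r['return']>0:
+    cmind.error(r)
+
+for entry in r['list']:
+    print(entry.path)   # directory with _cm.json, cm-cached-state.json, etc.
+```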
+
+Such an approach allows us to "probe" the user environment, detect different tools and artifacts, unify them
+and adapt complex applications to a user environment in an automatic, transparent and non-intrusive way,
+as shown in the next example.
+
+
+
+## Assembling a pipeline to compile and run image corner detection
+
+We can use a compiler automatically detected by CM scripts to create simple and technology-neutral compilation and execution pipelines
+in CM scripts.
+
+For example, we have implemented a simple [image corner detection CM script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-image-corner-detection)
+with [this meta description](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-corner-detection/_cm.json).
+
+It uses two other reusable CM scripts to compile a given program using a compiler detected/installed and cached via CM (such as LLVM),
+and then run it with some input image.
+
+First, let's detect an installed LLVM via CM:
+
+```bash
+cm run script "get llvm"
+```
+or install a prebuilt version on Linux, macOS or Windows:
+```bash
+cm run script "install prebuilt llvm" --version=14.0.0
+```
+
+We can then run this CM script to compile and run image corner detection as follows:
+```bash
+cm run script "app image corner-detection" --input=`cm find script --tags=app,image,corner-detection`/computer_mouse.pgm
+```
+
+This CM script will preset environment variables for the detected/installed compiler,
+compile our C program, run it via `run.sh` (Linux/macOS) or `run.bat` (Windows)
+and generate an output image *output_image_with_corners.pgm* in the `output` directory of this script:
+
+```bash
+ls `cm find script --tags=app,image,corner-detection`/output
+
+image-corner output_image_with_corners.pgm
+
+```
+
+Note that this directory also contains the compiled tool "image-corner" that can now be used independently from CM if necessary.
+
+
+
+### Customizing sub-dependencies in a pipeline
+
+When running a CM script with many sub-dependencies, similar to the above example,
+we may want to specify version constraints on sub-dependencies such as LLVM.
+
+One can use the key `"names"` in the "deps" list of any CM script meta description
+to specify one or more names for a given dependency.
+
+For example, the dependency on "get compiler" in the CM script "compile-program"
+has `"names":["compiler"]` as shown [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/compile-program/_cm.json#L15).
+
+We can now use the CM script flag `--add_deps_recursive.{some name}.{some key}={some value}` or
+`--adr.{above name}.{some key}={some value}` to update the dictionary of any sub-dependency
+that has `some name`.
+
+For example, we can force the use of LLVM 16.0.0 for image corner detection as follows:
+```bash
+cm run script "app image corner-detection" --adr.compiler.tags=llvm --adr.compiler.version=16.0.0
+```
+
+If this compiler was not yet detected or installed by CM, it will find related scripts
+to install either a prebuilt version of LLVM or build it from source.
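+
+The same sub-dependency customization should be available from the Python API. In the following minimal sketch
+we assume that the dotted CLI flags `--adr.compiler.*` map to the nested `add_deps_recursive` dictionary below:
+
+```python
+import cmind
+
+# Assumed Python equivalent of:
+#   cm run script "app image corner-detection" --adr.compiler.tags=llvm --adr.compiler.version=16.0.0
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'app,image,corner-detection',
+                  'add_deps_recursive':{
+                      'compiler': {'tags':'llvm', 'version':'16.0.0'}
+                  }})
+if r['return']>0:
+    cmind.error(r)
+```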
+
+## Using Python virtual environments
+
+By default, CM scripts will install Python dependencies into the user space.
+This can influence other existing projects and may not be desirable.
+CM can be used inside virtual Python environments without any changes,
+but a user still needs to perform a few manual steps to set up such an environment.
+That's why we've developed a [CM script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-python-venv)
+to automate the creation of multiple Python virtual environments with different names:
+
+```bash
+cm run script "install python-venv" --name={some name}
+```
+
+CM will create a virtual environment using the default Python and save it in the CM cache.
+It is also possible to create a Python virtual environment with a minimum required version
+or with a specific one on Linux and macOS as follows:
+
+```bash
+cm run script "install python-venv" --version_min=3.8 --name=mlperf
+cm run script "install python-venv" --version=3.10.8 --name=mlperf2
+```
+
+In the second case, CM will attempt to detect Python 3.10.8 on the system.
+If CM can't detect it, it will automatically download and build it
+using [this script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-python-src).
+
+Now, when a user runs pipelines that install Python dependencies, CM will detect
+both the virtual environments in the CM cache and the native Python, and will ask the user
+which one to use.
+
+It is possible to avoid such questions by using the flag `--adr.python.name=mlperf`.
+In that case, CM will propagate the name of the virtual environment to all sub-dependencies
+as shown in the next example.
+
+Instead of adding this flag to all scripts, you can specify it once
+using the `CM_SCRIPT_EXTRA_CMD` environment variable as follows:
+```bash
+export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf"
+```
+
+You can even specify the minimum required Python version as follows:
+```bash
+export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf --adr.python.version_min=3.9"
+```
+
+## Assembling pipelines with other artifacts included
+
+We can now use existing CM scripts as "LEGO" blocks to assemble more complex automation pipelines and workflows
+while automatically downloading, plugging in
+and pre-/post-processing all necessary artifacts (models, data sets, frameworks, compilers, etc)
+on any supported platform (Linux, macOS, Windows).
+
+For example, we have implemented a simple image classification application automated by the following CM script:
+[*app-image-classification-onnx-py*](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-image-classification-onnx-py).
+
+It is described by the following [`_cm.yaml`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml) meta description:
+
+```yaml
+alias: app-image-classification-onnx-py
+uid: 3d5e908e472b417e
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "Modular ML/AI applications"
+
+tags:
+- app
+- image-classification
+- onnx
+- python
+
+default_env:
+  CM_BATCH_COUNT: '1'
+  CM_BATCH_SIZE: '1'
+
+deps:
+- tags: detect,os
+- tags: get,sys-utils-cm
+- names:
+  - python
+  - python3
+  tags: get,python3
+- tags: get,cuda
+  names:
+  - cuda
+  enable_if_env:
+    USE_CUDA:
+    - yes
+- tags: get,dataset,imagenet,image-classification,original
+- tags: get,dataset-aux,imagenet-aux,image-classification
+- tags: get,ml-model,resnet50,_onnx,image-classification
+
+- tags: get,generic-python-lib,_onnxruntime
+  skip_if_env:
+    USE_CUDA:
+    - yes
+- tags: get,generic-python-lib,_onnxruntime_gpu
+  enable_if_env:
+    USE_CUDA:
+    - yes
+
+variations:
+  cuda:
+    env:
+      USE_CUDA: yes
+```
+
+
+Its `deps` pipeline runs other CM scripts to detect OS parameters, detect or install Python,
+install the latest ONNX runtime, and download the ResNet-50 model and a minimal ImageNet dataset (500 images).
+
+It also contains [`run.sh`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/run.sh)
+and [`run.bat`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/run.bat)
+to install extra Python requirements (not yet unified by CM scripts)
+and run a Python script that classifies an image from ImageNet
+or an image provided by the user.
+
+Before running it, let us install a Python virtual environment via CM to avoid altering
+the native Python installation:
+```bash
+cm run script "install python-venv" --name=my-test
+cm show cache --tags=python
+```
+
+You can run it on any system as follows:
+
+```bash
+cm run script "python app image-classification onnx"
+
+```
+
+
+To avoid CM asking which Python to use, you can force the use of the Python virtual environment
+as follows:
+
+```bash
+cm run script "python app image-classification onnx" --adr.python.name=my-test
+```
+
+
+
+If you run this CM script for the first time, it may take a few minutes because it will detect, download, build and cache all dependencies.
+
+When you run it again, it will plug in all the cached dependencies:
+
+```bash
+cm run script "python app image-classification onnx" --adr.python.name=my-test
+
+```
+
+You can then run it with your own image as follows:
+```bash
+cm run script --tags=app,image-classification,onnx,python \
+     --adr.python.name=my-test --input={path to my JPEG image}
+```
+
+
+
+## Unifying host OS and CPU detection
+
+In order to make experiments more portable and interoperable, we need to unify
+the information about the host OS and CPU across different systems.
+We are gradually improving the following two CM scripts:
+
+* [`detect-os`](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+* [`detect-cpu`](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu)
+
+These two CM scripts have a *customize.py* with preprocess and postprocess functions
+and a native run script to detect OS info and update the environment variables
+and the state dictionary needed by all other CM scripts.
+
+You can run them on your platform as follows:
+
+```bash
+cm run script "detect os" --out=json
+
+...
+
+cm run script "detect cpu" --out=json
+```
+
+If some information is missing or inconsistent across different platforms,
+you can improve it in a backwards-compatible way. You can then submit a PR [here](https://github.com/mlcommons/ck/pulls)
+to let the community reuse your knowledge and collaboratively enhance common automation scripts, pipelines and workflows -
+that's why we called our project "Collective Knowledge".
+
+
+## Detecting, installing and caching system dependencies
+
+Many projects require the installation of some system dependencies. Unfortunately, the procedure
+differs across systems.
+
+That's why we have developed two other CM scripts to unify and automate this process on any system:
+
+* [`get-sys-utils-cm`](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm)
+* [`get-sys-utils-min`](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-min)
+
+They will install (minimal) system dependencies based on the OS and CPU info detected by the CM scripts mentioned above.
+
+The last script is particularly useful for making applications compatible with Windows,
+where many typical tools like "wget", "patch", etc. are missing - they will be automatically
+downloaded by that script.
+
+You can use them as follows:
+```bash
+cm run script "get sys-utils-min" --out=json
+cm run script "get sys-utils-cm"
+```
+
+
+
+
+## Using variations
+
+In some cases, we want the same CM script to download some artifact in different formats.
+
+For example, we may want to download and cache the ResNet-50 model in ONNX, PyTorch, TensorFlow or TFLite format.
+
+In such cases, we use so-called `variations` in the meta description of a given CM script.
+
+For example, the CM script [`get-ml-model-resnet50`](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50) has many variations, and combinations of variations separated by commas,
+to download this model in multiple formats:
+
+* `onnx`
+* `onnx,opset-11`
+* `onnx,opset-8`
+* `pytorch`
+* `pytorch,fp32`
+* `pytorch,int8`
+* `tflite`
+* `tflite,argmax`
+* `tflite,no-argmax`
+* `tensorflow`
+* `batch_size.1`
+* `batch_size.#`
+
+These variations simply update environment variables and add more dependencies on other CM scripts
+before running `customize.py` and the native scripts, as described in the [_cm.json](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-ml-model-resnet50/_cm.json#L30).
+
+It is possible to specify a required variation or multiple variations when running a given CM script by adding extra tags with the "_" prefix.
+
+For example, you can install the quantized ResNet-50 model in the PyTorch int8 format as follows:
+
+```bash
+cm run script "get ml-model resnet50 _pytorch _int8" --out=json
+```
+
+You can install the FP32 variation of this model at the same time:
+```bash
+cm run script "get ml-model resnet50 _pytorch _fp32" --out=json
+```
+
+You can now find them in the cache by tags and variations as follows:
+```bash
+cm show cache --tags=get,ml-model,resnet50
+cm show cache --tags=get,ml-model,resnet50,_pytorch
+cm show cache --tags=get,ml-model,resnet50,_pytorch,_fp32
+```
+
+
+
+
+## Running CM scripts inside containers
+
+One of the important ideas behind using a common automation language
+is to use it inside and outside containers, thus avoiding the need to create
+ad-hoc manual containers and README files.
+
+We can just use base containers and let the CM automation language
+detect installed tools and connect external data with the automation pipelines and workflows.
+
+See examples of modular containers with the CM language to automate the MLPerf inference benchmark from MLCommons
+[here](https://github.com/mlcommons/ck/tree/master/docker).
+
+Note that we continue working on CM functionality to automatically generate
+Docker containers and README files when executing CM scripts
+(a prototype was successfully validated in the MLPerf inference v3.0 submission):
+
+* https://github.com/mlcommons/ck/tree/master/cm-mlops/script/build-dockerfile
+* https://github.com/mlcommons/ck/tree/master/cm-mlops/script/build-docker-image
+
+
+
+
+## Getting help about other script automation flags
+
+You can get help about all the flags used to customize the execution
+of a given CM script from the command line as follows:
+
+```bash
+cm run script --help
+```
+
+Some flags are useful to make it easier to debug scripts and save the output to files.
+
+You can find more info about the CM script execution flow in this [document](README-specs.md).
+
+
+
+
+## Further reading
+
+* [CM "script" automation specification](README-specs.md)
+* [MLCommons CM script sources](https://github.com/mlcommons/ck/tree/master/cm-mlops/script)
+* [List of portable and reusable CM scripts from MLCommons](https://github.com/mlcommons/ck/blob/master/docs/list_of_scripts.md)
+* [CM "cache" automation](../cache/README-extra.md)
diff --git a/automation/script/README-specs.md b/automation/script/README-specs.md
new file mode 100644
index 0000000000..58526d1687
--- /dev/null
+++ b/automation/script/README-specs.md
@@ -0,0 +1,81 @@
+# CM "script" automation specification
+
+Please check the [CM documentation](https://github.com/mlcommons/ck/tree/master/docs#collective-mind-language-cm)
+for more details about the CM automation language.
+
+See the CM script introduction [here](README-extra.md).
+
+See the [automatically generated catalog](https://github.com/mlcommons/ck/blob/master/docs/list_of_scripts.md) of all CM scripts from MLCommons.
+
+## Getting started with CM scripts
+
+* A CM script is identified by a set of tags and by a unique ID.
+* Furthermore, each CM script can have multiple variations; they are identified by variation tags, which are treated in the same way as tags but carry a `_` prefix.
+
+### CM script execution flow
+* When a CM script is invoked (either by tags or by unique ID), its `_cm.json` is processed first; if it lists any `deps` scripts, they are executed in order.
+* Once all the `deps` scripts are executed, the `customize.py` file is checked and, if it exists, the `preprocess` function inside it is executed (if present).
+* Then any `prehook_deps` CM scripts mentioned in `_cm.json` are executed, similar to `deps`.
+* After this, the keys in the `env` dictionary are exported as `ENV` variables and the `run` file, if it exists, is executed.
+* Once the run file execution is done, any `posthook_deps` CM scripts mentioned in `_cm.json` are executed, similar to `deps`.
+* Then the `postprocess` function inside `customize.py` is executed, if present.
+* After this stage, any `post_deps` CM scripts mentioned in `_cm.json` are executed.
+
+** If a script is already cached, then the `preprocess`, `run file` and `postprocess` executions won't happen and only the dependencies marked as `dynamic` will be executed from `deps`, `prehook_deps`, `posthook_deps` and `post_deps`.
+
+### Input flags
+When we run a CM script, we can also pass inputs to it; any input added to the `input_mapping` dictionary inside `_cm.json` gets converted to the corresponding `ENV` variable.
+
+### Conditional execution of any `deps`, `post_deps`
+We can use a `skip_if_env` dictionary inside any `deps`, `prehook_deps`, `posthook_deps` or `post_deps` entry to make its execution conditional.
+
+### Versions
+We can request a specific version of a script using `version`; `version_min` and `version_max` are also possible options.
+* When `version_min` is given, any version above it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version`, if present and above `version_min`, will be used for installation. Otherwise `version_min` will be used as `version`.
+* When `version_max` is given, any version below it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version`, if present and below `version_max`, will be used for installation. Otherwise `version_max_usable` (an additional input required together with `version_max`) will be used as `version`.
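+
+For example, assuming that CLI flags map directly to input dictionary keys, a minimal version-constrained run
+from the Python API could look like this (the tags and version are illustrative):
+
+```python
+import cmind
+
+# Illustrative sketch, equivalent to: cm run script "get python3" --version_min=3.8
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,python3',
+                  'version_min':'3.8'})
+if r['return']>0:
+    cmind.error(r)
+```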
+
+### Variations
+* Variations are used to customize a CM script, and each unique combination of variations uses a unique cache entry. Each variation can turn on `env` keys as well as any other meta, including dependencies specific to it. Variations are turned on like tags but with a `_` prefix. For example, if a script has the tags `"get,myscript"`, then to call the variation `"test"` inside it, we have to use the tags `"get,myscript,_test"`.
+
+#### Variation groups
+`group` is a key to map variations into a group; at any time, only one variation from a group can be used in the variation tags. For example, `cpu` and `cuda` can be two variations under the `device` group, and a user can at any time use either `cpu` or `cuda` as variation tags, but not both.
+
+#### Dynamic variations
+Sometimes it is difficult to enumerate all the variations needed for a script, such as `batch_size`, which can take many different values. To handle this case, we support dynamic variations using '#', where '#' can be dynamically replaced by any string. For example, `"_batch_size.8"` can be used as a tag to turn on the dynamic variation `"_batch_size.#"`.
+
+### ENV flow during CM script execution
+* [TBD] Issue added [here](https://github.com/mlcommons/ck/issues/382)
+* During a given script execution, the incoming `env` dictionary is saved (`saved_env`) and all updates happen on a copy of it.
+* Once a script execution is over (which includes all the dependent script executions as well), the newly created keys and any updated keys are merged with `saved_env`, provided the keys are mentioned in `new_env_keys`.
+* The same behaviour applies to the `state` dictionary.
+
+#### Special env keys
+* Any env key with the prefix `CM_TMP_` or `CM_GIT_` is not passed by default to any dependency. Such keys can be force-passed by adding them to the `force_env_keys` list of the concerned dependency.
+* Similarly, we can prevent any env key from being passed to a given dependency by adding its prefix to the `clean_env_keys` list of the concerned dependency.
+* `--input` is automatically converted to the `CM_INPUT` env key.
+* `version` is converted to `CM_VERSION`, `version_min` to `CM_VERSION_MIN` and `version_max` to `CM_VERSION_MAX`.
+* If `env['CM_GH_TOKEN']=TOKEN_VALUE` is set, then git URLs (specified by `CM_GIT_URL`) are changed to add this token.
+* If `env['CM_GIT_SSH']=yes`, then git URLs are changed from HTTPS to SSH.
+
+### Script Meta
+#### Special keys in script meta
+* TBD: `reuse_version`, `inherit_variation_tags`, `update_env_tags_from_env`
+
+### How does caching work?
+* If `cache=true` is set in a script's meta, the result of the script execution is cached for further use.
+* For a cached script, the `env` and `state` updates are done using the `new_env` and `new_state` dictionaries, which are stored in the `cm-cached-state.json` file inside the cached folder.
+* By using the `--new` input, a new cache entry can be forced even when an old one exists.
+* By default, no dependencies are run for a cached entry unless the `dynamic` key is set for them.
+
+### Updating ENV from inside the run script
+* [TBD]
+
+
+### Script workflow (env, deps, native scripts)
+
+![](assets/scripts-workflow.png)
+
+
+
+
+© 2022-23 [MLCommons](https://mlcommons.org)
diff --git a/automation/script/README.md b/automation/script/README.md new file mode 100644 index 0000000000..da54b2a5db --- /dev/null +++ b/automation/script/README.md @@ -0,0 +1,427 @@ +*This README is automatically generated - don't edit! See [extra README](README-extra.md) for extra notes!* + +### Automation actions + +#### run + + * CM CLI: ```cm run script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L77)) + * CM CLI with UID: ```cm run script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L77)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'run' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L77) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### version + + * CM CLI: ```cm version script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2041)) + * CM CLI with UID: ```cm version script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2041)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'version' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2041) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### search + + * CM CLI: ```cm search script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2069)) + * CM CLI with UID: ```cm search script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2069)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'search' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2069) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### test + + * CM CLI: ```cm test script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2188)) + * CM CLI with UID: ```cm test script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2188)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'test' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2188) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### native_run + + * CM CLI: ```cm native_run script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2254)) + * CM CLI with UID: ```cm native_run script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2254)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'native_run' + 
'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2254) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### add + + * CM CLI: ```cm add script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2327)) + * CM CLI with UID: ```cm add script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2327)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'add' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2327) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### run_native_script + + * CM CLI: ```cm run_native_script script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2955)) + * CM CLI with UID: ```cm run_native_script script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2955)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'run_native_script' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2955) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### find_file_in_paths + + * CM CLI: ```cm find_file_in_paths script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2996)) + * CM CLI with UID: ```cm find_file_in_paths script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2996)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'find_file_in_paths' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L2996) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### detect_version_using_script + + * CM CLI: ```cm detect_version_using_script script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3215)) + * CM CLI with UID: ```cm detect_version_using_script script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3215)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'detect_version_using_script' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3215) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### find_artifact + + * CM CLI: ```cm find_artifact script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3288)) + * CM CLI with UID: ```cm find_artifact script,5b4e0237da074764``` ([add flags (dict keys) from this 
API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3288)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'find_artifact' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3288) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### find_file_deep + + * CM CLI: ```cm find_file_deep script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3446)) + * CM CLI with UID: ```cm find_file_deep script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3446)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'find_file_deep' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3446) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### find_file_back + + * CM CLI: ```cm find_file_back script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3504)) + * CM CLI with UID: ```cm find_file_back script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3504)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'find_file_back' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3504) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### parse_version + + * CM CLI: ```cm parse_version script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3545)) + * CM CLI with UID: ```cm parse_version script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3545)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'parse_version' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3545) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### update_deps + + * CM CLI: ```cm update_deps script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3599)) + * CM CLI with UID: ```cm update_deps script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3599)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'update_deps' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3599) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### get_default_path_list + + * CM CLI: ```cm get_default_path_list script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3619)) + * CM CLI with UID: ```cm 
get_default_path_list script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3619)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'get_default_path_list' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3619) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### doc + + * CM CLI: ```cm doc script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3630)) + * CM CLI with UID: ```cm doc script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3630)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'doc' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3630) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### gui + + * CM CLI: ```cm gui script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3658)) + * CM CLI with UID: ```cm gui script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3658)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'gui' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3658) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### dockerfile + + * CM CLI: ```cm dockerfile script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3695)) + * CM CLI with UID: ```cm dockerfile script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3695)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'dockerfile' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3695) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### docker + + * CM CLI: ```cm docker script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3723)) + * CM CLI with UID: ```cm docker script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3723)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'docker' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3723) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### prepare + + * CM CLI: ```cm prepare script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3777)) + * CM CLI with UID: ```cm prepare script,5b4e0237da074764``` ([add flags (dict keys) from this 
API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3777)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'prepare' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3777) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### clean_some_tmp_files + + * CM CLI: ```cm clean_some_tmp_files script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3788)) + * CM CLI with UID: ```cm clean_some_tmp_files script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3788)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'clean_some_tmp_files' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script/module.py#L3788) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/automation/script/_cm.json b/automation/script/_cm.json new file mode 100644 index 0000000000..140662bfa1 --- /dev/null +++ b/automation/script/_cm.json @@ -0,0 +1,16 @@ +{ + "alias": "script", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "deps": { + "cache": "cache,541d6f712a6b464e" + }, + "desc": "Making native scripts more portable, interoperable and deterministic", + "developers": "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)", + "actions_with_help":["run", "docker"], + "sort": 1000, + "tags": [ + "automation" + ], + "uid": "5b4e0237da074764" +} diff --git a/automation/script/assets/scripts-workflow.png b/automation/script/assets/scripts-workflow.png new file mode 100644 index 0000000000000000000000000000000000000000..60d0ef7157eaed67afe1a06b0a3b09a3795b43e2 GIT binary patch literal 242876 zcmeFZ1yCGYxGp*|h@c_3Lju7A1lOQRa2?#;3GQS=a7jXNhXjWSF2g_)G`RaffZ!Gw zTwl)&d!KV|-8!dU)xA}(>g}##XHU;`_v*j@^~?8ttHYHQWwGy*-3Ng{*soqnse(Wl z{U8ug0NzbtkI&7fa1iJogSDijvbCux2=p>MKHgF}PMf%62O+%nPW+)nsIue-<|l!l z@N}LI6&HxFrYlR^h1K0;i3ocx{a*S8v0gl#t@NV@k$>GJat&g=+121osPaPFX^u4g zh*GTaD%q#s@zP~^)}{Z%=>|<$koos+9~0GvZ___xNJ$>`+@5iy!H)0$rm z@YGwpa~Zc%~_N8d_2f?`=Z)#-S zbA5~d>{h}zB7P9@uN%HE@FqaVG#HO>-srqpZ-n_2ByB{^f44}S%?hNDMVN+>{!6&- zW?9hmFLn})*tdA{ePXsUA--FLe=`gUC!E*YIGRnTw6 zr9)L3R3nZUc)M!lO4I<-k8zNy&4px#4#ur|kYI(pH8$>f)|W&hPhr8GN8&;<98w=e z+A43f{8g(2OB2)f-NCmIOSMCVpwym@vm{M(XL@9%a74ae+>G$_8az_ zVNYXEZO?}@B`JeMiB-ZbRjXPnbmIL4%l(WT#d&335%`lRjLSQo+%7MAxd z_N+b@*j3pTxR(-~9ULl^BULU{*{Tq3&74s5!YxZi8By3#04^kW9-Q}jap<@zy$WNB zXX-Jfq~)O3KEF5qh2<$o|M9E%=L!3XJqaZVjWkSVG+5Xeu^+xYHBPgLdM5gs>80~$ zx-W+>t6yWk>Xqw$HT_!lHSz1(SGaNkpNzZsJ6pRq6W2a@(M5h7O*r`2`%#3>N5Ne) zhTW2>Ba!8k5Zlp9s)X@`ql7uu0Qn|Ef5jIn-(S&6XRFQ^{V1W69Mt4gESKNSS%`Y5 z@Kc9LvrvBREZreYh{$}M@jA7OK;tiFU7pv~ z^3}2$pYuL1mn9UB7yAG$j*Q94-2+56FAS4*ITXe^0Adsv2B zqtXD;L?jbgN(gb&+9=~KvCpB*)i3pv#T?@t4IdRK;jX(6>+;nYDmvB3ZFmg$Q=UWQ zb{&;uzXFF6L~SR&DaHTmrT>NKMLz}u#yt$cR@j9h>>*wu@Q?sKHe_a{-IqqiaKdPen!DPbwA0Mi-(&qsV1c_sWACn%4kw_#hRUxJLg%5z|>5i zZ|aE`0+%R{XpJbJ?XqH|I;z&LCe5HlhoC;sxHmXHLL*5dJ$@j?n$pH^M8_)Ej=)0A 
zjs_|$m$_#8q-(Ol*hFIvLo}v4f;4SJl~ymLDD2z6+vHf82HjfRkhcBG7tzz%>XFO; zlK3E}-0t3t5cz|Tz>Q$}C6@0)tvh?@G`+pOhsdvplp@s|RO3JPWqS7uWa74l<*KIF z&Z_0Bcb$p!-vX6BMl`4DEDvn9+WE^ats78w zxRANFgXVMF?Jw&gDxq99s$(L^8v3ScRjpC<)2>wLXv{3}avrs{zrUA@LQZL3tU9KH?AK;$m*@;^H7l19yLtqL3Z( zbTpLc;d~o+t>APW36Dzo;6{yl1I_#Us45Wj%T`eB6{n4)OnPTSED7?Mmyy>e#HO1f z;p}kVY>um@MUp&$+t*|84YGPjiJhwpU2zX(Kz-wYS{4LU;1MB%gTf6bFt@x>9^}ae zw$L^WbQ~;N9#px-wfN5Ul9SunRVf5$RLf1iF4%q(O<+L1!Ikhzd9#Y!}+Gj-Hml1AGq3iRczJMd# zfMdwjX$$q`q2twAz}0l~#XRm+$JOyXZmP8@%3=RGra$Mq>xgqtz)|J@!`^#9HMPC_ zqHJ}md)szYnluF!rHDwCzLBP=2ogX_qJ-WNLWiJRrG|(Ih*UwO1c-E`S|~~ffe>1h z&O-9hLjC(YWDXQvc!+pDf=bE~d{nBPufaf}sG z)$$4sNALRlIqUWjUI!;7uUjj@ESX>VwtogGZ+<@l7NS#Itm-*qCLi<9KMfHfAHkd5 zV)}=Hst7v;H!Jhu=D6~PT`Hcd-K3epjcyX1_ji^hF7C}_hMS9v?*?P5W_o(lGp%uU z2SL1|1=d_E@<|tT<4pPQvYe*01KOfl9D|RcyK)ZNFbinL(r7GW35_Lyo~AOksN1>BpWAccox%Lf|6$uZ0bO^q;+Ig2L+$fXm)jXcW`ib zCw-_5mZ)B*>)_Vu#JQ!6pU_Fx$aKX(BEU;+ez6^u%<%K^xeLk3ie!hA1vBrM-zeoPeAw3Y^{)V} z!?z6@kb=3aYJ5jNM*;$DP-ov-FSbVtiCE!;)Q-=CiY~Rv@A5JqlEbqGWje6sgI$WP zF*R%1%PnAcXXRjIo~#@HC9ZLkrNZOTt!gDR6`l$;Awk<$f9I=Ebq~f)PmnO-8 z87YaBQKl@w*+G~47&bRWs+{-4tuzWFT*5Wyb+v=tXt|}W$+*c}0JpE<`mJGwT7#Y3 zT1hLh8Kg1I<+2V_-KNZ{rPrt4qUD-^{>}34YU|_#NWy>KsTD5NZ7Lssa=h-{OGT6U zezjill6N=;xuzPY{4oMc08JT|v$>s|lDkCidwV#vYRGQM~fn z-ff{HtxED-o8js73%Z&Mqp`TvPqFA88(wgIApO_%x;z#8*#Q(5NS9Oz$EQU|+!NiH zmO_6uE#e|Fvnt>jIyVurRa zc>86;IuRZ8|NDjUOi`!a$gLh6fy5&EFRm>CJMTPL<*H*9i4Io;rA~lj!88NFjW^gg z34LR5e!B6$rR|r!z!cyU>LJ7j2ugqm3ls+5J0q#k1Nb8bZ?I9t;ZsNba(75piG0g**aF%5&OG|HW z%k-Uh8r*q)ee00;G&*KY^8S7cu%p0_2|NwxE4#}uJ(9pADOpI2s&7D@4ve*AcJ@i$ z+2mu6k7h2(Kw=2lOt4rxNNvSQEseF9Fs49_%3NGn!n*KoPGHi`9g9A3yW$D->h`Lg z-7)YP60K-aF%&5O4%Y#lWFtxET{4`ZGL}fQR00n8us_H7yS#pM)`i7h=!P9~pN0?R z%#cgP7atLU$46*vX8JYR+}xrQmA8HnL8WtZrXp5Mhbd(FZ6TzfyzPPxuVUYTgGKEW zD_;5KKZNiEh0F7TiMFLYX^jm?%;wzEw{MXG|NIlh6y!3eZxeYOpr_q!?%s8Wq=<)P zylv#+k)+I67w}b#*$R<3sVpd-O>gu0Eb&b@j?Ynb#$TX0c= z$~FFD#5CcGcpY}Ks|ON;=;W928+D*Zho6Q0@BCn`9)HQi7`%(410?#%pWa4C;%n8f zi|%r1QJWi%ZmbtxG4_;beL63$Cb>KYdi>&LnygROAG2%yuAjuI;a8JPQSa#G&mZ6aY(`4soiasha(n22cxg-N3&J<7^~Vp; zlj>q(UszJac62Ib!iY7VbN6m`vBL+$@sOwwmO0oI%+Tzsf?d?3W>S0as7-iGUQANB zeCZj%v2MyT4UZ2n{on=Flnw{pBr?rZ1=M8^|iP6 z^@UY)?ok{22Hh#-#+XFw1Dk;aD&v-hN8T{B!jWGyM*HW~$Kc%TrrvC; z6<_=VXqDUXztq#74*usc&Sy;3YeVMoxdLjeAFB6TVGF+Bx33e|NFZiD_T)BO;uzM_ zc+^%9eUf&Ezl<&Sy}1CK} zlEvgTpxQ~QU4h{~aa(t6A%tGw&`)-F7po6`nkZ5ZnDxXhG|c+1QD#q|0}Pm9Vdv7f z4`XBh?6YpYem&uGAvqcU*Y!Q_bfv@Jgb_ra(de3u3PY5w=V+ndh&Y#Dj_=uOZ>xfP z)tk}?HG4-jF-E1hGaqhKj4?mg(9$*3RC443sXVsET+{MHaAPzdwpN^|Pup1h3X&CH zbyj4Qb;s{JL6TSb$Ni0fjoO9091$^6j((Oo2j;VZU3kL^Ia{7|O|k9=Q!;6Q%B+{_ zWqz}>au>_Sv6`A?!yg_@hkqGvvCiF)1>dw2`*U!1d;EhZx_pkT?DIoo5c6X-pZv=h zrRpOthRUjL?25=A5ALy?RIBGm&BvA$yPCP_8D@$}3tT<2dM30wrP4|6ICSecRLf+dWn^I zEBTje21nx51;)mb#qBntosa!iw*Q5Oe%>_>;C4Y;#V55?irunN?GkD zG|9`FRoxc)A}6I_x65C2_!h!tz|pn;hg`%R#72etkKsD)*uE7J-L!lK4Nljho+nB) zZz9<|XK3b~H*6^9=sG%#&GGJ!!LF@pfnvU6Q!~~eg;LHl*S=U6_cabfLRl^h*M)(N z>wxC+0UyU=!wQF@+ovDe{Mu_s`uk!X{l6U0@AIz>){@c534U8kJxa4EOd2vleUv@j{mcU&wMbX0EX zN{1%+qz=tF=dMS@q3kkmfR}PSXfKgw6E9otA^Ah;nXcuA&~#FKZ0>im*>utuE*>7P zjS{!B{eG1*bI_@OF9R`Nfbx)u+Ho;eJi=wlVnn=8BQ^*@47+8xCOv)lcr{}?*JQQ^ zjp7uyDqcWJ5cklu?o=Klb`1hY)^%>IrM!WeETK-pAsI`G32XPsa61=-hTnQohu6Af zwbPqxrji*E7e}|3|0@0i>QR)DZY*`stg7GKo`vld`pz8`>JEho6Oy}c;qvoyWMpKN zU`=kWi+m=zS35P|%loOPKO~B(MOXr$DFq3$ootY(!EtW~N#KD9eDp(R3tFj*TnZ4P zcTQvV5hK|JqjR(?R~iVORxuaQ0V{(|vHe@7$RhdntAfbOL526w-EvHS0uP72?plw= z3q*5QPGs1>dF<3h$SJTK?^n5EkHlH4e=FNrHDJb>hh{H=FZ@k&H}RsCiP`KDCvj6> zIp{yv*Fi8Y&}bozTpRMujA4`HUKAnEe=19_=(==-~i?f4q3*gpl&1}qnbgp4w! 
zTvHz)t-gL$W~?o`gKHgM5l1gPKgNK(=wIhT5ts}h@^;)#fLYYpp-VBgBKBGDmu?6XTe^9$n4d=t$lSPd&)0s~}1)PPY1zzw80oc>U5>oGM5G7>f zAThSo?$9VVTk>27L?*qxMsOSws0`=7+BHE%6>Q~B=L%Ea3j_95^?yD1ETkN-$|8x% zS{QFjR>mo9k;A(5F9!)SW#3*%=)7Xk=WbqxA6~WF!r1U|OFa1C>)u8Aact#?aUrS| z6iT$TBr>Jk>Mqk9UDy@T&lD{F(TtvAwcq!Ti;IsBoqi+>}5dE$UIV23k^wxH;0&l4LI$d=H(d+ z^euHu%0o=g(z%7q*MchEyZ11Uo->q%O!54s#yipuWlLi=kl1m&$yxN^+HPxm26Wze+V;8v7as>=)I`Hl zA0g}<98U||k)vfYA~G#2gnz2%Hck=$%5Ul^IrwH_SPD-r^=|{&`uRf1)=ZnvL)DSt zzy-du=_F()iB^}iBan!-=b*Fl>#+NYe(Rm#FhD8w#mBl$+*~UivZFSeGJW?jUBOElVBqERQ_BOJ(&r~EcxtWGM5L^+=EK&?@h@x|3Tx#{ zD@$yOq?|R6(No8T)}5N`w^1Labc*Fupfsq;pF6&r{-fUuBdde2R_){>690IrZglY> zdhMl~c!|xc{%Ucx>C*0Q^!9yhappseT7kKhG4g|W#Mb5-v|kCK?&8nv$$(@vZxcqQm2UZzKe zr#gndY-JB2OGd~Hv5)@7O!@nK`W1O(s9Rh#sT19$hv04Oi3b%Z_iejjS;Js|WE7Fi*3P;KOxlp?!%ldzVY_!Tqftx);m9E#Wu@o3 z+u;ajV>Bd$Kto;R=lvMAWV z{&2%(KJ@sw+e$M;T@bvGdoHu@`alVfCXeo5@WnCoV~3}5RwsR@;^PUt)4;u@OxsJXt;CSu~?pRtHPTydDRq`>N5!|%sHpVy(xv=!nF0bWVUH=H-O6X zm&tvV>41N~RkyG3L-p=`PJ?~9#U)(z>)GG$ckp^#G1iQjipcP#UDLco=RGi(>!ZCg z^cOjKP?%Y@c)pXBxr3;&*F@GhjJ_);FsnO(sD(s0K8HRFbj9!Zl*}Lgi)JIf=bZut zi0Z-x$Ns4)zkX)zN@a(qAyCnXX+kbgt(bPgeuuHx}$v@qxHodjhee4RSG#P^ij|G_CiSkYt@g|@M6T`hLH-@t+Db!!#419xru-5?p z@GqDHv=0BnrI!Cwp;$cyZR3kARwYaTkd`EB3s-Eaxa{a2Ga^cnw2ePgGbb<*t9rz=-Jee#^m%NweS zXUu@Q*#?!dy#oDT!`PZ7E9$YxP4hA4*lSk7;1Dv-e|-_nDE4bgr3$bt76Qn_J^~Ms z>6X7{*4P78p=h65&V7CH^XDI5K{he42ylY%$SoS;ke1ptn$V2}R(0-hzCb_y>v%BN z(|8~QN|M#wr|8jx?XWUk1==8w~rsfBZ>x~^o7UrV< zC%XUtRQvj0xo3V=>!E_?ZqFG9hk?bLJCZeP?SusrW%^MWX-;O@f z0gB$dxz*z@9`rI6#^vSPlG)on&Cx!6@*RY_yI53SZo^sL%1mF5#N~$Btr_R-)haRN z4KrF(8*uHR*D%WOTRP1&M(>x>;J#RMLv7_SA1{ZjpDgeBPz8`-v9r5>-_DL%4JcWA zfvNoylQ7zbnwc5w?ym6EN#^Ce>|bsZ|Kb7MJT^OU2VLWqajw@9dnP z@9czlLzTQc``+7d+j5&#R>Pf%bks_eKX$k`p3^#kPVBT7c7?Qj3| zTC0tYjkB|lj|;?K6Pno~eBpg1>tN1eaGsnX6 z&+lVnpMu1XP#R36cI#pk#{w4H)eKFI!`#7+ zLw;^<3oSA+!E7Fa{7)CY2HPUqT*COUGM@rHyidjUhka#ihbec|S?Ov`W!&m$WI zttXkEjgC+CJ9hgojLE2LuXs)&{Bn3*KyO%2Zht9J^T>)2YQ=ZOz;Jf3NqV+5)SBRB z+3ejxfpTiUDC+`C&XJ-yA`7g9q}W0}8+UjLeCzGA&Ka7xJRGiDbtxd*uPB%2Y9$-x zO?OL}4MyTsb5CgG$oMHL3c>cR!S&twaQO>Eob%`9-j|; z(>#i?Pu_2!(dvq3auOJTa!fBXF59@8)dqmIV8i>e)|nd#H=p;>t&t9#CO;vF*7nOqQ`Dz3cNQgYM~USrPaOqWN_ZOgHX|?zB+=XUbp0@2x`K2{&F=#eV;AiebsMDHw2x-+8y&XGDzB zy>yjHgFWb6rVzfJn<365)atOzYQme5Dq3Q+`Jrl^5*%>OAE$wk%ADNUNRm`>V>fzmJmG5B1uMyMm z6n~Au#`0{aKl0yl<0r~L zmfo6wuuSC;IJ3OD*4nx-jx4E!HS5X{7l+quRk`L3-y1h>+`M_CP8j9>L_}0nOia|Q z%4ZGGptcmzQ6?s6fHeZjf$s!v99vNAGxYrv$O0X)-*p(g|G#bF=b148e9`z^MpMbd z!R2RfetAQ0JpVUY<(e{CWpvSvJ>1sT85!2rE9DH?m7>(caMAfTxv-&P3f9__Nz4`X zfpdQ>H?mO0;`D{+eTROWK}c?v*w7jq0|3cD;T*Q>fW8!%Yy@E9pFHU>D5gDr2M|0r z*U>RIm+F{0KfL|&Jdq5*k^ok|!@&0@GfMN_&T*hxIXYr6j=Aef0$m*WpRWW#srVCo zd|bkZZ!w?Kp!yXiMkxiKYDbF}20*TNNAJ~cN^5I3HEC;i6ey&Ij&7JTmj@64C83VaVQFDdwKuKsk17EOlt0eBCL9es(M@uso#j7&uzRM! 
zJ^?_S%%nBCq4@adZw_=I>vs$-sMTs)W+6qOe0WvVvH zZ!9D4`(4#_Pm~Aos9r+cJ9k^Fv*r=VU6If_{%Vm(5p^0U^O&y3r@joIFljMH( z8^?z-*mMs^1~)9(-KLcbna?1jdpapS-&OI@<-b}BY{D}8yk>{!^eSKV-x}7|>gv|k z($hyr)6+e|zppJo$#AL{00|Cx2~bs5s5KLI%#wmu8?3cC=s(_C zU!N$y=Ext&fn*hEH}GdvysCwh)Z%l=Yf2T8b>XQfm^nEB3$va-OJ&4Rb1{O3mff8BfN-w+gYr z)k(f8_q3UO_m?}}rz#MNAGtSdmp7s$ERdFU8>s7>qopBt(VOuE?bg z0heZg?h*)pYVHt)7d3Dmiq#wmPr}-^TeEZe`dK)kp8qL@7@(sCyVg=e# z73}d;y@7VjVn#q--#PFfQ)fcIQMnI#nOFZm&z?gc{jV-V<|_PW`OW`DF>X6Mtd&Oh zDSNGr@@?J)w6@;1Su+Tr#+acZtaNIr_H(8Ea6Cs=yZ{Q;Fc;`zj?qDRxHke;F2@9) zJORuzz>;$Dk$m03*WiW_wjF7j5!{L=aHKk_6RApCe6G*FEy>YQ(br&Q0ysK8Ojo%5 zAO0?muzPTO#$Vc&D>J!yLlvHI?1?1+wGp^`ABa$DI)-P9_seSVG(-ua{g+0*td`G+ zgJY1$oCB<)^X172%x@xWUz9d~ZmlS9-pT}5p1iRZh}oX4C~pA!+r1SHmxi%9iBd%H#Tv74rl_84rL?~(DLj&_;6J7q_!D=>mKd5J_ zulVufG4?7j5U9^**#WXuw&&63PuJ{0jllNmao)+cii3e3M#jb{l(EmMi`&BYPr({> zG(-dm_1lFRL3OTIQ3Ze3+i3I54g+Fe_aD=xG*{#rn2`k=*Y+hVs|#i?$4B*cEaLlVK+C!yjm(y$cf6hnSsFy zBGGfUM^OXp2eazi%+l3e2LFE6C9mPg%s0ohLE8#&gaWNvm}*kPe%0$Ve-pwZ2ih1a z70haZieacR&sutpjp|@uy+z0Ha+sLX)?%30aHLHV7j`VQ?jC@k;}1WeS2nTO1o^3# zmIZ}V#q8}TC++RwG{)@}tq0ERe0RfcVFr%XYYwIsV5C>UQ(oN5B;uX821a`R=6|nxBOk$nj8M0(4x+1-cwdRhT+sb}Pv6&-j4>8Mgu$lE^ z0YD-j0@JfGJ1ZomQJ3g-+D@{k|50D6Y*9S4UM@RM6k%7U5Y%}Vt_k)s7=oe+u)#&o z;41}&!#mtXHoUs*9UPC*w-8kiJr*wPPAM+VNuWlV%0Lj6>Hw*}0A` zWyN>i?kczm6;ddnVnliIl7!ojXMMobw+l0!yop`9z5?h67(YLEcRz*4K9CdO-YGwO zU8iQ@=5i!R(v&1AHsQ{Jr!k?ybnx6C_)X)3C9)wOxji}64pW`G343b4PC0n0I|zL( zW07m>)A%VGyz%Lf%~>pj9M8I<^KJOJ}BJm)Z3oR$^sz0m)T5J z&}HlA=iuO{z{oFMePPOM>4F;&0@W0~-o9J=_903TUmqn%X#k@oPRP0PQ{4}O?GpfK zF89k?@e%s)yCT5d!ia6HE;#ntc3py;$Io(DI}52VXvLk?Bf4O_-;jShM2> zzgaAVq623;Z61G?`N9AA5_!)a^6bDD6GeBB3zaf`|Dt#!OTZ^z|+AD|E0l&O@p z%>}R1#%hYbB~@EfcN8v3Ev--9zO6~%y?lSsW=XN(VC{C=2iG|TZe=BfO7hlMC3PfM zx4lD96-se9@#yT!+Zh@fHxsib=^e(dT}SwwoRs`H7JkA_b{*XUW8Zu8KYYM6nDeG0 zeX6W&D_^=E7vG-u@cprQA<)28^Z9thejC&Z%IuFhdCL`Ob*XV zcw?=RU76;`)@rOXJua@-3RZ|d1i99#O&Wz7(t7n*M~r*AupuCBGYwIHW0d*X`Iz># zpd&{Paw~BYEMiLwn>g}IjUK&7OiW5jblk?xHEd(rDAv@;0CPmT-z!C{yYPU5{q?M( zL+l}21Og!3I=*ZhHCOoVZT#nA??1`g8BD3EgUmIXTP#RIE)&a`q^WvrA234I7N zXt{?AeV3SwHcdz-EEL+52xP|kpN>%mZl{S!TT-cTtCOsxA951ZT!c1EPBJP^e%x1B$bO-@_0P+Z zN}g(WW`cca5y2?daU=O%NH{y#*XK)+B<1A^`fgFCCSPnhabrxU!vYE|lz$fbHpt6( zj$W&LIOn{X7gDpopshcfoX$ClWYEew8BGAS2z6iw<{_H?$ai>(z8L&_$o{nFhIPS@ z@&A^oRN^m$Gn69ODd!?Yimhj1hLLFql%R}hF^?3h&_A}ohHNP~pirS3Xwa&nZyZ`x z?7a8o+!EQwt&nx2(qDeNA25^yaFu>kw)LBGbmi^PT`qG0Au1^EN0MBCa2yI<8MPC;{hihQHwBMDRmw`-KE4t{`P^gP%%@&yUqFtUQr&L8o+d6)2v25XwKwMaOJv>b zmgUH)2*sFGfnCvNt%QZs)m$3`)J~-$f_$oir&^2FG*~cX`q1}nwa+LI4<`(xuKvyHUcOyp()?9d0yos2nm0#8t~ zlgt*;uie!No4v8!!ueC~a9-6i_DW|Y-#rmPd3A2=RD`gVK#cG()$24JReNw`T&RNK z=YOD2u-`*MXQ`|z)V}UZ6sg*0-lFn0K-8BVuH85ThWgUYXD-;{uq|K2(iGmR2vm=*W5EjAlf?z%MVTE%nS){uO)&+H}{4& zG#Xu#V6J>V=tN=YcD-%))@7ikoFFstv}ki!%|OIV=C+P-%$d;Tr@4zIWW|P$YwTzM zs%A%9Fj_QRmP>PWcrO{ObIQc032&A)sFdR#wUDPCGH&04s#P57HQe1K~rdJFfRz}!6JJ? 
z#4DHkYHRIDtbo7xtc(Pg>y$WGz8ClGn<JNVJvfE^`y5;Pa`}E^v&wbYt2I0x z1ro~_|8DlPzY?TAZe(`@f1B;^`qcf#Yn}Hc3dVJ|D@F|qJwEUKGmO8GVy{I^@+`&E z>cS)>DC79KT2=>K!_kd!&EZ6j*{?l&577qB`Jk=a@(dIO^uI@(Yva*%rXo7}3#8r> z9XQ90_Hfd+^wL$`SgYKzEhwFA>SVoqmH#YtYU%aFlHIn?(N_z8vP+4cZ!)Of89WWF z=1E^kIp}TQmLLycX#ANwAz{9cHNUGP!bQ)!R?tK=2WfsMuQ4XSo%Mk9-0j>$r=tm{ zQDGh;zLH;$=kS=HUZVszVD9Ac+PFMsPwA!V%&paHSpwP1a|V%r>;-KJn-#$)~nvPDboktrS=` z3@Q#CsxGgeN?%}CGnJVqVTgHxYwh0LzSjI?NNKuXnXc!zli9_hvJN>%Cr?pE>8Xx= zhW@8wR6=8l!m|H%*AQ{9n{Pu2b0KvtzXNY}X~{WMSPJ9$-OpjFf7c<9g`d~8u889W}~%e`(3#~$UygoF5aLxv=z-t z;Njv{@W_$SL`1ecCxwF9V&MsL$i+d~Njk3p=tgJKLLAjYW%0Ac_nY~DpQ7aF7g~o^ z_piw|!M>2lDtaUgP?+;cjIE~NKX$XbUYXJ$$^yLO8-Ee|7!$=ytDU0w$2|d zZjdEEx46uM*g9!<#!C2XeYc1zemZY&pRyrYXytukWz-jx&hWL)O@#*MN5_qY0FNo; zyvRS1$`FpVDdm^JPD zz@v-sRC@)jwxerX%pvxA-!(WR)4%oi^MHzSgRI~CN5uM+@CpH%|A{eOFxkA$ua|#ARemS*rQ%jrzaF z?}^MKoey0#YZNxD6E>Pv#jLFz6HgqAHN&ixD2yF-jq_;Mn7X{3D=lc*wC3I2FwmDq zyswsuFZxG5_5MuoLwWvk?OM)4zE4I*Wgd?APHj&&bF44WjYlz}P^dFqm6SuGzB}ew z=~~(|Q~u)BkVpTIb2V2yWI(y}2L!+le5qcRQ5m42c3qC5SK{|uibVAN0=cix%7*?j z@{X=(&pu}qIrR}G$i*4U*8x);6#E_=Jd?*kF?iyAUh;`ND1p3{<&7oXD^%w51M?(4 z>>KT$lDbu9$QQ7>y}=h?v>gz#s=D--1>{z3m&o0#Kn@tAqwbT^9$S?OWPjw*o#>Y5 z_qm#o3`d8iC3HIAsc{;t9{lp+zB1a3sP;#p?wAL+ZAqPn{cKMo<65Wg*Iqe9=e1XNkkOW{9wN6!`}45{k*?9{ zJ+0xdO)O%y!d|y?p0ZnUsNLSFGwv7AcyXyZLL+M9fOu^HKU*AH zD8jR$%9lt4Wcsv3exvJx@0ho5Fgu9o;2Pc@DJGvii}Dq1OZ zcogXAGP(jX3;c#69+yrj3A~P0y%@od9P&S3X`ij;V*bt(r=8m^tmI+!K`Ad`haI=e zB9s*GEYE>RhLeZ#F?iaf1}%+n0$)Cqn+)Mg*TxfZD4%HgR3W{x63-`K6X_0gA3Rhw z-`IXjx}6R5ySJV(B-4xUn)rr8v_jJ)hVhyGyyNfN+jRmiNxILE5J^c2oIqRkM_g|G zIkT=f>csYIMsaagwf=3?*VIxezCY*}@;@5}bRl-xJ3dv#raB==0nwJ9kEe2U$4V1L z%EzH}qiykMFDiR0uC>6`KK9cO>zUwe7f4wIyVu5ncH6?-&a?nGGf&IBygQ?a_g>w` ztS-&7jvmV&A5$nE+=dC5?%9L1Z7{6t-8*4J{xMnERT|%s!%46YrZ6;fcJPPGzi;jw=fojg;j8+=SeUhGc5o*670!I$^;H`;xhO6&(6-Co=!beXKSmy zd#$X7*DJ|@+XjRR1G{&#sK`K+L4kd*8Ol9P@hr=3cQYl--(~u}lZ=TqziLa`AV}Pz zsq4KZd!%BK%Gd5x#tMA)8gE@+Pm~`mJbqxAGPwyS$}LO5Fi6r~yQ@L+{EhDf_<80k zqym(!gcsBu@+R!qtjSroo0>9iMwhBPi5ikCWvB@WehV%0@@@OiUJyBWXu9gK`P5|- zMsXC$OM(3AO3+~*x#JJ|JO(5hlxQ!x$CrcnKr>wsUr)DJRcO_AF31DCz5*3;>~8zV zouaot7u#(2HNUgf&o*ypuw3ypIrr?*1G?wMG!fYI3q0kf>#}2r#8Qrn|v&+OvTexQ|-^xy(sCfozf3tB(%^S zB4iqt!yzR7X!z109~!=#@P-6o@ySvI}qzZ-k1L*2pR(f zI`V(H9(HNG2aySi^3op54r|gdPg(a0r?wMC!xpM7)HV6OPUE^_J8kob>^zOe9L9WD z1t_EOwnX7S!!;f!X%e9ET{gJxCn*bHNgvDe(^+f6Xg;hp)(hmk{rG6Z+^}zkU7hwT zFU-us^#9<|Ku*y&ntO0~iB}TEmx-TC>Z{@STVRUA`o0=3O+dWGa3;L*&2$+9VAHWP&jgz-%ec<%SYW-&&;nZMvG`^)+K^A(2egA!E~@zvB7@GcgMmU zcZz}YqG`Nt8+i_Sd84hsg>zIF?JXOtMV^_tNi@ysWFObjcME@ksXUOKVCZ}3B{|)Y zQL^h7oS>K!Mm;pfjJ{%9|2z7PE#<5YvLvKWBD(?`OTRH3dV(#(7I)|v(Nw*{p0^;i z@5b7aa_hcV8s&-d{Ml}rYNf%x@tIzRMK=c0i%8XpMEiF_vs(@h`8M$moB54W*+_GJ z<<$?M&G6g49aq5GO2j;3R7&hqqC9va0H5%jJm@~UZzoHq;y8>;Gh9pl@xxF)WiY#o zk=ZFZJ&EtEuDW^EIzdkA(tWyqhv$Y{R1nC~OiNNT&%ZG{`evL*u7zJ-2Oyf|hf zzO*&WM|IA55Od0GCO9)s8F;9`bETGJH$2ufZ-cx%Dk09cVJjZ2XxXmkHFD}7J~)iz zHwN_Ot~Z1lpP@fvtF!*f@U_74kwCUl?$ytG6*k)B@s2+iAe?R&G%jp%hf0-v(K@ybzbv~dX1{H6ME)vYr_D=^S( zsZ~QHRI}p!{G@gCn15oHcu(8k`{aOy?1sbXVJA>KC5efE4(f3_jCRZQ_b?>ipQAsg zM%HT;{MPV-$l<8a`Ng11T@Ke!A@7S+cIxsT#Syrb6fU}~x(s4~mTQDcJ}NEdx+RSt z?V{gmSE!5TXYP%33{)3H+P_@xzGM<`{=(zOrIpW#{;%I$tE+xd>SIm1W?u=Vovp0n zy<>hE3&-JXQt1w(NYf(ElkM2kX(7>|nTE#2G zByx9gsF2}5I~lK4xiO)c=b(56WuCWl-Dc>Zh*DmEYGKB=Ax`? 
z=k=t|cE{a4Yk|ITV)^FrGN?NH_kLb2KMh z;@w)I_`_XDJ6uok-DJmr69h@zmcxpwLv(0-`(bEH7(zh{!cjzd1x5*)Y*h4i7n??V znR@^wrk;3BORheq)y(uya&IaHYUc`cVv-_W4QAw{(w}H<_OBtTSB8d%eP`>)`A@l9 zGU#!nZ8iyXbpl_Lv1^INC&xrv^9p74Chcr#wh$u?@7-ln1@?mKb&097^_TtN>6ImP zPU0TLCe3ao$_eGc`&&7}#}EA5-Bq+?k<5ghZt@Jlz=1R*>vJrI(0K;6)92|7|^h2f5rQOE;NjafLdn3-|8CBX=KM-jl7M$ig z;GCa{wp^=0A+uk1Db^ns6)~-;etWBEk&6`2wf&>n%R`vr*UjQi!VD$v_lXhf^$d`n zM?9FH9EU+kh7`Ao$+;q-zRYx~4UjFR`^-iJ2izLnE#z8XvTQs~H_p5E`jRg?*6O|D z%qRJ^xwoM6Q~6WB=!|MueT*uEPRmqi-Pt_9AB@W!oKX{SpYF$_8 z%-&mIOV9gh8m$pZd7Q8q`?HDb&y24|qEfE8WuDI1rJ<_SNe%hF6Ko-WUQwswC+=;; zT*iE;-ZZ5*JJg!nJ+dumKy4@CGyjhMeSX=DWF9a~*X3o+9iF3U(B^1U9mSaDMIlc$ ztgGgpMBVg<*xIf}Wyu0Vpy{*GXF6@w6UkqK?W&`_!FI2(oK;!hb`W#YuBsY5<-fxPjci4AOtM2*_(J z4Kes9mj_qrF}HE1o||O2R^aZfCAl$ZAhk`LG}9P=C(w9bQ$H_uCU~M!Xlc4ncehx; zCxZ}NotXhK4-0)W(=&R(GtptjFQ$EezoL6=;W(-x=!lFApHj0jYTHl{B;c4cp0xBm zK4jT^IUQ4GY(h9?e(v8vC#O?Ya~1390?wYhDMA8SomQm_*=i}BXs;%MG{eig8-HB? zvNlbzJKLbfTlY}D%_}*^jMFwJ=KT7OgW}_<_SzXPH+XPkL1xFYgOhc3V!1`5vQhlm ze4m!OghBPUzP_{3;=6YW*mGLpm#Z(33OG+S2~l%9B}+{IoV^4TrEZLTDu0!yrkZu_ z?94pX@*Y%dFsRuW?$cA0jH~LAV$3}WC@Y`QY1Z&Y){M>C$)y#)>do<*9odwn921j?eh{DQ)I&1+tW_EjwK;4)xq)Jt9=0Ge|Z`EFFOYmR#lM;^S#xrbrpE6J5V%N*Rk3=&y`JH zlH3dWzs*8rnp9(S{vros25Ug`3$xtbls~u3hCJO*4k#}-dL~BLQ|uvKFxR*xI#VC5 zJp5$T+OULw45i8I_AD`O^9APgl`EvmOOpD>Uk7_P8C(v%Oz?D1*gYEg*vT)p!+R#U zfCh?3WNlBiqH769*4C-6V+}#p*$sP|+ScHblu*rltygN2>A)+gV|wA^jS&{O*$Su1 z@)DCuJy+xxC0@6F8^1-}G~L<#w9U&fR$P`TL}T%=q|QYIUJk+ zTwO+Tz??d9nN7P;T`FK$fs+ZFP=>*%n|l76`pcoZ?pwCqskZX9lN<}Tjz4Q^t%rg6 zkw^ADZ_}t;5vV6`9;|g6-5snhK=9Pm%{f)iz3|uELk@3;weLZ*HH_~;>VG#oST@j4 zh10>R;+U%n;z)YBJpW7f@hjHG<%aLfi+mMYYG%tv>3y76t|+g4X}{&c+sWzO!78@F zep~6uiz-ac(R#e~m+A;g#YC^k@^;P87;!cv zB?dWsJ6Rn;v=NEyDOO0%{m3EER`ZY&*&v&C!g9fI+iS=si<4ksFN>u`2Mv7#MXz#! zhwh8x-BNj{5x<33RF@@nDdz3o=YfvQyiv9x{=%8y+7NVahS?S&xFKg1UucbPSk3a1 zD)g+u^7+TTi1M342?O|RxuLQln6k>8Gp?my{hO&5Tq+9f8w38Qq%(Y2!r!A8TB>bJ zSDVDQLje*mzfXfkBXHrJ!h3;-2ew5Gj12h}&x+g=^2Upa37NRjQFhs+?pCI_zizP% z!)vAI5QsCGSO%@GuC7TE?snl@n-mxxG*ko^{?P1ONL9PgR6JZ7?O_5AU=n35{VvV8 zmoF1G*Y(Wxb-sF~Pcamo+$oUEbO3i|gV{q3T_&S( zCfd&u1Pv*Cwdg%_&iWqCR?Zd0-^pZ8`%m`jiPc`+F)=Z*HN1}DK4455)BD|lJzTh@ zUy+}w1Y*~AU;fYkZOUs?UUIQf_{x>FU=!?IAtG8H2Bxl-&Ir6|-rO7>-t^m^+nMjg zyrUK-&2!2D0P%&;tE7G0nBV3pg}J%bpsfbf585vv<~6sNy}Q4$3}_;q$%+~& z8*0TF<(eJnnISW?D&Nu>T@}HykM2pr*Gbt!UUI6T-IL3f$`@GqvH;+9Q1g0RCi#UT zAB>;XssiEh1R(MT&Wk(VD90GidvhHReoKpYc5(t^iGLyE^-|*1^iu(4VAU7ay~d+o zqBLkwWTyQ;>gE3YB&=nW^;&%)+oRJh#m7?s3L`RdK|9b zf^}%WG2b%tn{gfVR$W<=){I>=eKn~UzlYV`35LEL~Qe+cwql*7s zuxj8|@b90=sQ4FN)5Q+I#~QWkIf4!C)AsEf0jD5q)D;5|qMFew^IK}i==;B@C)O|z z$ss9*UW|YDFoMLx!mrHJp$}~hJT+!oSeBFwqjiwA*hb`r#>N2NlowT^Mw3tc?6(B zCyB{v_?=~K_sO4#K6FW=qTHbuIR1g|YIQBE{i3`GD6|TI@q}%m*YNoVJzyB3?wUY< zDmd$bKI)h`3G1_GTNAvu0WNTDVG8ZW)NGl*2qC9gu}W|bZJgQhb%fc&RyN}bp4-pp z)&A)zN=HYb^-jMU6LE=MhXBz4o0vLt&%F;o5-V!fXe-s5Q=WJ+Wg<+62@VXne*FL0 zhnDyTTK^sS*O(6GLi{I9qB{fuz{<6e0PIlam(q&p*$fvhEwc_-;QcGkDiN^3Bu3!kS!bDe=V#jo-L#$FjyJJW@0f;5$mK z(;FU)n!1OfIBx$v_inX&fac4%mAJUJedgf`N!~*%iH4V&Woq_meJ)955+1TR z)OU8SEof_#s!HDe?6YsdEj7i0fr);X+XRkqbMPp-q*RjvCVQL%-2N~2-a9I)Z0i@T zR$J}16;TlZ0UH!rKxjnCP_z=vC^@5`6cU9Zr@~As5NVP`36d$v8I+uZ#3GdzIhKSX zR=BJ7F3C_?bX%o?R%`j5rOdS7}}szCD?iT?6#0-_jg;82EW)y zrtD{2iYP>*bO$@*J{*gP0a|bDyJK)2$iiDj9x6iV5a>0{$;pAQaDDQGG@dl5QQ6GF zZ!6+)0bE3Un!XOwVt`k>{}|*Rp@IWC3s9Wp9|L_61mxv=e>Jyz z)c*0ujXKO5Zz~U~9u=#nw>o}Bs*?RkGO zE{Tjh*$bYoRKXl9*Zhma@5~h+wP!NlqolyR=Wx-ca$VGQjjsjzs*)iF zArbj03=I^~&1!ahCTapaP?Fz5y?A*CS^&zwF3x{r`1rTndaN^Ux5otG!qGu-fOzHXX%nA!X`7ONX16Dlr|Kbs5;7z9G&clf?) 
z0ShVB>7i0xfBYNC_Yjp^nJ+^o^~5MQ*)*#j_?vNuk}%8x2#m7LF>Wr^!OduX)FrsJ zP`WZq39S=9nc46d<7RBu<)Kj|_A?(AU{*_Hlpgl=4J2H!P->KKP6`3(;vi2i5~fsX z;5DuT`*(H^?3dj9O?5sXyqw&AccSXs-f*13XfpCoV9b z7;_>L;rrKJa?>Ft%`Fe6Fz}fJ0pRk{TXV%3z@K(X=)UXS#O3W~b@Sh@S!Kn^Hy z8QPi*OVS0fQf~&EctHmnZ;C@t7Pu$T9eR@O)VL?T4pX{0P9}q3kf|6XC&??@GYJ^l z_KXT6+U7f*_iySoyzdz=2E}Us^~)Pu6$Toqua^8e=;ji_q5yEA$ff|9nW_zKa@jUR zdn$=e9WVrVoIstVtRdkE2SCgDb<@KWjK8Er%VTFwBLfg06gEBb*qMq`kib?|=W^d-v5V z&=qNEe(pR|eAq%PUg$eF7ikLcUOm_hFG~YVj_1$E#wuK#U2obVg!J{KJzX9=cr+E~ zs-wea5`@En&LW`qb1~>$GHeh(Ge^QqhSGcp_6z}%$Og$@%byAg*47FNRB%c0YCZkV zbgD6cTHDUn&>vu1?uf+UMX1Ssrkn%NI?I36y0ogQQr%2 zcew6{G=QM1+Squ6+hZ~B>g!vadl3Q0+*gK&U-RbXkk{q6?yEC=DL$8^_14S5FnsSo z;w|xKqZ*nz-HGb4J;;C52tXG&J6~4xK%y-k&$UPQ_iv6H8h6kssb6~4BX3;23dVQX zR^bm3Q*E0hPJgA;q6R`hOYq9VDTkaw|vWQD{MB?Zu(QHF_B~GF`(xN0w z)FZXqj%eb!+5+U=YRhT|VS;cCSG_|F#8q>0RkK$yEXBk)@2pvd0`+NH; z4GnmFLj%R#Y%(a6Wx)D|*>)yhT;-E*z)~| z#8a&I8eA6-^4<=4z7voRz}X!goeJfxep?~WhcMP4ogI=mwo`X>z*26%jhwdMIgLA& zH(_WWC$~9Z7>d;K@JiXM7A(Ix8p_|>D^??~FxK9`C#9ucIvuygD`fV{iLBOW4q#Ph zW~Kw6)2O=>W|a52x!iIF4|b;yilZ!UfcQ?-)d*BDP$}GrdwpHPkq-{6raOmiMy=|t zGunpux(G0iOy9Hh)0yH;c{`W#hhk-mpP!J18FfhwIyQgt&%g!-78C>q_U~4#vVusDLF3>Ce&>S9m?14NIuD~R&m}J+}jJHLJG#<6}~#!LJCx;3@xfD_wPKQ z#Cq#{J+5Y|>gr9e-WL1sLs+rVeSR!Wc)07hzP|r<_X^oS zDu?lepERuaDEB>iG)7bX7RY)K zzef!fH*T4R6cB_Gtfecpo#GiUv3HZc*fbv!ROkTjXvWAHKX6e! z>;Sc4j30wgXx0Yp@OFrr2|aY^=FLNgRB*6GlPC$%(o!5|_bK;>?RG|BhwFR=Aw}?0 zcm#fO3Z;NcS7+QE^(b|g46oMYIcO~-`Uv!9ek#?S2lTXW2%PC^+Sl}BTu)p_0wA}@ zM-Etk%wQ<1s||r|ZRX}73IM*F(ywY#R=zV15^Ptj$@*_u?HY03-W9H!o;L77if1Z;4ChBKyr0>4F zB&Swh?m@Pin!uU|c|LgZy$1kvm|sClko@!vv)2|(x92;tY$wTL0oemp1+~M&hlqrqrV-0BP>yV`sNOdg>APv@wI^sYsp3sW*Pp|BLHir+XF`DU?MU z@`4FfWUOVZy8PsdqKuQ}!^!UVIR{=QCm%naoV>DoP0QLCwJ%a76+1`J?{9clpmwi% z)h$7{JS{)k=CrQ7VK&kSTOpFvD2Lnq_PWUE5t#9{r_34OT45naY@Job|K^E-MZ zAD+>l_2Ztf*UKh&yLjy2QNrh9R=nksK5Z3I=9})Jx|H1Cu;VUEtWnk^Bx2e|xx5>= z-WM%r-)JSh)I)ipvM#yjl}jfr)*O|L><7C?=^Z3KyWd(-zgP4;Hs3z5p}&&now2^& zo2n6y3QRfUgPm#bWD7j{tvPfiUe57syhPmB4EZPi!fY*9XwiXqnOjqXqWM8lEq`Ab zotQ^8{5qjiuf&E?cX=^hbQ*j( zZH#Q;&umAYePDR@ATvP>Rb6_AE9T+%>qxT|>YvQTo4k-XC(jjGinA7YqexIPm^OA8{*X5iN zGwQaWjmog&x7r1qY3bd^=kN#lxlorh0Z&TTh3EQYd!fs&8-+HP@!bhn&|8?!$*VnR zDx@IrC2v}NCSY`<390A?;`5>*oD-D|R5n!p0N!!sH;1u;6&<4mzw&qPmHlOhT8lP~ zj}ko9)C2|9)P&=+Bc1y-S4FvDa$QrNBF+b&!{NWv| zF}bXrwfYL#P8aVP(KHTOpksg1fxH@tFCWdsUN+r6k(4dRB{Fa_U+Y+x&YYY1Ir6Vk z{EN*A2bHUjTl?XZ9q-|u{H~*+@YQdNzv7juH7Sc%7{{X(9{A#%&atkK;nE_?XAB<; z8J@ij@YUAPXhYFJOq6d5YEeySnIE3$Y_)q&BkOt*{Z&7_E~4R1fK_{Q5fbrE zZ^FY%2)Sxo8s6{jo!w&?ovL-6JV~7GNuXwrKOlVlnjA8Ip{q6ib+i6_xY5HYe;KoL z*gMs-GxIY9qGWC4HNEA@zsSGEWa7@|s^o-?SZT+9w@~3E&u58MZ_Q_shiWdlT8`g5-05Q=sUxO)AG1AA#hCP|qWl8M$PNmd?7Q#l< z8+=~=leq)uUEL?IKZuAFGchu;tM5^1W;;G1r0_w;5Z}lVJZLqVr8Og(o>E16ws@)a zM#y7j$4CsHd6k9BtLX#5ORlK!8m@{8*QNPoyFLy|`PSey{wruO7)t?&a(AE47Xi)2S{0agO&+ovg&E`TUOcYCRUx!VjFBVK9Zw9W75Hpu6bsmY(qn*^dIElV(6Z3 zdFWduxpB+crR{@M?v%l!#u4(PtjEPK@mx=rb!R_9vdyHxSl zN3cPsWwi){IhR>%$F{uyF|=>km|uJ(5!N(~NnpHn2lmU9O>Rrq?9?akWb$nEyIOZ% zn!2vQCzBZ;H2G~_S4vXyqJ~jt|3P=j``8Ocf$!pnyq2HM50gaw(0bHcjmF)6z9R85 zbOBO_Kb`Y^?{aohaS-qg`0AX6d@Rd+>OAb*DiUd`+UnPJbjE!u#(Ep0f+J>PyE;0$ zx;ku^wT-Cq`w#GX7p}OQO3c_#;XR6+H8Ty$4y{_&RGi86BsX2!5p^T?gIZK_|2!!x zI$8rjC}H+R2fI`iltYV~yPg&*IqmLhYrK(bGgcN4R@mvs-sn*I-QD5@nXIsS&D2yP zB-KOqBU$EU4qul@*3?uWdt{{4CMDO0hf1w*l~OF3pC8Uk)_>4iTl?k9SbofOip}{a z?2|orT>rA+h(*1-pRC#DMmxlAf}z&FY-s`Kogn`Q08}B`*>OSiWW_IAI)w{y;et)_ z9!?%R43JFVJ82GW9Lo`ZdT|^nR;49WwXRXn$p;45_ZmJ=hvw(|v$C?bd27m|L4i&1 zbXVZ~ocs6B6;)<}HaEuxOFTEn@OZ4;NWBBC5*?hC z1?pAWYL+ttm5Sq*?}8D@HZi^BHYV|uKe7f+oX4Eh^loLvhG8u9QjTqxs5_I_EXjrG 
zMzoX<$FFd>{>8K8%5GHVNS+TDcIUwdj3}ig9tTHEHplTNYZU7X)teSIdz&_gwjvEF z*&(L<>Ev-BZxfHh@|J%R&y#0FYd+ugck<4BtiOY+97|Cub~^#zPrBv)HhJSFUdVyn z=>tQ!RgV~-5g7@%Q(8bYHOf2eWFv39AfbGegAmqMBhe)H1XlPaJRNCtXNKiu@RF&l zJB$G&mIA8c^ocB+v8Rai9@$WqTiLjPiIDS_EvvQ z4Wv*{v9E(tY+E?JcBZG*H1t8#zkfk9$Qg!zx*=#a~#+}r{i2u zSggVZXq3|#2YPp+X9~LRr~@w?|M=sTSlIrCZEH#vu6m1@g_}-TX1G8zAh(dx%}nQf zgXA>Hh#TriVt=_zp@dBW_RLr{R{0h9mjV3^n~7rG*kCS z9_=;=sTsKE%+Ii8a$Q)MR9TS!b|!S?!S=LCgTSjL zLHcE9vfORm!C-Fk9V>&lip1Wb+6tLb{ok)NMy0vU_XUc@%d+K7ygY=f@%eV;Y)Dwx zYZmo8cHVYVv$K92yTC=&zEfNe)Gq0--vhebr?F_rYP}#puWZcA3k}Ut!ES2` zqJ0Cx1+CMar^B)c(VThQRtt`R3+10q?11(-3kxc^LhZ7GV!lq3Tj-!|&~JyK6uEcK z*Vf_-4jt-bfQsZ?fuGB-7!VwinuWYbOxYUp6vhn=ZZ`9Kd%N>Q3+5!V4h-a)ni{sP zHOI#TcjzO|Xz;q#vmfJ*JC_}j)r?<=r+bP`4exdaY&PZ^gXYz_#!N#W>(j7R%^htX zuRqq-#x5o*#_r9Kcwd)c)%2nTYGww7T9eP$@|e%ic)ZX%Xy&LLzpX8)EZQ0_OsWEi z5%FrE=5{{DxT7-zd)}g9h|;^uDr6uNC5|V;;%|GQdO~!xlfhSrQy)bKesPn?918 zLV%pI40Fq_<=%|V<9Q%U+;iPxXUys*4KSAa};S98Jn)M>Bvo{Fh5#*5*z z>a36e8@Gb2f;T{FQG+-0We-X9_`ZyE7Sh#y2k?Ek8qDT$#qD@&T3?FG+RhdJH#XHswO>`C8ZW+1VVe5hdyhK6Sg(&NOir(*C?!o3_ z;qe}sTEjcSfYKQ|`!`ioj91DT9tzi+o(pZp-O&Pq32sC?{?nLy*-N3|Zfj^{4jszi))sC9C@V z63WU4v__j?e7kmoe3~B*pY%}c$@U^EAIH#{-GrXAZqkLKh5{Me}U>pXjEy8ok<;LN{IUzV5;Xity+N&npPv!oNw?Dbuv z^D&xDas@qTuZgrDg}`Zm|TL7^`% zw%m4qSV{CP8R4WPrb;sOTiy`4$>StD3dPPYeC{Bt`@@npn|c{9rblzj+v`4oGAnWH zY4e#?8~$n;2Z1a#bT@I$*soElDXoin_PU)twTKiR;@gYv)TiwwC^PA)BcE3fZP?MPj%5mx{5PHMYKW!0^Z&} zDs&qWFp)zFN@@-_8(H4+&OVzIHl6S~D&$m6qlejkRMogHtCCj2ES_?>RqosTjH!F_ z%)x2>>nV4zZI4#>1v@k>RJ_{i}MJ+2Qyod=ddu)*a-W<8}?=Bp=c<-Cnz)G_iz6GI`~U}c6!|k z%e?gTVyA7qtwLLaj67_oX5GPw(HxguX#KdyWg+Mm9q-_>&`8Y z7iCq*(Qm9)NVsk=9QVShGWgx3fh^5ADsR_GQqor08)RCRlQU7o)|)f8B5}w4$dVKL z3wcsq&gdZr3}{IN)cXdG=Hnc?8gueZgA?+DoFa{!M0r%#48tZpa%5mfQEV{4Rfmj` zsh4|CksFyF|6Sj~hiLSOu5g+_c(;v2+Nk6ZMhZ5TxlfN)Y+|KOvJ>2rp&)L0nr#wyDOwjm8x|TXn`20VXQR^-VLOA3)#WpdjpJz8>oj zTTc`eV{=a=*WYWZ5Z&Xq+N6A+wI<@gVSICjL(^NNZF_DCy~=y&*hijzJVal>)jm}((=LrL*d8KPn53J zSa~@&6p#}1Y;PSM9d)Hy#LuWwg6zn0!o=Exhub9T8?pS@HGSN5b8Ihg~2luiin&moVcT#gIZ(x3*+L?aDy*w1rRS6-uFag$QYZZ+mB9q;bA>MtlDxWHznlV7%WnzLld zQ)gjZAD=GnDdKRu;P?}3s$FzAmfvtiP7sKPza@%#`c#49nNzsXr#gJKUC)5w)(sCA z*3C7kay(bO)VcqU@QdA1PW=>?IVq{4Jfm%+)EmJ0aLP1e@KyYI%Z98joqD#AO-kJ1 zmq^O@&JQIkX;U%msI(y+4DA|*$Q=%XM<=UXi(9F6zqohQ6aeL0z_X!XL;;0!GwNj+ zpO1^rdj>{oM!MPY0_g)zWML+JB}gODbfc=ZBonpc6W0Pypbg&DhpzJWdm zDv%Zge>=;}MS;Hk^M%pd>pzS-4-KK@lrwBncB7T<6OFB{a-{6+6O2TZ!~#$jQbSuf zzt?P|nFqNwyr;+Czo&Skq|F-B^&3A~+R|6W=$mv}TaDKS9+lv|p-)!xq3^OF7iW$j zu$Q20$Ghtc3pPE?ejI=>fHX%Hv1EL8C23$2d<2Hr<@l`JW9BUmpuc{aoqf7DQZy&q zs9cL0DrIj{tr5H~!h|2XFQPund2%;!H( z0RFS4|EwtxzyHYIe`vrsh5!5T0aJ4Wk)19AZE3BGzC@ty)(}cpNOY5R1IAo@UHIXo zZmW$M%=qBu48{afWk5*gA>3ex@mHV{>Hn-s{d47ip$=2GL|kfxt7js`aJeScr)x~g z3-cZ4G*X*bep}yU`E3#3OQ=D0eNr+deEQVf{QY}#b7!2fv7DKd!0SpZ2Hf(d}XD zLx*rxC%ZT@Mpq^Z3YtRHB3y~}H^t(lSwEZREClHX#k6s(1&W9b#47A%wcOZnaoyao z5WLvPvD=T+(eWW?N$B1D`e}!e2tJ90A)^R49~X`3K}5_ zKduIe30oYVP8etJF`;mFMaO+S6+Q8-4m9*o^jb2=a9T2u$Mnp6%_>q@YcJ?hT)Q>t zk580HYHY=LKKaf?@N1G^sc&sPu3j&0ktS~uLggYzJI{BP3GI|J;)uQ*>p9Fx$FNFp z3aelR@3gnaO6Qeb@V`nHx%jrpAdRK*ve?+7UT6&JT7lF8nb~%N^(ntNGF7MD&=HYW=jTN+O^4h;F@DCT%)=opI7A^ zU&Doxqv^OP#27-o$C!x$hzkNqjW_fkOga<^c!$>)s4VL|K4(a{FI(}ziJbk>NI4?! 
zm0Z`yZRLyg%8fU!6XI@{&rH+ww93OJj(#(zeRA-j^<`{tQ|2Dz5X0+4Fj+gY z3l(omf5qRiIHb9_@81o}c(v_0BCKfii4 zJh{#zqxvs&_LIkRledPcmbphxXpRla-v zO~%vi3&Tu$_JX=d6l#s=k-;-Uo!1o>uG!H>>wNh|bMKmRmUtaNB=vK>|M2f^sWwK0 z3sDS*VU?21Bp;ALlyU^eIwG#C;Tva^l2&Im}OoUKj zp_3@1rgc4j`)^k1flcQ&DS-BZbV?r+d;~nx2&G?{GZE_2=@T!?i_U6Y3+RX*8klt4b zoTs3jMF33qNu%n@a*u7tcr{6kl{vcj>4#Cq3LIe3!L*m5XSx#PO2~r&>EezpQNc17 zFUreb)K~}bE?~BI@|ZrnPrT(0p!}c_D<9(Z?d9AYyUEseS4FuG4GOelLqf1v0Husu zKOzH{5t%pLNHuRN&VR58QSca`pd4z~p7Bs->ZtEzePLyi?!DXOj!=-{TLDr_15M05 z13s;6t>Ez!O~MKSdzvybT3RwP3=)vzp2bOwW&BaXcI^%=11p|@^RT=o=imL^?(syl zM9gij2~r~ouKpqpCWSy9BxT!5;M`?|l1WvSf+1dsjUwqXK=6n-&UEJH#>uIvzIrXptZ18n-LSqVk85(7 zKQm8lidC%o_+-b427(@Oh9=cvS3su0QviwvxgF!KL4GtP$5nOPcHZ0`pG^Ck+0hb&vCnTU+n1PE7RkqIcG# zC0uWE6@=hnZkS&?1BqEx*iri?_F&uUeX4j6zalGq3CN;{0$K8l0$$ zc47bi&UE>a!%eUH&bQmi!z{7)fT|kHX+N#pA+l5Owfk9JTR~ZxG({O#JNNB7tA7CS z1+ctMFe$HT)myngD`0f3ljHi8eQIl^tZbqQG&0*Hn3`hfRI|a}$EzmoU zDseL$4A%0UhRYt`6v}_rPhENy!|&L*n9``=xwE^uc-~4O?!5XQPl#Aw8W%~MP#mdb zXH<=GZ(=laXW8Bdfw=qkA^jlpi#eZYkZDX!Z35lPWn%^zLQuP30+-=7#NzI-%~X1P zJ+=K9NQ?hMN5~RL2f}pw z!c^OipI2OK?@8{GKMpcE&OlCUOGU`3zYhjXw@vuXC%V z8zX(C>H1MIVJPdl#er{0(C)t$2Jij|R!uI zDvYxEs`psY)JE2b@Jp~b+o~WEUqcr}`V*RNOwXvty-UCiW=)!+m1D^sV;wnX1b0Z4n^N@^c^HI3c66G5;CMpx6*Rq$z$ywpM z5=C)0O+5Ug{8#Jx8$@j$SkSEDQXKf~bN)7thiai;9L`{DmuvX{t$@)YO!5X)5V!SW-$7w-}YThLdt{Q>H)QslT=n?fM)h`E*1O@X*cgLbv8@U1l?JMu` zIDII1+nC`L6q=NatJz)N@j&Y^#6KdY_kRZI5M=R-7cX79c&w%|yE^K|sZcX{ftU-^ zgl-v&-P^Zs-@0}Cvfc817n&KZhTI~^#g*XoR-w_wphf3lNo?nbTmiTgeCL+!r6Pc7 zf=KL888|6H5jCxLoLZgQx*^#hOt@86Qc_V-;`;ERo7=;)^oZUR0l7AbToMJ~Hm|Ma zG71?Ga|@Fh*e|Lu=P`bPXqlTVTrq^o8=>zYY$E)RY4pFCM*l{TA9hl=2aQGoJFkYm z3zwMUXwC3}mNdwyibWJ2gXw&?pk}v%zKx_WlSJVIX@?(ShdmKEcaz zqERqm(tR@Zrm%3NX#ZwEW_N@@+b99>zu>aQDUI1uaG+x?G2@C*M1l;&g9U1>!{Nl8WE-wj}w z5HXK`#6(Y#WMXt&5Mv;a05F#TR~1+2K|v|T z4;y8!31DNs$cIY2gFDAFogfI?b9^;Ji2#k|(pAB(OvpHKfsHZW;$xb>0dI$3+6O=c z^9HZhC~Yuwh)y0q;B5j59Uwa*aki+>v^}Np7YO{LP#8Y(3v*nsbiUD`?Z27T@#)AP z7lyLkAqQn8xKXYB{o;$IVuo^>7zX=}5<40e8;9z_xcX-F0-$tRM7JKSe^{bfFBk8m z)I}+9p#S)RM#H=|doWb8@l_{?yGw>zFsB-K70&43PGQ^o+3(_r`BhuSB+jtFV{v*j z)GZ_FD(Z2;m0d!>&|4U~ny3T6Y*t#lLYyk&mm_TsnJYtZnhwG#){j#&{?#e;x&JGK zn=g$J!j0Ft{F^ZF%)jQ9oO+T~^+`0%0?!zO64RzmOm6tlmG_fVp0NLR=z04vXjw18 zsXmA;>SCiA>f8rOBs2o1y92SMs*F{+eJdzBR2-Q1;sldRC?+Y`JaH$qiTK+a zOynC8c(&g!3#xav$I9bl2Lqz=hp-Sz4cK#p5CXlw8I6*4U!5vTVct3q+n%3{`>b8e z<<%!7%OIcC@N>w7LzD(SeM+M}eVTL2FsLkqfn1Xe&FX(5*AzOSCD<1(Z!&a$#OywJ ziOxj&ab~FnY>t(9?|N@OsyX)8!**|x9jGGQcYulbV|6s z5q8HDXmiv}(vSn43To>(p{L%0pPFdVkCO7Ctk8Ecyci=P>L%Sp$kuMJHREr7tfud~ zhW)+a0v8)^HTnC~HwXd(0Va@r(e1&UsesC`o?>L)&MnCJ_WeNs0hPX}!yNoBLD9h~ zI7aOW!cv>5PtP$uP^NSbXYybTU`s?w7A#l9=&MNjObw-tm-Zb=O#`1lfM(bcDm#n7 zLA|?J00ZDuQPi2^h+No7Be)!3Q7^M?Z_A@=)?a1kdK!>D^;@%r0iM@l=xE^--V+%0H)IG#CKd6~BG&X=(1Oep2U;aBmFg~>Ihs6Fn z5F{+A;pWAav&9S&z{a6c6%@J@M|yh)H){`hdux|Kk_}<`Gc?gzy-3m9>Zavd&cd8{ z+SS)4?o-|G{bYFLB&FRJ_0+r2UF3DGF%5_wiJ>A;GFl;aVGy}9yF*S_-408j;DA>w zAy$5SV?*60NVpu((7L!df3<;KSlE?B zn^Sisb#``kb#>-ud(pOAZ){UTupm-lZhr5&!%Qc)oXc`{TG7g!wKWTXySZ?I`i%t? 
z%#g+bY=&Ju#~MxBOUfkqTu(?Q?ygYVVS!U65Te1dhrfNsg78X7}76K9J!LC-DM?n+LuhNnm zZKlCkZn=%M1YS8EXAJxjF7v;vz`MTES>!-*p-19JJz;A;)e%0`?H0j9hZ4MweQvE> z;@@(cZr|F9mRK2n=K>vw2rM5YGANPp@u-BQxr}nQh6`THW%2RCx;|?$!n&KxAq0o$jxh!ShS36{gZoNKN^WGC zVzJ@))i0@Yb2lR+B4}Gpsi~QU5N9AxutLt(VLc389WzYe#XTxUqNfLIilrg73z2dg zBAultl`2-PCpzr5xm@MxN@_gTuU!HiZip?Y^({haMA)=Q+^AY!!F$`WA3}ctuG|=C z8+C8&?3lQ{$Kfa&_bMD7O|5n5>SWrX8x4s>W`Zb#%vEU<%`G zbIPGHkqz{>iGktat0TNHPYm>~jiRx7Z)Bf^7uo6=?9}l_L=YFo$GPP^JSWg?+&wc1 zxNLyL)T1_tLlHtGd1V21`RgM~@QgZ^M+|y0Egw`u8)reyKseL9aUouz3ki|~V77gd zyi-#-?4r6`&@+^*hM}z`tF>*~E*7{TixfVhx(H z(R1i?h#2&o2ozhzjnBR=J+`xJ>Xlc6^x!YbELoBheF?H&$0|IfI@^{;OuZa-T{iSq zY1)OwIMVja?hbMBG!Ot=#>g=X3q{Hki|O|06@ckX3|55L&|Qr|i3?|Ow`qpOKNE76MTAM>@|V>0X}+0l-cBl z2CU9+ScRst5zC-_viFzjLEG5Kn~@*V#EjoXM<*mix5#N0^&9ffvo{^XIW+R$Wm^hS zyk4_vUCbLmBFN)?nqU1q;lpevvHESW@dnV*dQ0%h>9+0;XQa9AU?&2qRCXsWt7evZ z3?etdc4@xceHflL%zBodMGgV-tZ#=b&t%*>{ISFTYtLNo1PB?RLinDY>FJDlLA z$xT$S^0)Dj=jR!1HPPru(TI(;#Kub~=>=o+h;&XgA z_=>ouZF1UkU3HX|m93-rLGqy@c58w|h8t4+Vm%o@l$qcl(&4x2+IWv4-j<-&P6$g0 z)_3lp10um^0!u{Pt`{-u8M){@EGe|Xu@dNaIDA_d|MD*XiDNynl}l~9e`TeWYrlBC zN|akT6cnM*Xx7m7OzkFTQIMl@X{*{p;-WGaF-}?z`|I*UVYzABh6bY7D)2FJG0s8#OrN5E0llsM^5IdB_BXlp5|$_lS)X7kw1@p1vVi zP4ker>dAHZfomN+WcZY~KB>HCfeLbWT{ijI8bvFG`y4_qfpFI_|D9l~_xFwIIDC+>q%apt4%JS0Z}7bf7wzRmj$au|m(JBYC2P;IJoF)c7#nv<)EGVr zH{wdC+X&udkj1mLqM0^NP=}eM+XO!MQdFD-%r33GV~_Oj)_&1w#g4TtzL>x!9(T}! z8&OLGb!q#G_iwr;GG5YR|Xdwwai<8O~-KN)15L8cxTFRfe=+7m2g zM_G22vb!{YJo!MtUKep8$zlszFS~7npgiyBHwz`6+33bpzbgZpRr&XJcH%MBI9&YC zMI+Qd!03Hl&O&OIjh>c+b%$eC5nuLkhvb1ty{(g`hr)y0JkyGH>u-#y70LU#b z_sC8B`|w_zO7hVtK%UF|Q#J8@W+s5G+8=Mg8=s|Z@pK~HaNym>i37Vt>VWdQObw%L zG3BVz@c!$)Wp-i!t+LunhE%b@ufZ>T!cJ>c%>5}T-H^kXHqY^y`0M%VJNf1o<{{!6 zr*REG)qm{qEFJ;GnlJvKYP049@A=12OB948Xi*MMC9$p!?XidrOG^z+7LWlQ_XoIp2MNLxGW(=WnbG*Cz`}gj<<3*fVP z5=Dn(?;kvvOwt`=@Sv|<=#{F*i#Oeu@oTk*JU0sSwHtb(FOt8>UX2cLVhyo=l$BX! zhe9DKPh5)KVyqS}N7&afSTij6ocfD{ZGD$*T@qAqgW$I8`nL!M3vt2LbBx5H{IIU7z_rroW{xi!@s9^QpC)yErs&+ zxtgc~Ell}K@3z4XAI;%+Qua%_86Lb*`KDK04oJFRdG(HTQNAr zO=I6j?z#tKe~PN4h@HF}d@e?+{I%aP)!zywf8(4{Snf@KhY#vT|CkUY6cOcDqGb&q zz(y+x=z;oe-a5bYDK@m7bgf$*%?YwE{xVj9+39U$PZ2E$a+Vvoycq9r{+Y*2R|qx^ ziGfWl>@eDh%a!D_^=+&|LW0*vTH?5Pz1|hs%nOoM+tZ!W3_Byy81_Oqy}a0yZl#i% zTS+Q5x3NYOuk|8LZ8}xWGp6b=OMg%oD)T13_ruzDolvQ_eG`CyH1m^ctJ!+X+og$#)(|C<@mmA7G&o*$=R&7Bw6ckY$Yf zj5h%X8y}x6QcPEJN2K}xW`93IoAqLeV1_+&Ei4lY z{)aIBeZIxkN!&@y1jSWzu05UZkSD7p(O)j*d>Xq0x^#oGqs7 zSWp%kv;EAaPYK;Jm!nmdB9P45OOV z`{*{m$TCcuDRORh5$uoVU~bH&tDFndl_9x%d?MH1_rA;ZS^2;~fVEXR1RDT_{E*7$ zn|oB7(AwuyaK=&FjzFWVjR4q)GE~=ESLC4=ML0i1M8;Uum7-d`JK+5cZKHH0AZBWB z=n~sci{N#hj~X@FawzM1AIhXT-F7$FP30fA&%wkV4uwoK$>+(P3Ys*>)- zrFh9N!>4NL{6_JWX#U&p4IV_FbKw6_R@PA8Hd^+cn?zXVh-@WHU@yXsi>a4tQo1XJ zOrA*i@mP=b86?u9&@IOAXt=-C<&pOG*SwwWuxkGI6|WpT@Em-9E%@`RBoF;)gI96y zZ_Va-6}3&DKVF$7_=fX+?!~<3fu>F?726uU%F_0@5rqkjXwS+#v+O^3u}kqV$3Shs z5Zx+#XS6FzQB1qd%FuIrb$xxRjkW*AgPw2G{(tC2NA+cBg2wc++RmPag0)@^q1Ce` zR&}^XC+BnMqv2)f37-tfJxrg6TEgFD=n9mP74=!?19V%W>jtC`&K-wb8?&-%03$&$ zL)Ae0Tlmqu_cnStutA$?rCy-IT}rg&+X^o^YmMUK@u8BxeIwdtmYcQh6XtSWPO>s~ z=$5#G)mA}RUoD>^joO`xBoaOplx}p!A$NKjaV;>bo$-d;1PJ3a9O)(d&dw{~M(PR) zRPjs8Y_JtG_MlU5MiLivL_d%2e7qCw6Ku{48)^plVkhM&oSV!rY@FG8k0iY^9e0g! 
zSZ>{?5NOyhkAJhQN*V?0#{eVBD%G(jZ!8ai6SfR90;^|CgFEpi7Iu)a3bu|2oQ$x2 zl$9i8s2s^B+1Gk5%8)l!sJ9M2#=74vUS0w*_Y$J3N?CkOq5w%&ccc8nrq7vDC)5_5X(#s_YY*#P0LbKPUidLq^a8dbG3#!i&(#A z<*3J?)3KXnI!D4W1%0D?Vi6U#E-Zx;&Rn`SCYS5UQDa+f5_K7Q^h)u9%QK=zWvej+ zqq4WU6CXD^tyH=AHrQQnN2{c^F%HjX9v#-0<}Bp}NP%02-Bon9qvvZ5+FRFNUE=e& z@8aU)0~C?AHULL2W@)DYiWp2m2~4et3n?iOsW$M;`p3Ol|0BDCVm_ zio;*KZPND@AYRfbvpp_G(iejr7(IIX`xkdv%l;s|?pZ_b;y-=r@;O=6*Soe@prA0f zu2(qOhy@)D8nNm1Iu!^@$%BR#*GDUTqT;CE>+pD+sajw#8t@D4Y2*mu@xw{ERyx?$ zhfZr-1x;YPZ2OKijy)N(zzyjgmEr2?RNdh_$cE#pDo+wz&4iLb1A%{8j@Q6B_FnFCFE*6Lw>x?kxc{|LWH9iH>x^Vdl1&^8`)1eJEQF{T-G? z_VMmpb>L&Yzb;(ORC^56srx#ORhAVxtxbRW)ZP8*lPdjxu=m|TO}6cx_*!3mEeL{i z1r_N60!mjQpdg6!u3!MEBE1F+N=HPHjuL?o2$2$c5v3?dlM)gHG?88d5=tn0ClBxL z*>mQ6yZgsEGyBb#VKS41`QA_T_beA@TyazT84}9jy}A5+x0s; zQ@Xo<{_O7d7;kRwNY7utL|5pz<@DUG8t^2s^`k}Cx}Zu-OofVOputP88cHJjThM0M zqM%f>SU9^wOqYr1-L%8$_sCcL4F7aeKQ-2yM{dzgzfqYu?!UK!-Fjxe-V@|P@gkpG z=Cr{yAdJjgO7*dW6@?@mH6psJZ!EF>=J@vn=5aY;o@NTnLNp*p7SB~Q{_`i^nvb_| zN>GayqeZxMhiZ}~DDaX?!@y!3XCiAf5H;cQcccaC8_M)K+fm?!pm6q1E&$k~l&0Ci zom4_0Mk%nuiY)f{3W4ZL@iGW^3etpo7;-uF^`B@{n7Y*Lo2xw}m12)HcFxVHc2;Tp z*nq=7;0~}JS?XL_#()}?AVkRi6t z3RMoB_L-aXY67Nf2%HsW;S1#onQ&~IZY2s|Wey70!;qdRu;vO6QP=y{G|TQL!bm#6uHJU7?B zedF7%C^gs~=KparplvXhVZkt|E2+5k^aUtiBsYBlJv z1|CX`hhaHuh!G8s*!&fb*u;bO9UxnqkBunbc#Z&NgaLsNJ$Ojfj56@kdgCW$C|Db^|c8t6#bW8s~W#9jG*1($mqq7F3o}q>P2YdCO0*djk&G4_y0PS=- z%AFBEhF<=cB8~p1f+2SHflRY8)(ApZyphiP-*9UGUE}$0yYc_Z3l-z07#PzJiDKqu zUpuYql~w9%YKn_%xUq6>;$lNJ$>p2FwLw7vD|3T*JSey@TpK{08B)09ySnh>N12O~ z(qI{`c(j{{t+z6#txx-occcT``Fo{@z1MO_y6<>nI8sA5Z2;#q-eFnYI)B;!6cw?( zMI>(0);0mUqb`VR2rB82Qnx}^wy}K1Ls3SwvE=6}8)L~D)=CzBzYY6b=y;3?EBnrv z*i0p^x6NU@`(0VOYi9=o{L9>8eHQs+nH?3~NdrYJf-sHe&owlrV=u!BE)Y#OCYuVa z(o?Jbmp=gP?C$-GW#PO@Nfm*9Ha5Y*?(V_L5*yNdL%geSK+Zef_q! zayJPHIoS)G8XyZlzVaO;?>)w4t1@Q@;kMY^^ltRzqzEW~$_3SPxD|_;S6m(kPeGLZ zD#7bEeJ=Ocil8hHQUUxT1IEpzeaC8M+_TlI7EBgJRS{5F@f zv+>rk0_{eAU#VcX7#9|-nDn(UfL9Cm{&qY(>rmZE?2c(^iiqHc4?VPwKPQ>oQ{$C0 z4!V%|B6tB*q1%777z;@H+ncR7N&`J2PiahCxEjrTdt;T%C8xZ#di`zlyz5;9`>*rc zJ-PMyFuS6O&Klbh2ZM`5j+Ue$bJ4!q?%{kTg`!N8;i|9o(ZMl@1eeY+GlrXXF%j~f zZnC~@t}(gWNS5UsYhwwBtT49%iThGd?}iDI$z4pC8BD8x<^5XNy|9QVzlOTy>?#TC ztwM*%u^6Stv7@^|2QuSAZQa+g06dqr;)EXq6W=C2oPHvcm(n`1RFhA)?wC-2Urf(J zM3NuAY~c8nB@S0J5BpFA1{eHxJWLw4+`4MqgmH1!B3ayH#}rztaZUdTmo+MVSH?%$ zJfJKqVa2Gpa4`ZJ_u<6D3!^Se{pO!Oxz+YX>BdF``JJvec~x2A($3P*8b)v_?YHHC zJ09!k1T};^<)&Eh$fwVbstA2240MY1pl1}Avu+APRUBBMfbpA#2oV9;Mj_$QF22hE zA{Zy&Hd0*JLfsJw=qoC!?K_ji2N%)dFNN8>||u<(h?q9j*ht_x{t@b z+0oX!txZIvtu1=UzHxsf8Qndu*IhzrNm43-+q|a#nyTZ@{!Eg(+aMLmE_cIyV6GpN zoo!xjhF35z^YotmAoaEnWkslwZ9H*OvUM>M&TymiAam=@t}NlZ?zPRRS$tGz(h{pg z*Y}izk4rK)A(Yh$Ct*KN*( zb#yewlqtoGz674XG%2!&4&kv9qP^Ru%#KkN_j;SSxfKI_AH1JZrq4B#tlLhk^iAzh zeSMktc5t#-X9f?SN&t1pC%}I=sC>$($3jqtTfv*8mPV-&x##F;XXiKt3BM^*bO2)l z`N0IbnygkEiM%$#;(`KnJE^m5yY_<%ywQVJ6QyO)mS1&3J%U|hxE3&tSLV=z1IYl@ zDd|4>tx*j`fFE%4@bL2T3{7m2i6yLFCr_T_LH6)`+K?0P4%Gz=m$DP=M+P; zt6H`iyF{%AHeIc6WSqu|qoA+Q$&_OUkQrqYf7gxP` z7y*}G?t?Ex)4-2_vLQQx8~Xj!AP_FITbt-XEt&sqYD8lu)GtX@vo23ec#iuabL~Z)#p1fsmKCILLjeYXtG* zLm{ZSgl0cLzkWKD{0^y7dtabkqutl+%4V$D>-u%@oUqg|< zu_K*saho2>46Z6(oXj;We{{#x`tZ(AfHohX7V0k?*2gF2wRQI>a`Rhte~1yBAKS zUz@2^Vn<%(X#Ws(tNhs?cFDHvHn|SO=V(;JzKl78bAKqv$S73vx_k`|`k{*0ezd#E zi;uD9-cW*1_0!DIAimsZeqv%_VZs4yU-2k>fBC|Y56FhC46Vxl;XPQk{M&iZ=qAD| ze`1LgV%w-omjBZ4r1%iD*b~q|D>Jmyo64Pecn^iCBa>{0Wg&}Zu+>OiQ9SD*Glq*t z$xB$}@%55~KZRnV(k@F&9hYp8dc=tPy^-FLSFG$387c^hjn&P@(b0s2(b4`r+yW=% zq#myS{;XNULaETP?5#jYBykynR;V-IeqLo{%Da?yt{;IS`py!+g9GBX3lMX(UY|P$ zZR3>gXS5Z3|J(>-76I^P%CB#e#gH;Si>Hk!g&G-4AEi6)Lf 
zB`SQNjNwH+*&j*9nG$R~uFiW~PzTo*+x8!=+jtnrBbJ;|_Ym%1Rn%fqH_ANfJd^8I zdRA|Lk)QjwXkq=xBJCgs8vIRs;5@?wR4J_Dz);(zk0MsvrDN;-D;6j6(S!6^>=Mgz zwGsVGNd>XFz7aSp%d8rOj$n2G2E}sM@XSoFfvlQIPVl|tAiI&+ihBmCU&T`_V5Rmn zYDUKFAbEPm2Pu2kB?`+S7OqQxWYeP9{byn2V|&E~1v@h{(@G%#_K@AQg6^gtc(UGT zh|R1s@iI5NS*izNW&U7xi4uO|_o(K;p!x^*?X9h?O`21A_#Nw-{^qk9ko{>IEYJ3m zm$AbgZgyJK%FFpIJGvA|rzc*#Hr%bw>gBau(rILD?$Jx%rY~Y#V`N=-AlRPBhK6g; z_w6UkI4-LzeLNu4EK3-$S<6mCmW|Mb0utF?cqs`}Z?a__w7E$a0b0DweyE0xdo<3MZdlGu*AA|@0 zE1eifGeSf7@AF6hN$)765R`LYT_u!sDwW%|cy!*p+5T1grr{7jnBOvILNT17axMp3 zM~)25jaZvkBkHHTfxI+i0Pfz{Hr*S5(vXdf*;*_t0V|-Z(2l=&rw;?Hw>xdX*YutR z1%vQ~t3hJHJ^{<1d$qB3t;hJ>5YSbwpjz0i!wj)4f9olyt<=m|dvONbFedAvd1yB00Ei!J^hU*N)U z0AJv#UZ4(N(e+K zw!WiGEbWr}%s@Dc;R;)+oDT<6`?0>dlKDp6H0T^75Bm1GJauK(^#Mtt+Fm@nHGUso zzAHGJr62o!GS-*CQ{CzlK9!)Nsj||I0qA^Pgt?$VpW=@~8U2X1Rrh61FCs%ERHGA3o*Dr>}aKC?n67EX zdUa?Z^ovdS;Qpg?Ljen?hQpgYkR2AH?k6d&GgqIh`pro(rMxZ6GFQCT-1#~*w5cgH zbZu=K55Og^LJRjO`<;OuZ2xgpzZ^A_beOJ$)35Z=iy{%frMhKYUK&d!_}HvX9?+Db z^&n;M1sZ9;2TU;_|17<*s^bq{@9O%CaKK>W-u?C-fm`*y%25Yn2rC*eolIU1%Ki&r zxPAL9I`Cn-GFsGx@|BC4ZqMqDdltcQTc5gz%6??93Tu3jo>6GPDfw&f2KuW4Wi z!l1+AqLwDf#?w~Ex6H{^ePDSk%<2l`y9Lgw2OZesuRU35ZpP9WxP(=;Rm1nfIdLtx_58F2Ry*<(04N0A4A9pRO~cq!>> zE&a~TCNbsDo{#v8&;Wk}kjyIeX&UZ8MJPbyIo#Vn9CiumPq!C_gwV46;vy>&CzCl?L)UP;`*VFcG?ci)@S9FK?WpIGPWt>YX+jXOFG7Wd zdgjqU8ilw5BH%=qkC+zEG*fRO-QshTy{&E6*R$y<7x- zJ_!t>m9Yghb%t;E`e{Xho&5m{keSo!=qt4Yr8|CHOK$`+uxEXcj_vM`sneSzC|P z0mdWvr;y&!&8?(zx~umh2g+8Xj173g;0Yw$DxzOw`SB{>CWzt@N_^Zz2Uu~3%?OJQ zvv_MPCTP>EhDA$lsIsKWe_W@=gVflK6x0E-)?>WTa&(1T=@>0`Z_En`uTP#1Rv&;5FB>SdI*6zII@Ri1J z+ZLTPlu|4dnxpUXsHjF+=x`VX^t#XX)n#rbPo(T&3Sr=u0GAMwN=f%3!Xt<-HaG6? zn1AUrzrDRV1*mNC$B@WRpODDqZHcV`#LQ*dB-fCxb{4B{sm4)lZ9wLdWNxUa`9f1H z+J;0pSKn^Q1{Qsd=ya)YdOPuwz`5@O1C<`&YgBIaqrE*H4t>9WCy4a)qz+z7{-KUs zf3KiEqN8n5by1QTZ*S$MaZ7@xqm3xkaG4vf^$={%O!^oVFushB#j;9_$mWOz*gy2+?Z73OeynF7j%APV&p^baZ%dB-)PfXU6h* zRe^w0!eULsDfr#XtJKS8W{!=@xw+)}_OQup5G|9*bM!}VHl@F04~e9m1^h1NZ_X%p zhMW+l>C`k^viRI`^t#ngPNIh;e>DYV$ra;HI>qwOrOf!gO#jTsVQR_&dWu&B2i-j_ z+<#H_RMJYx8|}7h+yUC^8heBl()sY$1Ldfdm3cu9yt-;QMdm&M9DTl${hz0@oGw{k ziQk%VR1`u|1+S*%Rp=}VORz|EhN+OW9y5!kF}9=Kvet~tB>a9ZKo4ao1}iG?yo@2THD5GW3(Mjo zRbjz$>jUuv5{!1J;J+}YHo3v63&I$+}Hji990pT5f;)eS>^rA zm()(3S_l19Rq|XtJBu>^ws%y7o^zHg2DjYrT>=$ByPWBg_C+c2OL#$2Rbg(1k+#;a z+0*8eF35nG@TDDXO-qUX;p}R5R-uVk-$}BmxclrtR;ZrDnfh(r%&o$dvv4)+*zyOM zQ@)xLS+gV{Qs`538HUQ;6x=GY&Rxs1Ej*afjyv<~rbJ$LkzRMa-`lDRDu2gq#GQ+U zOGFcG?{8J;R{8E{#@0NX{>yzeviGpR{QU)Dl>7zG#gbiH%id-<-0@zUcLrfn#pa|m zIdOIEk}YJ2KSSW4^fSXI4BXQj6Rfv54-wD-lpX}+kdgiK?SDIAyKzP2>uI4xS~*Wk zJTW6Gv9a&lz>O()Hcms4QbO^e9Qza4li;kpWAJc~pNQA?>ro^$y_?!D7%;aA5jFdU z%I+y{6JXQL!bkj>>hNWqSC;R<0X+o2+sFI0u7uIH==^iXmEL#%`8MelapH`FpF0(6 z3Gh7LViM;-p!x3OiM8fuN0thUdf8-jO!sgs@jEo>w=I9RQo)Ug@wTU&<~sj1@~d6T z3s}(Hx>OAPaNGl=G8Tb>v6!<|j`_y)lPmXapFDGS`89+mv`CIfQ5C%Wu3)NDC~NB%`Mxs9=j+mQ!v_~8+0bxmx78T4z?{X zW^Q-;wUxQLp2^!ZcDHbkDq8Td4Zu~{lX}GXt~)_DB!l+ne|*fRact^+VmATl^xI0l z{z_woUH~tbOh)gs4)TCGOC>KbOwc@za$|bcK)HWo*cX3((J>x(>8Y7IB)RvcTz0zg z;Vc=OERh_9tQi3nCB>q$v=kXRa>5whvfjA%^ONquix&|Of;qCO?3XTN8`R^i z0?5&LtNiHHi}QtF^mBxDttQ?F6K>e~4PLZG*9EFKM;o1~@&Y`geoW1LA>PwD@NA`E zQ>vYPzRA6`xu4^;R;D)XtjGMqRA&skJOfREe! 
z+z{q+M3{4qqVU|v$Vfc`0Z2*c{3>t~Rhj7$TT|0LJ0i*1)q6P3hkMWOb2fRx4b^3MuMN=`QV&-8oX06ezsYg!@B`zU)QZlYz zSC04c!--p4nu}wp_8m2AcD5=-+A<9lFS*R^MU=|}50^wY%eU=EL|%IVVoaQuu*zq9 zJ6ZI^bk*L{ch^2W5lM4NNm$7N8QE%1hn1*8c*Z01wKMTNx;AI>>NClLR}X3l&odH= z);O1ew6RS!V2W`v!jmtLfsK^1t4Z%IOsKH8Z~c;%Hf>al+;^irU)tF2%lnBnm?2yw zQK=qdYE$4BG)QReExYUvI@=4+rH~GayEnHP%Znnv4%69c(QGcBw6V~0kqODJKI7m1 z$_QKZ;zl%O>FBoayC#wtc^}h zy|(VJ;}f_iA`sgi^Q+$$G9~g|K@JGAYF|8+ViAV3O**|IT_0`k-zS3Pa{TownG_Lm zv)$>M#dFCo#=-YB0l}?OxZ@6vCAtZ(+vqu!=^Z^eNJ`RbgoAMwjr)q>1=V}jaINK) zMH%w5@;7M8XCyOkw#wf6HPW~SCRJ+vK(T-k8h^C zC!hs$o@Nm}@{Nf;e`5d36gf)#$ZpJm``PbJ43qD7*lFMD6L0Bn4UKBk#+Buqo#j_h zPZ10a{n=Xj_G{5E)1w;7l?U}o0tBd_-MJjVD!V^r7SQIfZn+wtdPuS{n4is5!I0ah z1?S`eA`>!X9cIy-1V!cqa7Oc&GQuYQq698ZeVs-q;hHiErQXh%|%YrEqtq z{rKBicD17p4MeSD;eX`mNnYeHYo{fj_}T4yJR_~u{cFYh*d0FqvV-+6-P%~&_VV(>V5fqlOdwnBzz1>hLfy1&SLLkJwpYz0n zf}z2kQ0Jw>isn`oj^-s`7n8~Mov^P(C-y?|=zK`uzHZ zoMBrj1={-P>->D{U{f!x$-s=T&*bp1-%XueCJS<0juzW<^f2=*4RhDCU6{qlwb%FM zVHucOwVx6<*-)uXwPl&3^Qm6DWZ^h!@g%nB{qoq>Nuj;Lz!?mo;uvTP)fHitrW4HZ zJ|nY}ek((0^P3`SH{&kN`{A!^CpA4RU+#4?2RRVut*iGh-pDm#BUl(FNo_l>%nc6@ zRp%?$IX1E`W6ZU*%3YfhSoE4)Q4QAuC~9iJxP2#D_%-bDs0)jV2Nb8+iMuC=U97T$ zJ2i}L`f1nj>E2xKC4Lomu|Ah&Sr7R)^CKz<5XBBVE$i6nepvVBP_2T@3G(b`P2JAV zno*rx7!K2f)#wk>AnF0Rv?%TMZQEL3<`1q%{=&hu*-z-cZvK5+Tgb(8+kLN@Zyc+$ z=jCoRnlD>)ZuEc@l&@xAxvf25-?B#U^tdwL>B@5Dxz6F)eujxwxIdAGCP~j8J+MGdeeA^?C}=YpdY39}TaqJGgrIj}c}@!<_~# z?AqoyERj_|q4Z%YpSq<*TIPs_*uct2X0qS*=4w^K+h!tgE={>UxfD!%s#t_l5(QHz zIl)c;co1CJiKvZ;)m%}!Xu zpo>F=zKY?}Cw8?g(zG#KP8QjeJp(DZ4Zh+oSd~U%ITq_SQpZS3GK%hmw4(N3R4Szr?r1j_-vbuXeWZfyOW)@#JMgH0BxeQ^=ymhykhLBttVVb6Dby>G< zDPAWLX16v1xegTsv|?_gHIR-?!JHcG07*OCCihg~H&Bwsm3)1%0d$e!Rs6h_r@fZs z9Q6V7C%td|M3JMgIOwWVVn0$R zWzP28Y+@f>dWt_bvP(rNA*;*-gz7adh95WGCw_k%4u^G9r=Ay}y!92gT9!}sYF2}h z_+^ASuv45_PRs+GRqm^?zH;d*HW80`d)r9A!F%LbcrywXZUh*|F+810KG5=)=d|#T znOH1I(Zg+pEiL)CEqr=D9|aCKJ$N6|+PDpqSW@Zf!FsEfB;;-D*>#t?NxY)|l0ScQSyy1mVuRDd5doJI(W1kPJC4j0 z^=d8HU!V#HbGKg>$&!+pqvk6fa8ov-8yDsOM-L8pGx>ux@}5(?9C=-c4PmAqSVM<| zm3OiLX3+}eT_Dsk6y9NJ5j~o(7hKKPDo({)At&f!Mfac#EY_uP#7tu9&nBlnm(!LRVdt?5 znPCprkd?%~eZTed=gA%cfp*_uFrksmZ&qi1?cHAz*CIu`U$%%a>OHUb`l5t#>ejqK zDs4#^EGGl_^~MH{R#U4jV*SHPPygPXI}Q$aPKAa?xU#UsgO$y%HDhCAW8W^mWoD+o z+C(W}E!^mRl_#-0nOp8=tfX$4V`P(@WS3FF-r7iWWyJOJiNj&nDJJ6YeK$l)e65`v zqr5!29`(~#K7CzHEjOZK@Yk=d2ViG1xo}pLd4a+5mEX=IR*bw2KRv*3t0?J}m~@fZxtVXydRn6B zEBDwxWF@qUrBGkrFC*qvu$~)s%(j>nkbw(R)$0uh1K^3x^zgHzXK98mz^mD|SRrbC zQ%~a1>F-sbh~Pwg4W%SdVP$rDdaxWw5}nf|)s+RcMwr(W3v;?K$+}3*Ygkqi-6(wx zsf?d6gC%$W*j%x<8%C(q#neRzX4eML)~R#Rcwt&$1U&B=tzTLDX90-%Hqpbtr~ZWJ z++IxVbBa!~YHfaJ#!~RsC^`Jg49S6j75K~8ViY9|in>wfhAnAzpEWbehdFtJr!jz{ zoAN`Hr65YAaw}i*@gHzUgpthMOv{an!+D4eXR4ZWazttua9z%0=~lEevx{gQ6~G@X zQ<5BJI&F|nMN*VT=R@_itX{&WCmqb?b=`%ciu`I%)013K<(rqsfUed)9P8^6bZ8`K zayDg z-@njYEk2ERL`SD9;8CD5#BWho!=-yp8>MZlriSO)=%*=HnYo+L&z`G{P)4{18ksd#OC6lzi zE7~%nAS!8Jj8I`xO&(&G>6EeE4drvgHn;1){zl+h4X)=^MGK_>v-<8sDYq%Pj^N&{ z`!@snMF<=4U0u;-ejfZ2riyn*)`wBi2Uue>cea*YXM15w56S^~7DM3HReOc^(^d0) zB87N!+}PQ+MV>yBaji2RmMObde2=(|W`XA#lvId|-(n4>JpMQ-T9BA>Qlt=y z@3?(;XfA%{m-X@1u#bTMK+XM*r1uhaiDo@qlF6KRQ)Y_TM@pDw&P!hz28CkQ1!1uy zlmo{O?gm6RV5yS^8DVHr_yIGd_TK5Ryc65qpJ5!9kqLZfznMl<8Hos>aazf*Ig(Sw zXsl-Wt1G}&J;ehJ`~c$BpTaWU5h5a2FB8wi~Wz=PS`|9_J@*2l5clP?Vv1DntIzo-GKVfvuv7Rfw5yNpU zCl_mxoY7I8=-5&|_PcqqXZpm)R?%IigT_sRjkljfyCBl4SJ#fa#4H&qidk|WZwGv~ zL)(U4ue<#-!ip7{PfWiPWMY!{pdW* z@t0p@Qtks@DAABl-6W031IMbDp}w+;S#ZrAR5L&^z$fii zs%6?*t)eA^{&4gp}ssJw}tIPUyh znC8f@Uu#S8@$HwU%m_CU{frI1cehVRUux@qNg`_cDH$pLBo3$xBllYcc{xn<8I#i; zJOV8(;L=wU;dSPA-2*m4-{GYhf?1-vYJT$b;bHw@nL|w~Z6E1cAi9ob<1ECN@-@b5 
zdE4)rTvTByORb(oA9gD@rQgB%ysSuvrQ(X+h{O{2Ag7;CB0WdfW%Yl+Oos@XTMvMy zU2$)6G)aIVcLI$NOX)b4OFU zU#`A_LIUC=FUN#3iX${lt3~Ti8vD+kF*Sjl&uFXY<+kbw(q=`3EN5(_h+ttBF+Y3j zhT!y}jdXxtdTBf@lEX{(Le_HP&9#^C!&i#RQT&9O(cHF3p*51`E!zMAF6{U3zg(j> znB_$4s)|oY$`KbQ;+;D|V8`qme&w51P>|o1h>3FN1mwh01(M8pK1Q^ZYzYHicWYS- ziylVDpFc~FI}c3Qb-+v9zrB;hCAKlGQr)4QD-=D2D2`$t1ieXtjXIWMkDv zp8Fgrqn3)~{R*h-$#ag2R$zNH;)1Go1_fa^qShEKc@t(GkbdF|yTl zI%ULLYl!7#ypxkPVhELOtM7DOsrZnno!~IBoK1aCn`6GXn8Zk3IxN%PWlorA5meA_ z8e0v@sFKuXD~jq^$#!h$$b50wyME(wBC10F2zP}$`NN}2!=a(mcy_j8%H#HB*v#6F zDOhwmBMF4!mL`5)^6CEQ-1t85fK>JPqF%4FCIyEd_9f%+5!m78XBIv6Z4OGhY8hdU zUc)TR?JO#(RE8DdhTJaWXlV2#_HZDbz*Y{3B5?NQ(~w>RsmdE@5|i7oHfnD@u+ zEA_m{X!%4UJkQY5u^>8zt9zTGlPoMT%RhLu=nkAapp38h(UVnqSa%XX#?fEdJXF%m zDqvrUSgbcprgdFoYxTRyZRz1J{SO>l;+}W>2oD2G``FWSe1cQpx*8KXPog;vJa*15X*w`p)-V;2#;7lPX{+sXY(3S887AB=@Hz^ z5I`NGx^T?*ZYf`NBo#C{Q8!UIj(TIGArLUQBMs&F&8(uMcRY3IqA?H}XI1`cG@F!; zHMIa;Dx08{BB)X*h)dBu&{5L<24;YyE{ zxpV~x1eJJzXD337li?(%$Hht8!(g&2gAL`cvy8BkUVh7*$l-bu;9qz?+abiaMmUf+ zHkPNCmI!~gmDsnmbY`SOSWf`YniWJV84g9vJzSpp{=F~Xu>23Rh66qRup}MB;GdHR zgc7lXuQxsNR^!oOErvmjtl|FAgz3A3Wf>VRgS`Ot9s*(BRR_bqpuc=s8W$8Sx$@|M zLfsZ%%}-uP7wu6w(Zqb_uv|DTkDJ!I0+Y!XjH#c%ZkIB%h;)oT^2 z=|v3GSTqBNt(qFXV54mT7c8||t4v53*$#$h(?|WzyC@(8CizhnIRj-{T;5=e)|$cu zdQa|Zf#PLk+>~v$=Em^rAi<6l3LAh@b*_M^qtk`asojo%{k-rTJWb?37m2NdxYW)t zZPmKxEp*lUf0N| zcSFmRTF|7>0q{{ncmAWknWaIM31kJ%6^dIaBDP3PGiQu?-S0O)^a8sE$6u2aBc}{JDR_OdwKX?$7($E;FKt6RVE688*LLq_j%|C!*(P!4rlBLEKJui~ z9`hY+7u&lsI<{8zIm!~`(5oO?7LEb;iDo_Bg!-5iguoX9Ski37f{lyJigQBCK=Bm$ zjpjx$d;N5i!su(B{QyGW)qd!9q-+G8M()X~Zw=;9$tI5k9mNBG!`K7mgr#-}YjFb4 zkMLa1>aeaxZT8626Lv9mrK@qN)BCwioX72IF{yHe2;+>AEDSvb@Yz%OsmSOZPE(h! zC(EEu9Ev;& z5yT1@=atP`7 zW{M3A+HifC&XQ9Jw(ycRn@zRn-C6)oLCX(DyNwOYfl%9dY!Uw>wlHNcqs1NLKmJ{< zfPe9q!L0nd;`m_aGN!64cx!e9lt3#-&|I``2klxlvWM^nX!R*qJ}s z8PG8TU%%CYsPXf-KRMP;5(8BgeC8XTfC2;b|NOtMr|?e-FQ(4{c>I?#h5t2mV21xm ziWU5Q2H$f)_&dK$okjm? zg%1ZUh!i9XOf5xuGcWUR4@r$hwt_kbAYK!Vfw*M~k!9=5>-_5P z_YcUFX_;JjoTTSgJOvyO@W2Q_1q3Vw*?eL`GxA;Ad?F9nDTrqtrhnSIb6ZsgtvD{E z%Pj7JEdA}S^E=a{mpNarPJ}3N zoIBs`v*{jZ{UemQILhp$;R>}g3w@yHx z|ESm2$xuffcv7;( zpV||}`GWop)t&9elyQnJHYW6)T={cYr~v)Pb`TRxOs|d~y55rYZFJkv_H`*^09B8m z0leSU6nSHNc4RAdn^KNQ1=g+-YzQ110E!0ccK^<7?vBX6utlri&fH#_-{!ThUu@ml z#MT_g@#aHqGrDx3O%-xD(!sWod#P+K7s1K5or^$MWz&C1mG-xW7iCL|Y03eijdQEO z0t7~b27?vGH1N*ES-$$0C*@%u{cR>VvAU$oA&;W<6qirP!QthCDmg*X2eEu&?PALj0f8_sLCPdS^4> zx%BmcZtliy^yvm`caXk4%-a5+MuJMDP;6V8TiWO#FmOiFVwKlNwrR*%Pyr5z-tNwFa^o0vt z4%}IYCkSWOR@c2H+`$EmVEgFB3Egm z)hANrE|$8DbltKZq0R;cJ$WKgbK#PSccf-_Yb%l1+Daz{dw58ur)}O3x+BuLx>{sQ zCTB(eenucT;qC0n8?meEio9=!*G10iM%8}gFnr2z zvkaY&kY%uk4829n2wSL-o32uCnpSK?6 zA2jKLwRG@kE8Xbm5|~PLkTSo$)x@rw<~xmH5!6-%mr*T`9GNRIG-#tsNmUU> z9Uufq=nRe&V)|5g+w#HGf*GlSru6&wj~~fwOD+XQ3SMZV$Qu#n)(^z2l-RT^&DJ-T zva?Noi;<%Iwnc(DtwF;r;X@w`HAGLRD-DvrhC9Z%i|0*h_I>Tt)$Q!G=L?sVEsShx zN|j@0=ZY;fxJmYa)i9y{5IFtnY#|PGck!S8^<}+jgj`N*d)=8{9eBC_!TD;srUgt^A%uGqil;GVTb?hfJ;h2NW&8+IB!lZEH!ZZb+(bm`2TkCl1 zV2;>}R>AT%^81g}P>GLrOD>yu>*~i`5-b`2wzh-=1&rb4hgw-}ouh^&tD0qQdsk~3 zQ`5pLVmo|RbgsXVCnouY@`!^N-3tHfmQm&IWVi9-i9wa7)RQ+e@w?wonrAF;kp zF%Kk**``iTCMQo$7SsE2LFm}`*Mouv%g6C*+E*I6k;5i&@6CE$Ss&~{$z6EwWSo0- z>i07e-y^08L)cXZj2*jQtZ*!x-ir&0p1wJ56aJImOCJ!OC?b-|0r#kc~NkZY#=T2%JxOQ}uG#DELBA^ov^m#9{xiWRY zsK5N~{Yg+zK9NM*+DccT(a2trv)x10ZEbR{-)sEHFe=4iso@FeZnyB`!-tOa-MeS! 
z2L0&`#R9>hB}GOnVc7d5$$vK#dwUK1=fti2^?Hk^es=)1W&za9hx6btPSlE) zjAkp1sEWbZYTh^1jiyyqRb>DhH-dL#nZHTU1JhJ)C0A$PSAM}PWAju?HxgUt|J#AG z^2)bkeLQbu#T8NBI@OHila1*`YH(>d!bcmPBqUU1k>l(-AKk)9H)u`>ql`oIPtmwq zo4f0MBoGK&KC;W9i8g_l&5RBS%%{=7md2@P+Li8JDFm`;BRHW0%IAa7*I~^|@g0 zKyakA=7wT_=EGn~fBmA(1KOzI0Oxjhy_NP6nT%)Qe}+IOGs)TEd&WTIA$CXHA$FpY z&~fZZ{Y!o>k$EX~bsz|7_A>p_SL(p8U;FvAb(+;aL*i)YI5i?D6HrrJTwT4r@80z; zBSgHFbU)c5J{lCenZl^$;aHQq+^1`MLuy^Pt~r?{-n?kuUF@C3Wy7PTH==kYAKJA% z$7tF6E9{jVZcZ+%@Ld`s*SE$`Eb*z^Lcyd^VXM(-A5@K-5oRe zax}&0&PT_Tj;$mbp5%yn+FMZLb=>L^M~8;V>{l-oK0mp#WtJJHED8;f;MXmV5H!qR?Rk=6jt z2^xarcIQRI%M~_zTH}aJP@bp|= zTGH2FTEe6s3Ex!m7#s5GOv}rx3I8*;s0i4H&7AnO&t-}kCCV4scD36HFYBLwk%#mzZ&b?Vbv(%rt+qAFJP4c)MZD)Fk z9Q9gb>arz#x#8Gl4Hc^V;O^x|HNKdoanQAFW34@OXfUX8@NIQrf#Nd$YN<4=tfn_n>*>HpzJ(xpmrR932t zbTaG;ECw`ScXA?NGXod%twD>Dv2(mODW|h`OFm-7-d0&NxcBmuo0*dbrZu2x;uA^# z(lV#+A1(%MPVIHhmV0z<%`V31aZ~KruSDMen4}7NC*<5wupZ^__>lCj;An9pB_ezf zBcaaOqSUc8laQ4Db$X*4zk8OAMkCns1^p)2x9dEz7in$O1635Px7XVwZ1)Ven)F7K zHZ};IG2Cl}&MBl~w{iENq>)@ebF;K_TK0$r8!0Wy>D`R-Dz=KEBr+evUl?aMf1cD= z@-y`S$HtBs(Y~*kJcOpy-t?|%ZI+V9a>WLR1$QXyFMC+e*^+&5MGW@whcF61XnZB= z1BL5`Rj1rmZE?j6?%2GLyV$+Ku9_p;N*;|wfTh}vXm5(l~uC(^Q7cGWcxJ&-z(KucBrZ=%+Hp=;3&Xz^A-dUUdeq_fI z(&du*46YN8+oFo$%fkUiX=_cNcDYM1V|im64SUd^ua>@}oIF_+>&zvOT!{%DFcDtq zGuF`H+K--U!f}XA_R-h`?k48Hv2wHCOHjQ#JpYVP>)JOBL%_nMX;p;QICH@I5mV_x{e0v(J0>7~dItyk}($5eCd;&ZpeZU9S7Oi1K~L zNOP0EbDi6ZEC9Kc(Ei0%@fKKi1`LN>UWi`K6J^S=mu~g)wAtMhqKl9Mmg6{*v*c`& z+_R3^UFFTC)owO{z4@p;)f{9XCP&5%^ku4i{X<7Oz0dUOwO+6GN-v_VQ1WG=^r}T( zzrMPdfu85i!sYagxvA= z3#sV&^OZTwcP`b^y(50vTt+t|*wu^QN!-mBJhgSpH)x<-I&7b2KBJ;FU(qcC% z61A?mGOx=ofQ)Yu9%ffRi^v&Npd?buw_I#~&yp+Y=S0e79oC!0AU17gPHo9C`~I5L z(>lq~-t9MbGzSd9MAYVEgCTQfD+~WU|&x04+X<(mKkTweGeI zTGiT96NZK+=Q0oJDYCHDFx$-mjxbD*ZGh&@ zZE1>J4ccZ*)!DkbfnGW=FE2O~2Ex_KXONmg8;Y`*GJ6&+izD#NrwZN`W?CJK!PBWR zmTFP?8-mXT8VOMixjrnx!YBL6eN>zz=OeYXgH?=6CGvq(9>XwsD`ICd2OGSxt!0^6 z6;T{#eUg__`VCQ53MtykSnuFH_pN_?K0#8K`Gzr+34SWa>)d%o`1YIBA!9NT%)?B@ z0gjoPoYPgfYa}T*mf}&8WMah{FF9fVcFFawy!VCCp&%5QX2jMmYzO~rW*k*9#OQgw z`DLAmkJq$pJTFQ0m*;)?{(T||?HKG~Z$E@v(TS|5qiokpuXJOb-lu=KXB@EIlCZS} zZdPMxWTZJBv@RNsweL%H-2Dk3HJVjal9wU|{i(X&+3S8B%8YFujK3~ewdon)M2FJ|-6%|hF zc+|EOvyn};Rhch43kc9|Km;(Sbp`Ze1a6Z7qXiqHwG7h^I_~ddVHx2I6A0){FF@232G@$G zW^V)?N7i_bsL=Ad*Q(UtC{ljQ(5CVNL&nHngWIjm2-b@7#H|pZcumKru09>>-cLW_M{PWOwD6Uq#P1M6D@2 zJ2LB{inKIQjkXWxUR!TH!b1mNP63Lz-Fs6$TRF|VBp`pQ@puM8?HoqhTTYE4Kp?R2 z`52Vxw0DItnMdr;KJnSEj=v^&ug$Vjm8#}6n^0)5g21w4*#(6N^Mt9pWD@})e@~?7 z*&mZ^?HI@>5H`9729jm4!ktV?kX0?6CD(VQG7 zr<@$0`QjGhzMFC9lU>eTwcg#E=)>hqw5~z2!hBdbl`2$MEO?5;@X6V!}KC>w) zHCvvps01hHceWqT36gK5$}Z%lTHa&~kt9o2a8FIw9=%%$LP8_YCt7*lV?1zstvI|MEIv7M zp5OX_1UK@Ob8y;>6%9^OiBwJ8*7psrajdpW6I&_ZQg3L_=is|5Z*;Az{D4d)4*DAlC;d1R`kq}penyvNyYEFNl-`R_A_*=e`8bt7{#w%VmSy4z!%SX~yTxB(7zkp|P zSh1JcFt9%RenK#lL(>cWTu7)EdBbhhf2^Ux-6&6`SA2ejZ}Ex(XoXx`>z*-Gqk`t) zdXN};VDdLenlU9#chv=ikYl)eD`Q(i1zHKZ?ZzoGyrJG*CObr>Zqp>=VdRWfK}St@ z50CsYy=0cqr)@dtT#p!b>=NN;`4>w#&&(<9N~(5C)lenulO!njGQSqah=H}^n+*Z4 z!0vXimSpj{F5N9GoZ@A`DgVK9X{n=QX-U=RQq@yy##wXA9+3Ndc zmi?EVMQdj#jUpp1HX7Wzsk$o+-g-7=#bytB74_X3^)Gonc+{sY4iF(kxFmy z`L>hx8g;q7@4yL@L}BINW>IAJ-i8P8vbh`w=DY$P7)xzimL?2fa^tL%)(C@pXDy2aBi3Cn_53l z>^Chj>-6M2d6JX!B(s128(uwh-3+osCt~VLv+BKNB@^CGuOG^}s2|FprK;u4)us6= zyOvi((xtjR_ajVi)`+(w-mJdAEF$7ZUyn>VrdyV!K^*+gyE&itn3u;dfkqPzf{Xq{Qwq*f?j5M#buna+{}wvm#36Qqg_R9SGB+14D}mi2anG2~FFz44 zoLA@6i6ERcsnC0=%6gNNk?7Sr@~Z1|b5K?@*d56Dasog9P(Z+g!r7JRsYxIt#(%Te z$mNV8*(56{BKxa-1(gD<-LEM+MVXv<@C5BP!Ad?z^sLQsxAF8hnZ5|hDwfinu>hf1 z2(Q>{5e}n;=arSUNcX-RfAXYNar>R1!q1aD0bA6~J0=%q0@;FC2%XlX@3uat{g_>x 
[GIT binary patch: base85-encoded binary data omitted]
zB0f&o(aeZ>tAPEfKi@x()HPLew9Jg8@paO|oA=n>4#V4T-=M-cdUlLq4P!iaZk)%j@OT?oN|06~R8o z6BzDeb8Qr+5_5ytHSgdCzr5J%;+D__yCQH}rP@Ks=OFaRh9=`F@OnY28VCy@_h3eA=WRZ|j%0hQ{Ns^C)bPaI)dIjc+@VxS$@*S6*CP z>x+#RF4U|ETHk=&5n5QVM6;)^WylR@t zqmmOce(A}i?Qs7m)o83>4Ca)=^@OoDv%Z78WU}ye+#MZc_FP=mP8R zt(yDp27@YFpaCCh(V(Q3^$L=>^Uy^XJ9$sm`^XajQ_Sc^|3-C`4d1DP z!8TdgY&xz@Zwln*^s@>Z&^$3Ej_vqzu)+enE^4yq!C^zF{6%uy;uXB++Rc^lCb{^z zQ>FpS^9XA1vn&)QvER^ICWt-gyOxWO*wB_2tf&!)CmPu(7)BB|5jR7T_zt(>Teo$# z7LPl3(yx%<^vUP?zzyfMkyt5Mka2~;ZLgFcYhs)~&NY`Q6p&Q`CAONGjftCl;g}<5 z*BOif9m4_x)42v~JY{eV*aF0Gv#xS@Pjs~bL09xvz&#KpmCT;5R`Slv^1#b(d8*L4 zX??V?T<@0~bbqV*lxYuZuRSqdwZ<(^;tGs zqMr}t2{b|AndTVdy7faP7xucXHE9a05aMkcb}cbqnnCR~2)gw980>Xy2&eLWx_FPm zYO@8ZAA}pAzK8qSEc(nRe>xm2-_YKvhf(^dmbYF=4UUoFQ%}J1tF|C|u8-8CUG2&v zkZKTI+to5}Mm>atgEf6f0XZ$0GLXpQGdZ}24P%q>3^&NyEM(Q50gpSQq43P0r2cJV zwKpkj**B%^6ZNp@taVHOc8eTuQ(qAXK^gwhc4YlVo)k9sdih)rp!Yns5XT|8*#=g! zAma1!TW$Hl2G%WzM|tbS<@)o>>obgvEV#hooj&mh6o-Q=(Mm2!_{<+$TQHYDkl~#a zQZmJ~a|+RDnheXP1okwOdN*)IT-tMDtd=Lvw^T+zCMJ_akV}LOOZRak{(EfQdQe$l z+!bvj6~@7s_qKuhuIf-_#F=*ieHZ_SqgE4o8|};)CXbFmhGMmE!va6#mzLg#3;KfEGvnPEy}k0`%i-aqETtNn!4hK&i>9z0 zJQDiV1s8%LOY?=mQ*d1PHh0;t*7eMprIBHz`dr#R>@tSGdgm|uF23ZQ3mmBi;z*$4 zGvKhe+x@BoDc-XG$B~ZZmwgxdFE!GD0>|{nDGOqx4*#CV&46%@GXD_5HsS*(WdXV< z>_0kulN;1-s3?!xpF@=3C|?jA1Jx)QM_^_z)iXiF@NN{O+|9-^NYz*W7++~5hpkOfnbWm zbiYDyO@oq((1@Vm5mMe8Isu1Y=E*-nn8*j9#?mNhCBPs2Q)QM!m;XCL{8`pro(Nv~ z8!2$un!1UhQBLt9$PP}xt%f}U|CcHmgPHulFMs^cMF6WO9sf}8e~l1-s>?rSSsc?uTMX?pJ@P> z;eSv04VKZ7p z!zzrQBn2q6raIYUHIovjPUpy>ih`w&#hIsYQ0i)Q+%Rpt@oa^YMq5t#F}Nn;Dii6u zcee)8L?%^F-)@AIJhJC<$Rxel1T7Z(+w)k=Z=hg}XDb@fe6}#8V`I%p)N$eFZ-4o+ zQ&NC5Mlv$Sk=xcv$sSZJUVx4Fk$fyU{&i0}uk1;x!t-m83*@>tH69(njB-Bm#5cT)BTZQ?E0{caW% ziQ?qQML#Rl?4P`d-n0&a36Xgfo->(2aC@4u@${m&QCnc!6ER?df$Rn*(j`ZYT#Q}K z*nYgY0DYXxid^DzDIKdM|C*1Ue^Msf2Mc{AZGb%HM|vl2iFqYam}INm2yZ;hr#vu* zywG9XZ}?Wzvd9Q&Jp)=rf`Zh1Msw@b0+$Az?clpLyoISlCiPC=k=o`qJYluVq|tB7 zgciLBq>#s+Z0-U618_8x-gmBv4V&$?OM8<-dHn~a7v<#T<>cgzx9nhM z9k=SwY1lFA&#}|r?5(xaS3t)UORe`n;^&ef#Z8cJffkiY1p}da-CI!hRYV^r za{zTgrn?$nK#%Xo|F7Ky_z~FwJqQ^YzY$z+J7I0MH`cJ;Bie%skl#+&2Q+%eH^s(k zYR1OGGR1G?u_Y{0;H%zHAbj%A2}}Wyd-UH#ki$w$tsvTjNNC#Ps=>_+tTtS3g8Uxr9cO`!~uQ^VT`L)B&Goyi6KIKDk~nb5p& zSY(k&nHNH zCg*YS@f(gyBfoxuTBz%)Cz(~5;H?Q1GKcqtk8=wt%Bg!B0M=j;;`DKKa@ybP?tc05 zUMw|r{(UXZy@Cw^mFDW|6Eu^PV|zfU@gOcwYkvTqe zrD#k0IQ+Eku-xVCVzvZEKh(V0NYaQ<8*zo3(`ya)`Sz|jCOL4Pb1$T*n8hGO9@RVI ziP8xEXm4XmjLx~t_=6GWXYEiJw3H0YVa>q_HvvLyw_**bd1a4Nou0T=OtpeVf8 z#`L4cu)KbdH3SazU2!6$i(Lwhi`J^&T{TbT;&d{<%BP9?K;?YJ%JkQT=bV*1C%oGLti=RJd-CGz0u~kMt;~<)t39*ISw5y^L-hl(@ zZtB&U;yQvWYm!?zc}){$u3q@X(h8e8&NFi-#R)rE7%}($Fghh50Fu_)GR5WKp~z-n z*-m?j4`pvpm~6*jww9EX7$rnS;yps|fCJI;H#;tJ^FcU#n<+mSZ1Bh6!DMY?uD7w-IU4<(wJNp3dI%lWXC_OsJxH`ye~ZU&T# z64@_Z{kMr;`ueq?;Oke@(a`oZRTRM!QESx$2WT1izOEn@aySy_v?m0ezHe<|v0EOq z>e7nEaI!{11qV#&G*1t{qJWR~K46V(ZuVGRriu7vYrV7CNl#BPkFCCF*lDxJCiv?w zWZ-uG3TU(N?{YIR?zjf8wEO7e%T$+1x?MU}oM@zWYGHyMdbTuec!P9X_@hb(F$Ifw zRc3+er{${v8&Ua}>A;$cU%x}jyu}5k)$};Es_MNkwJZhz~Cr|T$8kN8-HCX27o%wS0VC|~>Hl$sB zlom$Rer#1>?jJJD?UoP-IyPS7M0US?WSu|w&sW9)>qRGDba(G=%gf_Cu~i{ZlH;f6 zSy?>=dm|%g5J0t#O$9skjdKFC4xXp{Z5_a-33NSCcvC!NVgy1d0t4Brl2Z1*FlFcT z_X`R3^{b+`JV`5JC*FhWW4LU`O$qP4omHp*B*G6<%~@#j((6gW_N&u@loSRx>P@Yc zyf=PtFEL4q3%UOKJT!#pWl#GJO=nGnxOh;-+&n0lYBEP^DnZ;(CAB=tdbdOblqd(K zm_*K%%)lC3-%Kwy-{>U<)wETmDa_9=Dtgm-wx>redOO5iUn-t+(hA{oTwtosu45qT zyz!7(>er4&k%zwbgQhRQ=9!Z$VGNV71OdRYOtl*q8a)eRk$L0du8f zQO9eNPRhIDHAS>2nXs;TK2A!eHTnBpib)ja)_i&vE{kS6QA;Ce@9o9m(m|OEVp6^f zKX$yLSLCs;E?kWw4R)iyf7g2#6<+x~x7he;xJS8vxg*6BzmmVV 
zbNwEpaWbLig1=9##z?K^twQlIN3SgVD8p=G|A3dqHzrNJU4nJe!PuueDO5(g1U((! zDa)mb__B@R>Tb*M7F{#raBs@=?z&I5K zZCn!EOG;s6Oi2lQs>MggA(^nyPbVHx)Oc;!lxaj>R5b~swSU>00Zo3;=!dF(Yxn9Z z7Q5*=&i=n zO-Wm8>-z3vS{yOPR8lX;wAJdW#u`5e)3dl)YI3T?cvLx9>o%l1P8rmTyA~6-TOLS( z`Xts3Wl2&}68foL7!OElnwVu1&SSNH5kVrgK?CEu_Wf`8Y8kf5GU*1X;%9f)9o$0O z&9Cm-W1Oel`V9Xn*>f8WoQcMa8U!`0oDY{KBo`2^C;<@=ixwo({nGbx(`%puPTxn~ zyVyR$J5T$_30vC_Lk0u}Jj+fgR~;Uw-g&ml{1Qm3OupS`E<|y)QR8O>|M=glVHe@< z%K=iYDb7jvOq=qBAF%J){4kGF^uDH>#IET*X_Ul@Q830VVU;gmdm1Rq*1Di$%WwkA zSry<^Fr=F_MKB*Sw|}OuXP=|&yF-}yo|s6(ZEcI)W!?4fnsTTebIF$M;yltMb<<{R zd(^!nNn2mS@mlexG2ipo4HEC%N%gJw=5pPS2fQzBYkk)SZZ@v6TXWet;LKaAiuI9# z-h^Sf_u9l3V?M9@-d9GvOoBvi@7j#r*;hWZLkuUc6X zdL?@mx9~w$Yi@n1@fe#Z-PGd@O^#wFJ0%}F<--{zy;gJg&HM-70PFLy`4RU!v#AIN zXHqaRR=KKn^ACP%1U`~aGF~6V_?J8P#rah#x$M@nf-Y1E8Sg@ZH#l53i2>KTD}VRG z<=e!79AWuJMwV+1oWxqN-TBn@Y)mxkkM z5|><*s@@@VE_f*HBV#?)h1OH}k=Er;UL)QP50fqpCZDXiZ~q)0(4bnBDOLPIBg}l~ zh)T!>e0BM92h|eZlrpa>N87NrFU)2XXd!6`*h6bU=yq^D*yMo;En;yAk#b%8Y=vD!>bQ1GzitPHm4b@p7xuxv&f&mDC&^e%Ee+4P?p|WRxGKXM-;ig*(OrpARc^7 zb1J)0{n%%tt3^$v<7qUg2`vTf9@>6q*7O=|r_Qpj9VB>{2Tlv8h6Z{Pf(kTu-Q+rG zwY^yPRBz#<%h!cK(lH1&LN@Y?!q_!mQ%9#77D+`^ug~_NPEo5y!-7iEJvEm^rNctK zEM+e3R0ZeW%c-38f12r_?hwTONhX@^b?*eFutxWYwv*`?r_Ihv5Jv%LR+i>!Uxi*< zW~;5^;t;M3M0k#AVam8Y4eY&YHp3zt7WXw@?-2W_`fKj`R2^zx>+C38KFv6o;ku%I z2;ygW6aKB^MaJZBd|2fv{#EmysDv_y*$u%k9p;A?cB^#Zy57u>h^IJos}K!gXSHDJ zJmyqFu(=Y!=bNoVf};Js?@Ce{M3E;Y2WsNAcDYimstFwKGD^u|(V(aD&*zk^7nlk< zBt(nuZl`kRf1L`l!XUDn(%)>#hrV!GV0g%DjT3Vcf;gIAldK9!#nFL{uZDtx`ECAl zNpb!?)gMb*hZ^(p&PZ9WXQIvS_%E7dTUpW5TUkXs!oJFEm$Xgd>P*>f@&)}q5mSZA zm#Hn6&&jvOB>Ev|x?G~_dVyE*gm4pcBztXpAfbxT53??R>0@LbYzA&GKF}~AxWFSDw_?jNEom!WMsk}<) zOVBA5db?GI&SJ`y^s$rzCNw*}MrE<^m!;;2JFmkSW>#6giKsi2jIHJ6_1_blaOBbq z{EOwUxekvDtPLC-1qB=&pwrE2uvhU17X^B#i3-^*7`S>6aGpo;~`e9+Y z!bMQ?TnJ@o=ar~*^d)G?(j?_Wkj3Ma?A2UF{va-@&9BtREgu3U< zrFqO8A6+D-`t%N74XwY#bFy^LZGB!T4acDy3ULB^tqKnql5Fdf%kl@`{WIn}GCk)? 
znzy#_O>=XEb&c#s%w!LR>f$`3QH@Hpwo&q4m&D?JcgE{8z)hxC=WHc*C)rrPTK?(T?)<@-a2Rdu?R zms0Tf3Y{rb`%cw`qd++>hl$;F%d`cLDBX=-p&11p5h;xgKX9)CDIChJ3`Ib4i2+ITBDyA zY`I)2X+8=N;aOGMU|oHo?Ev+?-b?bKB@`~R>RpVz8Bd-X?<)YROU2ggD-I^xxX|4n zfpBQQwNfaim(1kKq;PSOjdfv>dZ4j}hK8o5MnW?0d2JK?AcHV3zr#=IS^R+oiUZns z0blcXygk8O9R))UE^A{L9PVrE>(@MnN{G?c91O3QQeYKBDG|MvOK8o4n@T3k*n~v- z3kit@4xPQv>8DvEW;Fsm-Z^ikJ`H{D_EZdd)grLX%zKvkMtq8-uaC!8V_=57a#C(R z(QzGt)|U_+nD2YRd`!D7AkHOKDXC`7@do=-*#;-j`5rQou&%6E7U>EZe~-`ZAf{&; zeF~y?#@V|~bv!@nx|nIutsGolqG)8wqHhtqGqnKyp6uj7SeG{`LVv$%BXB%46oDx; zZ`nCgLqCw7L#%|PDNydjYY$iw`IdIf0-`m(`_mS)Q-_E9bTuWQ8BqrviN|fhs#am}zUr>=ySc-OK z{XX{8VrtUP&*zr?f2lp1i0-rEgq}YN zK@ZYmV8o==_nZp8ILi7X#%7hs;WR>|sP`5=TsgI4e@fOi28JBzN6ipi814D` z_4?uiw82&rVu0MNe^B>%ino?zoFm5m5QohGPR!afKKUdV@gzTDhuQe-{ipy|Z-GT7 z;XX>EvP;5ovL{c#{KDw9()Z=KcEpQBAuQ>>JdcZH&C|GTmzToR+2*y}(ssX=5r8x% znVZ`AtEmCaEJ-$AU$e5d*JES86$_4>!ryElYU*g-s*Rh2aPvPSa!smDh*LfF-X-9L ziny+|Rg}K8wT}7W#ts2YrHNZNY+$rxX-{!lwjVWG=f3NKACz&aEDIg}s#aaqmor9X zSkac)rX7t7bzOvN6)ELY$#o2jz+*-2!Y~RiIm~*ztJ|ZgQv2JZIR=<;Sq)l(>8|Tw z#mmKNM*u;H^=}B~ndwr0zoC0%y?8O_ON8akks#5IECgcuu|2)LcIwR1Aoy8gVrNQ) z-Ir0wF_ZIMccy$}rmZbWLRXTmyp?WH1bE5N9-9t3Xnj=qu02&Iy7a>SzfR+~C^`XN zq@qNfdLb!a;%wrx&jCsoUi0^2da&4a>avQ~zVF9~@02~OPG%?8q}pJvO@`W)z5k6f zKXD>Cz?9{+rB`QBX8{MJ^7s2IG!!;?wc=LyK?6QIp^mHt*O;}sZ);8JU2eJyN)6dm zqmF}O+E9#9nkFpmepbYVM&0>In`f%6NK0qr8ClqDd%;fW;@r&Y;rcjL^_}aDD^xD4 zH5>8R&ML?7bM{*JvKt+_18>ld98bM&78ObO)KylNd4@4??HOFX#KNRmtXJbSp@{3` z=onl)92P;Pq`d#@%pj^fc_0bhk9vlkJCp48fTP1I#JqolXbItHIr4AIRoX2f5o`cE zCj6M#j)Wz5yr0Fe#Mr#KXn&`6@@8MHJ$|LrZip?BgW3w9ZKJ!38R4R?Cd?Iq^VzL( z#qIh0tc65vqv3821(`+_do|F}#(jC9L`hsxuR7E`Sgx)&@ImfevWVksPrLvCg)&OY zQh&C#&d#n{{{)bi(?}NpK-2CfMVor$~2hsfb%rRdPzHw(JXef_WjUPv63ElY%zgP%AIUMXF?QOV<9cn_=nP2oyTI zPAB!k7uA8vz{40-(r9sn+;cq*41!NU078dyX(U31e`d2 zJG<6a(7{3EuUV)aHf-1(UbJi20?QPo0=ZT%84^mYc3;<`47@id3N-UH`;!7LlfZzn zewKN+)#+bfKsG%G1$B?_8wU4VZE2QEA-P(*;*g-)I`%wR~Fp>y$m+I;=qhnRu@a|+e1phV7ol$W`tB7vCc zmgTgPoCS<)VGmOgooHe#ZiHcHXLDx8O}40^ihT(qYoF4nTO}*WAW)Qbh zwBKAO*4mM-5$1x|dati@=6QZjC`LZd*CsH-b*!;FgH@?RN1l3XR8Dx z!}=Q+x|>Z#Dp%%k#blY;+cqtc5w@lAQ@z$dw6f<7$uf%scW z^sZSjP?Op1R?yUZ9A8U8c|Id(p`C8DPWG{VsS;sMeR5UimHNpt6{ehb@0Nt9yf^+f zVoq`Pj>2uk-Kd`$SSgP^xl1&8xWS9rq-Zj6B2Ss@ZAXR zNGsp1eCw4iGmHmwOAy)B*CLMUGFXZxLBZ0HGsD45ADbKl^I{0=8xN%ph7Fy&kx)^V z7fKAGoSf}UH{#YCGO8e+s!+YIWT$WFyW{4K{_x9f2Q5tGGGIy=PEX!@=#|Bl!e!cj zH;EMq1L0E;>RgWk;vlFqeM6FVluI&pEA`d6s&Woj{?Ha~En_ zdVmAKFPRUTHrbY5<6#V)zt}wqW{D6fyCK>OHTK40Lbw4fLmt}?dM;-ugk+q~&CLw| zd<;Hq@U>(o>P*{Ux>$)iHJ1&1$D*$gb(%rdGI;AsHjfzOGzWL}Tl}Ngo|A^NaVr)w{+?6LZ?#YESG( z3DviM`?kA%?_Oe}a;mLuwgGH~k)fX;5H;DG3=Aewh=v9rxf0t?-qJ>l{rG`zva(Wt zhKc*(MgpuQ4GKcT!(rW-Se2Am`}Y%DZsH2;Ex#mzjAsW5GD3PdCk$j)`PfkiW6{yW zIk5FW=64AWAxOK*_tJi}{?{+>?b(LGVqwrt7Lbz^2JrLLB7)WecL8)fK1=d5!DnP- z^`syO`1IeM-~Z+b|L274e^=_1BJQ$j3D32(Z+Nb}K>Ot`o2BeUAU6N9My|>@>y(jqY z9VEyX{@i5zUm`^Jcb7)n_SYNsI~(@!&-XX``wfG-lMm@tLmn3RcoXis6>GQyA}eAI zy;gkp_JZwHKnVDe8%S6!{F`MHIDQkk;1UZGvkIU6a{}O+|BVQ;e?k%^x)UN548gNk zon*C~OrAKIiW-APL{6q;{Ji}9TztG-ynJeWH%0mQMfrs}czH#6d52XfSpV$;8#|My zX72yj7Zje!O9U63her6@#7tDf)WPwooh{iz896RK?wgnX(mNpnF`$C1l1%aK$It!; D(4EVg literal 0 HcmV?d00001 diff --git a/automation/script/module.py b/automation/script/module.py new file mode 100644 index 0000000000..22ff9c5da3 --- /dev/null +++ b/automation/script/module.py @@ -0,0 +1,5062 @@ +# CM "script" automation that wraps native scripts with a unified CLI, Python API +# and JSON/YAML meta descriptions. +# +# It is a stable prototype being developed by Grigori Fursin and Arjun Suresh. 
+
+#
+# We plan to develop a simpler version of this automation at some point
+# while keeping full backwards compatibility.
+#
+# Join the MLCommons taskforce on automation and reproducibility
+# to discuss further developments:
+# https://github.com/mlcommons/ck/blob/master/docs/taskforce.md
+
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+class CAutomation(Automation):
+    """
+    CM "script" automation actions
+    (making native scripts more portable, deterministic, reusable and reproducible)
+    """
+
+    ############################################################
+    def __init__(self, cmind, automation_file):
+        super().__init__(cmind, __file__)
+
+        self.os_info = {}
+        self.run_state = {}
+        self.run_state['deps'] = []
+        self.run_state['fake_deps'] = False
+        self.run_state['parent'] = None
+        self.run_state['version_info'] = []
+
+        self.file_with_cached_state = 'cm-cached-state.json'
+
+        self.tmp_file_env = 'tmp-env'
+        self.tmp_file_env_all = 'tmp-env-all'
+        self.tmp_file_run = 'tmp-run'
+        self.tmp_file_state = 'tmp-state.json'
+
+        self.tmp_file_run_state = 'tmp-run-state.json'
+        self.tmp_file_run_env = 'tmp-run-env.out'
+        self.tmp_file_ver = 'tmp-ver.out'
+
+        self.__version__ = "1.2.1"
+
+        self.local_env_keys = ['CM_VERSION',
+                               'CM_VERSION_MIN',
+                               'CM_VERSION_MAX',
+                               'CM_VERSION_MAX_USABLE',
+                               'CM_DETECTED_VERSION',
+                               'CM_INPUT',
+                               'CM_OUTPUT',
+                               'CM_NAME',
+                               'CM_EXTRA_CACHE_TAGS',
+                               'CM_TMP_*',
+                               'CM_GIT_*',
+                               'CM_RENEW_CACHE_ENTRY']
+
+        self.input_flags_converted_to_tmp_env = ['path']
+
+        self.input_flags_converted_to_env = ['input',
+                                             'output',
+                                             'name',
+                                             'extra_cache_tags',
+                                             'skip_compile',
+                                             'skip_run',
+                                             'accept_license',
+                                             'skip_system_deps',
+                                             'git_ssh',
+                                             'gh_token']
+
+
+
+
+    ############################################################
+    def run(self, i):
+        """
+        Run CM script
+
+        Args:
+          (CM input dict):
+
+          (out) (str): if 'con', output to console
+
+          (artifact) (str): specify CM script (CM artifact) explicitly
+
+          (tags) (str): tags to find a CM script (CM artifact)
+
+          (env) (dict): global environment variables (can/will be updated by a given script and dependencies)
+          (const) (dict): constant environment variables (will be preserved and persistent for a given script and dependencies)
+
+          (state) (dict): global state dictionary (can/will be updated by a given script and dependencies)
+          (const_state) (dict): constant state (will be preserved and persistent for a given script and dependencies)
+
+          (add_deps) (dict): {"name": {"tag": "tag(s)"}, "name": {"version": "version_no"}, ...}
+          (add_deps_recursive) (dict): same as add_deps but is passed recursively onto dependencies as well
+
+          (version) (str): version to be added to env.CM_VERSION to specialize this flow
+          (version_min) (str): min version to be added to env.CM_VERSION_MIN to specialize this flow
+          (version_max) (str): max version to be added to env.CM_VERSION_MAX to specialize this flow
+          (version_max_usable) (str): max USABLE version to be added to env.CM_VERSION_MAX_USABLE
+
+          (path) (str): list of paths to be added to env.CM_TMP_PATH to specialize this flow
+
+          (input) (str): converted to env.CM_INPUT (local env)
+          (output) (str): converted to env.CM_OUTPUT (local env)
+
+          (extra_cache_tags) (str): converted to env.CM_EXTRA_CACHE_TAGS and used to add to caching (local env)
+
+          (name) (str): taken from env.CM_NAME and/or converted to env.CM_NAME (local env)
+                        Added to extra_cache_tags with "name-" prefix.
+                        Useful for python virtual env (to create multiple entries)
+
+          (quiet) (bool): if True, set env.CM_QUIET to "yes" and attempt to skip questions
+                          (the developers have to support it in pre/post processing and scripts)
+
+          (skip_cache) (bool): if True, skip caching and run in current directory
+          (force_cache) (bool): if True, force caching if can_force_cache=true in script meta
+
+          (skip_remembered_selections) (bool): if True, skip remembered selections
+                                               (uses or sets env.CM_TMP_SKIP_REMEMBERED_SELECTIONS to "yes")
+
+          (new) (bool): if True, skip search for cached output and run again
+          (renew) (bool): if True, rewrite cache entry if it exists
+
+          (dirty) (bool): if True, do not clean files
+
+          (save_env) (bool): if True, save env and state to tmp-env.sh/bat and tmp-state.json
+          (shell) (bool): if True, save env with cmd/bash and run it
+
+          (recursion) (bool): True if recursive call.
+                              Useful when preparing the global bat file or Docker container
+                              to save/run it in the end.
+
+          (recursion_spaces) (str, internal): adding '  ' during recursion for debugging
+
+          (remembered_selections) (list): remember selections of cached outputs
+
+          (print_env) (bool): if True, print aggregated env before each run of a native script
+
+          (fake_run) (bool): if True, will run the dependent scripts but will skip the main run script
+          (prepare) (bool): the same as fake_run
+          (fake_deps) (bool): if True, will fake run the dependent scripts
+          (print_deps) (bool): if True, will print the CM run commands of the direct dependent scripts
+          (run_state) (dict): internal run state
+
+          (debug_script_tags) (str): if !='', run cmd/bash before executing a native command
+                                     inside a script specified by these tags
+
+          (debug_script) (bool): if True, debug current script (set debug_script_tags to the tags of the current script)
+          (detected_versions) (dict): all the used scripts and their detected_versions
+
+          (verbose) (bool): if True, prints all technical info about script execution (False by default)
+          (v) (bool): the same as verbose
+
+          (time) (bool): if True, print script execution time (or if verbose == True)
+          (space) (bool): if True, print used disk space for this script (or if verbose == True)
+
+          (ignore_script_error) (bool): if True, ignore error code in native tools and scripts
+                                        and finish a given CM script. Useful to test/debug partial installations
+
+          (json) (bool): if True, print output as JSON
+          (j) (bool): if True, print output as JSON
+
+          (pause) (bool): if True, pause at the end of the main script (Press Enter to continue)
+
+          (repro) (bool): if True, dump cm-run-script-input.json, cm-run-script-output.json,
+                          cm-run-script-state.json, cm-run-script-info.json
+                          to improve the reproducibility of results
+
+          (repro_prefix) (str): if !='', use it to record the above files as {repro_prefix}-input.json ...
+          (repro_dir) (str): if !='', use this directory to dump info
+
+          (script_call_prefix) (str): how to call script in logs and READMEs (cm run script)
+          ...
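+
+        Example (a sketch; the tags and version constraint below are illustrative):
+
+          import cmind
+
+          r = cmind.access({'action': 'run',
+                            'automation': 'script',
+                            'tags': 'get,python',
+                            'version_min': '3.8'})
+          if r['return'] > 0:
+              print(r['error'])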
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+          * (skipped) (bool): if True, this script was skipped
+
+          * new_env (dict): new environment (delta from a collective script)
+          * new_state (dict): new state (delta from a collective script)
+
+          * env (dict): global env (updated by this script - includes new_env)
+          * state (dict): global state (updated by this script - includes new_state)
+
+        """
+
+        r = self._run(i)
+
+        return r
+
+
+    ############################################################
+    def _run(self, i):
+
+        from cmind import utils
+        import copy
+        import time
+        import shutil
+
+        # Check if we need to save input/output to files
+        repro = i.get('repro', False)
+        repro_prefix = ''
+
+        if repro:
+            repro_prefix = i.get('repro_prefix', '')
+            if repro_prefix == '': repro_prefix = 'cm-run-script'
+
+            repro_dir = i.get('repro_dir', '')
+            if repro_dir == '': repro_dir = os.getcwd()
+
+            repro_prefix = os.path.join(repro_dir, repro_prefix)
+
+        if repro_prefix!='':
+            dump_repro_start(repro_prefix, i)
+
+
+        recursion = i.get('recursion', False)
+
+        # On the first script run, check if we can write to the current directory
+        if not recursion and not i.get('skip_write_test', False):
+            if not can_write_to_current_directory():
+                return {'return':1, 'error':'Current directory "{}" is not writable - please change it'.format(os.getcwd())}
+
+        recursion_int = int(i.get('recursion_int',0))+1
+
+        start_time = time.time()
+
+        # Check extra input from environment variable CM_SCRIPT_EXTRA_CMD
+        # Useful to set up default flags such as the name of a virtual environment
+        extra_cli = os.environ.get('CM_SCRIPT_EXTRA_CMD', '').strip()
+        if extra_cli != '':
+            from cmind import cli
+            r = cli.parse(extra_cli)
+            if r['return']>0: return r
+
+            cm_input = r['cm_input']
+
+            utils.merge_dicts({'dict1':i, 'dict2':cm_input, 'append_lists':True, 'append_unique':True})
+
+        # Check simplified CMD: cm run script "get compiler"
+        # If artifact has spaces, treat them as tags!
+        artifact = i.get('artifact','')
+        if ' ' in artifact: # or ',' in artifact:
+            del(i['artifact'])
+            if 'parsed_artifact' in i: del(i['parsed_artifact'])
+            # Force substitute tags
+            i['tags']=artifact.replace(' ',',')
+
+        # Check if it has extra tags as a second artifact
+        # Example: cmr .
"_python _tiny" + + parsed_artifacts = i.get('parsed_artifacts',[]) + if len(parsed_artifacts)>0: + extra_tags = parsed_artifacts[0][0][0] + if ' ' in extra_tags or ',' in extra_tags: + # Add tags + x=i.get('tags','') + if x!='': x+=',' + i['tags']=x+extra_tags.replace(' ',',') + + # Recursion spaces needed to format log and print + recursion_spaces = i.get('recursion_spaces', '') + # Caching selections to avoid asking users again + remembered_selections = i.get('remembered_selections', []) + + # Get current env and state before running this script and sub-scripts + env = i.get('env',{}) + state = i.get('state',{}) + const = i.get('const',{}) + const_state = i.get('const_state',{}) + + # Save current env and state to detect new env and state after running a given script + saved_env = copy.deepcopy(env) + saved_state = copy.deepcopy(state) + + for key in [ "env", "state", "const", "const_state" ]: + if i.get("local_"+key): + if not i.get(key, {}): + i[key] = {} + utils.merge_dicts({'dict1':i[key], 'dict2':i['local_'+key], 'append_lists':True, 'append_unique':True}) + + add_deps = i.get('ad',{}) + if not add_deps: + add_deps = i.get('add_deps',{}) + else: + utils.merge_dicts({'dict1':add_deps, 'dict2':i.get('add_deps', {}), 'append_lists':True, 'append_unique':True}) + + add_deps_recursive = i.get('adr', {}) + if not add_deps_recursive: + add_deps_recursive = i.get('add_deps_recursive', {}) + else: + utils.merge_dicts({'dict1':add_deps_recursive, 'dict2':i.get('add_deps_recursive', {}), 'append_lists':True, 'append_unique':True}) + + save_env = i.get('save_env', False) + + print_env = i.get('print_env', False) + + verbose = False + + if 'verbose' in i: verbose=i['verbose'] + elif 'v' in i: verbose=i['v'] + + if verbose: + env['CM_VERBOSE']='yes' + + show_time = i.get('time', False) + show_space = i.get('space', False) + + if not recursion and show_space: + start_disk_stats = shutil.disk_usage("/") + + extra_recursion_spaces = ' '# if verbose else '' + + skip_cache = i.get('skip_cache', False) + force_cache = i.get('force_cache', False) + + fake_run = i.get('fake_run', False) + fake_run = i.get('fake_run', False) if 'fake_run' in i else i.get('prepare', False) + if fake_run: env['CM_TMP_FAKE_RUN']='yes' + + fake_deps = i.get('fake_deps', False) + if fake_deps: env['CM_TMP_FAKE_DEPS']='yes' + + run_state = i.get('run_state', self.run_state) + if not run_state.get('version_info', []): + run_state['version_info'] = [] + if run_state.get('parent', '') == '': + run_state['parent'] = None + if fake_deps: + run_state['fake_deps'] = True + + print_deps = i.get('print_deps', False) + print_readme = i.get('print_readme', False) + + new_cache_entry = i.get('new', False) + renew = i.get('renew', False) + + cmd = i.get('cmd', '') + # Capturing the input command if it is coming from an access function + if not cmd and 'cmd' in i.get('input',''): + i['cmd'] = i['input']['cmd'] + cmd = i['cmd'] + + debug_script_tags = i.get('debug_script_tags', '') + + detected_versions = i.get('detected_versions', {}) + + ignore_script_error = i.get('ignore_script_error', False) + + # Get constant env and state + const = i.get('const',{}) + const_state = i.get('const_state',{}) + + # Detect current path and record in env for further use in native scripts + current_path = os.path.abspath(os.getcwd()) + env['CM_TMP_CURRENT_PATH'] = current_path + + # Check if quiet mode + quiet = i.get('quiet', False) if 'quiet' in i else (env.get('CM_QUIET','').lower() == 'yes') + if quiet: env['CM_QUIET'] = 'yes' + + 
+        skip_remembered_selections = i.get('skip_remembered_selections', False) if 'skip_remembered_selections' in i \
+            else (env.get('CM_SKIP_REMEMBERED_SELECTIONS','').lower() == 'yes')
+        if skip_remembered_selections: env['CM_SKIP_REMEMBERED_SELECTIONS'] = 'yes'
+
+        # Prepare debug info
+        parsed_script = i.get('parsed_artifact')
+        parsed_script_alias = parsed_script[0][0] if parsed_script is not None else ''
+
+
+
+
+
+        # Get and cache minimal host OS info to be able to run scripts and manage OS environment
+        if len(self.os_info) == 0:
+            r = self.cmind.access({'action':'get_host_os_info',
+                                   'automation':'utils,dc2743f8450541e3'})
+            if r['return']>0: return r
+
+            self.os_info = r['info']
+
+        os_info = self.os_info
+
+        # Bat extension for this host OS
+        bat_ext = os_info['bat_ext']
+
+        # Add permanent env from OS (such as CM_WINDOWS:"yes" on Windows)
+        env_from_os_info = os_info.get('env',{})
+        if len(env_from_os_info)>0:
+            env.update(env_from_os_info)
+
+        # Take some env from the user environment
+        keys = [ "GH_TOKEN", "ftp_proxy", "FTP_PROXY", "http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY", "socks_proxy", "SOCKS_PROXY" ]
+        for key in keys:
+            if os.environ.get(key, '') != '' and env.get(key, '') == '':
+                env[key] = os.environ[key]
+
+        # Check path/input/output in input and pass to env
+        for key in self.input_flags_converted_to_tmp_env:
+            value = i.get(key, '').strip()
+            if value != '':
+                env['CM_TMP_' + key.upper()] = value
+
+        for key in self.input_flags_converted_to_env:
+            value = i.get(key, '')
+            if type(value)==str: value=value.strip()
+            if value != '':
+                env['CM_' + key.upper()] = value
+
+
+        ############################################################################################################
+        # Check if we want to skip cache (either by skip_cache or by fake_run)
+        force_skip_cache = True if skip_cache else False
+        force_skip_cache = True if fake_run else force_skip_cache
+
+
+        ############################################################################################################
+        # Find CM script(s) based on their tags and variations to get their meta and customize this workflow.
+        # We will need to decide how to select if more than 1 is found (such as "get compiler")
+        #
+        # Note: this local search function will separate tags and variations
+        #
+        # STEP 100 Input: Search scripts by i['tags'] (includes variations starting from _) and/or i['parsed_artifact']
+        #                 tags_string = i['tags']
+
+        tags_string = i.get('tags','').strip()
+
+        ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'])
+
+        ii['tags'] = tags_string
+        ii['out'] = None
+
+
+        # if cm run script is called without tags/artifact and with --help
+        if len(ii.get('parsed_artifact',[]))==0 and ii.get('tags','')=='' and i.get('help',False):
+            return utils.call_internal_module(self, __file__, 'module_help', 'print_help', {'meta':{}, 'path':''})
+
+        r = self.search(ii)
+        if r['return']>0: return r
+
+        # The search function returns found scripts, script tags and variation tags
+
+        list_of_found_scripts = r['list']
+
+        script_tags = r['script_tags']
+        script_tags_string = ','.join(script_tags)
+
+        variation_tags = r['variation_tags']
+
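+        # Illustrative example (hypothetical values): the tags "app,mlperf,inference,_resnet50"
+        # would be split into script_tags = ['app', 'mlperf', 'inference']
+        # and variation_tags = ['resnet50']
+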
+#        # Print what was searched!
+#        cm_script_info = 'CM script'
+#
+#        x = 'with'
+#        if parsed_script_alias !='' :
+#            cm_script_info += ' '+x+' alias "{}"'.format(parsed_script_alias)
+#            x = 'and'
+#
+#        if len(script_tags)>0:
+#            cm_script_info += ' '+x+' tags "{}"'.format(script_tags_string.replace(',',' '))
+#            x = 'and'
+#
+#        if len(variation_tags)>0:
+#            x_variation_tags = ['_'+v for v in variation_tags]
+#            cm_script_info += ' '+x+' variations "{}"'.format(" ".join(x_variation_tags))
+#
+#        if verbose:
+#            print ('')
+#            print (recursion_spaces + '* Searching for ' + cm_script_info)
+#        else:
+#            print (recursion_spaces + '* Running ' + cm_script_info)
+
+
+        cm_script_info = i.get('script_call_prefix', '').strip()
+        if cm_script_info == '': cm_script_info = 'cm run script'
+        if not cm_script_info.endswith(' '): cm_script_info+=' '
+
+        x = '"'
+        y = ' '
+        if parsed_script_alias !='' :
+            cm_script_info += parsed_script_alias
+            x = ' --tags="'
+            y = ','
+
+        if len(script_tags)>0 or len(variation_tags)>0:
+            cm_script_info += x
+
+            if len(script_tags)>0:
+                cm_script_info += script_tags_string.replace(',',y)
+
+            if len(variation_tags)>0:
+                if len(script_tags)>0: cm_script_info+=' '
+
+                x_variation_tags = ['_'+v for v in variation_tags]
+                cm_script_info += y.join(x_variation_tags)
+
+            cm_script_info += '"'
+
+#        if verbose:
+#            print ('')
+
+        print ('')
+        print (recursion_spaces + '* ' + cm_script_info)
+
+
+        #############################################################
+        # Report if scripts were not found or there is an ambiguity with UIDs
+        if not r['found_scripts']:
+            return {'return':1, 'error': 'no scripts were found with the above tags (when variations are ignored)'}
+
+        if len(list_of_found_scripts) == 0:
+            return {'return':16, 'error':'no scripts were found with the above tags and variations\n'+r.get('warning', '')}
+
+        # Sometimes there is an ambiguity when someone adds a script
+        # while duplicating a UID. In such a case, we will return more than one script
+        # and will start searching in the cache ...
+        # We are detecting such cases here:
+        if len(list_of_found_scripts)>1 and script_tags_string=='' and parsed_script_alias!='' and '?' not in parsed_script_alias and '*' not in parsed_script_alias:
+            x = 'Ambiguity: the following scripts have the same UID - please change it in _cm.json or _cm.yaml:\n'
+            for y in list_of_found_scripts:
+                x += ' * ' + y.path + '\n'
+
+            return {'return':1, 'error':x}
+
+        # STEP 100 Output: list_of_found_scripts based on tags (with variations) and/or parsed_artifact
+        #                  script_tags        []    - contains tags without variations (variations start with _ such as _cuda)
+        #                  variation_tags     []    - contains only variation tags (without _)
+        #                  script_tags_string [str] - joined script_tags
+
+
+
+
+
+
+
+
+        #############################################################################
+        # Sort scripts for better determinism
+        list_of_found_scripts = sorted(list_of_found_scripts, key = lambda a: (a.meta.get('sort',0),
+                                                                               a.path))
+        if verbose:
+            print (recursion_spaces + '  - Number of scripts found: {}'.format(len(list_of_found_scripts)))
+
+        # Check if script selection is remembered
+        if not skip_remembered_selections and len(list_of_found_scripts) > 1:
+            for selection in remembered_selections:
+                if selection['type'] == 'script' and set(selection['tags'].split(',')) == set(script_tags_string.split(',')):
+                    # Leave 1 entry in the found list
+                    list_of_found_scripts = [selection['cached_script']]
+                    if verbose:
+                        print (recursion_spaces + '  - Found remembered selection with tags: {}'.format(script_tags_string))
+                    break
+
+
+        # STEP 200 Output: potentially pruned list_of_found_scripts if a selection of multiple scripts was remembered
+
+
+
+
+
+
+        # STEP 300: If more than one CM script is found (example: "get compiler"),
+        #           first, check if the selection was already remembered!
+        #           second, check in cache to prune scripts
+
+        # STEP 300 input: list_of_found_scripts
+
+        select_script = 0
+
+        # If 1 script is found and script_tags == '', pick the tags from its meta
+        if script_tags_string == '' and len(list_of_found_scripts) == 1:
+            script_tags_string = ','.join(list_of_found_scripts[0].meta.get('tags',[]))
+
+        # Found 1 or more scripts. Check their meta to see if at least one must be cached
+        # (cache==True, or can_force_cache==True when force_cache is set)
+        preload_cached_scripts = False
+        for script in list_of_found_scripts:
+            if script.meta.get('cache', False) == True or (script.meta.get('can_force_cache', False) and force_cache):
+                preload_cached_scripts = True
+                break
+
+        # STEP 300 Output: preload_cached_scripts = True if at least one of the list_of_found_scripts must be cached
+
+
+
+
+
+
+        # STEP 400: If not force_skip_cache and at least one script can be cached, find (preload) related cache entries for found scripts
+        # STEP 400 input: script_tags and -tmp (to avoid unfinished scripts, particularly when installation fails)
+
+        cache_list = []
+
+        if not force_skip_cache and preload_cached_scripts:
+            cache_tags_without_tmp_string = '-tmp'
+            if script_tags_string !='':
+                cache_tags_without_tmp_string += ',' + script_tags_string
+            if variation_tags:
+                cache_tags_without_tmp_string += ',_' + ",_".join(variation_tags)
+            # variation_tags are prefixed with "_" but the CM search function knows only tags,
+            # so we need to change "_-" to "-_" to exclude variations.
+            # This change can later be moved to a search function specific to the cache.
+            cache_tags_without_tmp_string = cache_tags_without_tmp_string.replace(",_-", ",-_")
+
+            if verbose:
+                print (recursion_spaces + '  - Searching for cached script outputs with the following tags: {}'.format(cache_tags_without_tmp_string))
+
+            search_cache = {'action':'find',
+                            'automation':self.meta['deps']['cache'],
+                            'tags':cache_tags_without_tmp_string}
+            rc = self.cmind.access(search_cache)
+            if rc['return']>0: return rc
+
+            cache_list = rc['list']
+
+            if verbose:
+                print (recursion_spaces + '  - Number of cached script outputs found: {}'.format(len(cache_list)))
+
+        # STEP 400 output: cache_list
+
+
+
+
+
+
+        # STEP 500: At this stage we have cache_list related to either 1 or more scripts (in case of get,compiler)
+        #           If more than 1: Check if in cache and reuse it or ask user to select
+        # STEP 500 input: list_of_found_scripts
+
+        if len(list_of_found_scripts) > 0:
+            # If only tags are used, check if there are cached outputs for these scripts - then we will reuse them
+            # The use case: cm run script --tags=get,compiler
+            # CM script will always ask to select gcc,llvm,etc even if some of them are already cached
+            if len(cache_list) > 0:
+                new_list_of_found_scripts = []
+
+                for cache_entry in cache_list:
+                    # Find associated script and add to the list_of_found_scripts
+                    associated_script_artifact = cache_entry.meta['associated_script_artifact']
+
+                    x = associated_script_artifact.find(',')
+                    if x<0:
+                        return {'return':1, 'error':'CM artifact format is wrong "{}" - no comma found'.format(associated_script_artifact)}
+
+                    associated_script_artifact_uid = associated_script_artifact[x+1:]
+
+                    cache_entry.meta['associated_script_artifact_uid'] = associated_script_artifact_uid
+
+                    for script in list_of_found_scripts:
+                        script_uid = script.meta['uid']
+
+                        if associated_script_artifact_uid == script_uid:
+                            if script not in new_list_of_found_scripts:
+                                new_list_of_found_scripts.append(script)
+
+                # Avoid the case when all scripts are pruned because just 1 variation is used
+                if len(new_list_of_found_scripts)>0:
+                    list_of_found_scripts = new_list_of_found_scripts
+
+            # Select scripts
+            if len(list_of_found_scripts) > 1:
+                select_script = select_script_artifact(list_of_found_scripts, 'script', recursion_spaces, False, script_tags_string, quiet, verbose)
+
+                # Remember selection
+                if not skip_remembered_selections:
+                    remembered_selections.append({'type': 'script',
'tags':script_tags_string, + 'cached_script':list_of_found_scripts[select_script]}) + else: + select_script = 0 + + # Prune cache list with the selected script + if len(list_of_found_scripts) > 0: + script_artifact_uid = list_of_found_scripts[select_script].meta['uid'] + + new_cache_list = [] + for cache_entry in cache_list: + if cache_entry.meta['associated_script_artifact_uid'] == script_artifact_uid: + new_cache_list.append(cache_entry) + + cache_list = new_cache_list + + # Here a specific script is found and meta obtained + # Set some useful local variables + script_artifact = list_of_found_scripts[select_script] + + meta = script_artifact.meta + path = script_artifact.path + + # Check path to repo + script_repo_path = script_artifact.repo_path + + script_repo_path_with_prefix = script_artifact.repo_path + if script_artifact.repo_meta.get('prefix', '') != '': + script_repo_path_with_prefix = os.path.join(script_repo_path, script_artifact.repo_meta['prefix']) + + env['CM_TMP_CURRENT_SCRIPT_REPO_PATH'] = script_repo_path + env['CM_TMP_CURRENT_SCRIPT_REPO_PATH_WITH_PREFIX'] = script_repo_path_with_prefix + + # Check if has --help + if i.get('help',False): + return utils.call_internal_module(self, __file__, 'module_help', 'print_help', {'meta':meta, 'path':path}) + + run_state['script_id'] = meta['alias'] + "," + meta['uid'] + run_state['script_variation_tags'] = variation_tags + + deps = meta.get('deps',[]) + post_deps = meta.get('post_deps',[]) + prehook_deps = meta.get('prehook_deps',[]) + posthook_deps = meta.get('posthook_deps',[]) + input_mapping = meta.get('input_mapping', {}) + docker_settings = meta.get('docker') + docker_input_mapping = {} + if docker_settings: + docker_input_mapping = docker_settings.get('docker_input_mapping', {}) + new_env_keys_from_meta = meta.get('new_env_keys', []) + new_state_keys_from_meta = meta.get('new_state_keys', []) + + found_script_artifact = utils.assemble_cm_object(meta['alias'], meta['uid']) + + found_script_tags = meta.get('tags',[]) + + if i.get('debug_script', False): + debug_script_tags=','.join(found_script_tags) + + if verbose: + print (recursion_spaces+' - Found script::{} in {}'.format(found_script_artifact, path)) + + + # STEP 500 output: script_artifact - unique selected script artifact + # (cache_list) pruned for the unique script if cache is used + # meta - script meta + # path - script path + # found_script_tags [] - all tags of the found script + + + + + + + + + + + + + + # HERE WE HAVE ORIGINAL ENV + + # STEP 600: Continue updating env + # Add default env from meta to new env if not empty + # (env NO OVERWRITE) + script_artifact_default_env = meta.get('default_env',{}) + for key in script_artifact_default_env: + env.setdefault(key, script_artifact_default_env[key]) + + + # Force env from meta['env'] as a CONST + # (env OVERWRITE) + script_artifact_env = meta.get('env',{}) + env.update(script_artifact_env) + + + + + + + + + + + + + + + + # STEP 700: Overwrite env with keys from the script input (to allow user friendly CLI) + # IT HAS THE PRIORITY OVER meta['default_env'] and meta['env'] + # (env OVERWRITE - user enforces it from CLI) + # (it becomes const) + if input_mapping: + update_env_from_input_mapping(env, i, input_mapping) + update_env_from_input_mapping(const, i, input_mapping) + + # This mapping is done in module_misc + #if docker_input_mapping: + # update_env_from_input_mapping(env, i, docker_input_mapping) + # update_env_from_input_mapping(const, i, docker_input_mapping) + + + + + + + # STEP 800: Process variations and 
update env (overwrite from env and update from default_env)
+        #           VARIATIONS HAVE PRIORITY OVER meta['default_env'] and meta['env']
+        #           MULTIPLE VARIATIONS (THAT CAN BE TURNED ON AT THE SAME TIME) SHOULD NOT HAVE CONFLICTING ENV
+
+        #           VARIATIONS OVERWRITE current ENV but not input keys (they become const)
+
+
+
+
+        variations = script_artifact.meta.get('variations', {})
+        state['docker'] = meta.get('docker', {})
+
+        r = self._update_state_from_variations(i, meta, variation_tags, variations, env, state, deps, post_deps, prehook_deps, posthook_deps, new_env_keys_from_meta, new_state_keys_from_meta, add_deps_recursive, run_state, recursion_spaces, verbose)
+        if r['return'] > 0:
+            return r
+
+        warnings = meta.get('warnings', [])
+        if len(r.get('warnings', [])) >0:
+            warnings += r['warnings']
+
+        variation_tags_string = r['variation_tags_string']
+        explicit_variation_tags = r['explicit_variation_tags']
+
+        # USE CASE:
+        #  HERE we may have versions in the script input and in env['CM_VERSION_*']
+
+        # STEP 900: Get version, min, max, usable from env (priority if passed from another script to force version),
+        #           then script input, then script meta
+
+        #           VERSIONS SHOULD NOT BE USED INSIDE VARIATIONS (in meta)!
+
+        # First, take version from input
+        version = i.get('version', '').strip()
+        version_min = i.get('version_min', '').strip()
+        version_max = i.get('version_max', '').strip()
+        version_max_usable = i.get('version_max_usable', '').strip()
+
+        # Second, take from env
+        if version == '': version = env.get('CM_VERSION','')
+        if version_min == '': version_min = env.get('CM_VERSION_MIN','')
+        if version_max == '': version_max = env.get('CM_VERSION_MAX','')
+        if version_max_usable == '': version_max_usable = env.get('CM_VERSION_MAX_USABLE','')
+
+
+        # Third, take from meta
+        if version == '': version = meta.get('version', '')
+        if version_min == '': version_min = meta.get('version_min', '')
+        if version_max == '': version_max = meta.get('version_max', '')
+        if version_max_usable == '': version_max_usable = meta.get('version_max_usable', '')
+
+        # Update env with resolved versions
+        notes = []
+        for version_index in [(version, 'CM_VERSION', ' == {}'),
+                              (version_min, 'CM_VERSION_MIN', ' >= {}'),
+                              (version_max, 'CM_VERSION_MAX', ' <= {}'),
+                              (version_max_usable, 'CM_VERSION_MAX_USABLE', '({})')]:
+            version_value = version_index[0]
+            key = version_index[1]
+            note = version_index[2]
+
+            if version_value !='':
+                env[key] = version_value
+
+                notes.append(note.format(version_value))
+#            elif key in env:
+#                # If version_X is "", remove the related key from ENV ...
+# del(env[key]) + + if len(notes)>0: + if verbose: + print (recursion_spaces+' - Requested version: ' + ' '.join(notes)) + + # STEP 900 output: version* set + # env['CM_VERSION*] set + + + + # STEP 1000: Update version only if in "versions" (not obligatory) + # can be useful when handling complex Git revisions + versions = script_artifact.meta.get('versions', {}) + + if version!='' and version in versions: + versions_meta = versions[version] + r = update_state_from_meta(versions_meta, env, state, deps, post_deps, prehook_deps, posthook_deps, new_env_keys_from_meta, new_state_keys_from_meta, i) + if r['return']>0: return r + adr=get_adr(versions_meta) + if adr: + self._merge_dicts_with_tags(add_deps_recursive, adr) + #Processing them again using updated deps for add_deps_recursive + r = update_adr_from_meta(deps, post_deps, prehook_deps, posthook_deps, add_deps_recursive) + + + # STEP 1100: Update deps from input + r = update_deps_from_input(deps, post_deps, prehook_deps, posthook_deps, i) + if r['return']>0: return r + + + r = update_env_with_values(env) + if r['return']>0: return r + + if str(env.get('CM_RUN_STATE_DOCKER', False)).lower() in ['true', '1', 'yes']: + if state.get('docker'): + if str(state['docker'].get('run', True)).lower() in ['false', '0', 'no']: + print (recursion_spaces+' - Skipping script::{} run as we are inside docker'.format(found_script_artifact)) + return {'return': 0} + elif str(state['docker'].get('docker_real_run', True)).lower() in ['false', '0', 'no']: + print (recursion_spaces+' - Doing fake run for script::{} as we are inside docker'.format(found_script_artifact)) + fake_run = True + env['CM_TMP_FAKE_RUN']='yes' + + + + ############################################################################################################ + # Check extra cache tags + x = env.get('CM_EXTRA_CACHE_TAGS','').strip() + extra_cache_tags = [] if x=='' else x.split(',') + + if i.get('extra_cache_tags','')!='': + for x in i['extra_cache_tags'].strip().split(','): + if x!='': + if '<<<' in x: + import re + tmp_values = re.findall(r'<<<(.*?)>>>', str(x)) + for tmp_value in tmp_values: + xx = str(env.get(tmp_value,'')) + x = x.replace("<<<"+tmp_value+">>>", xx) + if x not in extra_cache_tags: + extra_cache_tags.append(x) + + if env.get('CM_NAME','')!='': + extra_cache_tags.append('name-'+env['CM_NAME'].strip().lower()) + + + + ############################################################################################################ + # Check if need to clean output files + clean_output_files = meta.get('clean_output_files', []) + + if len(clean_output_files)>0: + clean_tmp_files(clean_output_files, recursion_spaces) + + + + + + + ############################################################################################################ + # Check if the output of a selected script should be cached + cache = False if i.get('skip_cache', False) else meta.get('cache', False) + cache = False if fake_run else cache + cache = cache or (i.get('force_cache', False) and meta.get('can_force_cache', False)) + + cached_uid = '' + cached_tags = [] + cached_meta = {} + + remove_tmp_tag = False + reuse_cached = False + + found_cached = False + cached_path = '' + + local_env_keys_from_meta = meta.get('local_env_keys', []) + + + + + + ############################################################################################################ + # Check if script is cached if we need to skip deps from cached entries + this_script_cached = False + + 
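+        # A sketch of the cached state file (cm-cached-state.json) that is reloaded further below
+        # when a cached entry is reused; the env key and values are illustrative:
+        #
+        #   {
+        #     "new_env":   {"CM_PYTHON_BIN_WITH_PATH": "/usr/bin/python3"},
+        #     "new_state": {},
+        #     "version":   "3.10.12"
+        #   }
+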
############################################################################################################ + # Check if the output of a selected script should be cached + if cache: + # TBD - need to reuse and prune cache_list instead of a new CM search inside find_cached_script + + r = find_cached_script({'self':self, + 'recursion_spaces':recursion_spaces, + 'script_tags':script_tags, + 'found_script_tags':found_script_tags, + 'variation_tags':variation_tags, + 'explicit_variation_tags':explicit_variation_tags, + 'version':version, + 'version_min':version_min, + 'version_max':version_max, + 'extra_cache_tags':extra_cache_tags, + 'new_cache_entry':new_cache_entry, + 'meta':meta, + 'env':env, + 'skip_remembered_selections':skip_remembered_selections, + 'remembered_selections':remembered_selections, + 'quiet':quiet, + 'verbose':verbose + }) + if r['return'] >0: return r + + # Sort by tags to ensure determinism in order (and later add versions) + found_cached_scripts = sorted(r['found_cached_scripts'], key = lambda x: sorted(x.meta['tags'])) + + cached_tags = r['cached_tags'] + search_tags = r['search_tags'] + + num_found_cached_scripts = len(found_cached_scripts) + + if num_found_cached_scripts > 0: + selection = 0 + + # Check if quiet mode + if num_found_cached_scripts > 1: + if quiet: + num_found_cached_scripts = 1 + + if num_found_cached_scripts > 1: + selection = select_script_artifact(found_cached_scripts, 'cached script output', recursion_spaces, True, script_tags_string, quiet, verbose) + + if selection >= 0: + if not skip_remembered_selections: + # Remember selection + remembered_selections.append({'type': 'cache', + 'tags':search_tags, + 'cached_script':found_cached_scripts[selection]}) + else: + num_found_cached_scripts = 0 + + + elif num_found_cached_scripts == 1: + if verbose: + print (recursion_spaces+' - Found cached script output: {}'.format(found_cached_scripts[0].path)) + + + if num_found_cached_scripts > 0: + found_cached = True + + # Check chain of dynamic dependencies on other CM scripts + if len(deps)>0: + if verbose: + print (recursion_spaces + ' - Checking dynamic dependencies on other CM scripts:') + + r = self._call_run_deps(deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, True, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return']>0: return r + + if verbose: + print (recursion_spaces + ' - Processing env after dependencies ...') + + r = update_env_with_values(env) + if r['return']>0: return r + + + # Check chain of prehook dependencies on other CM scripts. 
(No execution of customize.py for cached scripts) + if verbose: + print (recursion_spaces + ' - Checking prehook dependencies on other CM scripts:') + + r = self._call_run_deps(prehook_deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return']>0: return r + + # Continue with the selected cached script + cached_script = found_cached_scripts[selection] + + if verbose: + print (recursion_spaces+' - Loading state from cached entry ...') + + path_to_cached_state_file = os.path.join(cached_script.path, + self.file_with_cached_state) + + r = utils.load_json(file_name = path_to_cached_state_file) + if r['return']>0: return r + version = r['meta'].get('version') + + print (recursion_spaces + ' ! load {}'.format(path_to_cached_state_file)) + + + ################################################################################################ + # IF REUSE FROM CACHE - update env and state from cache! + cached_state = r['meta'] + + new_env = cached_state['new_env'] + utils.merge_dicts({'dict1':env, 'dict2':new_env, 'append_lists':True, 'append_unique':True}) + + new_state = cached_state['new_state'] + utils.merge_dicts({'dict1':state, 'dict2':new_state, 'append_lists':True, 'append_unique':True}) + + utils.merge_dicts({'dict1':new_env, 'dict2':const, 'append_lists':True, 'append_unique':True}) + utils.merge_dicts({'dict1':new_state, 'dict2':const_state, 'append_lists':True, 'append_unique':True}) + + + + + + + # Check chain of posthook dependencies on other CM scripts. We consider them same as postdeps when + # script is in cache + if verbose: + print (recursion_spaces + ' - Checking posthook dependencies on other CM scripts:') + + clean_env_keys_post_deps = meta.get('clean_env_keys_post_deps',[]) + + r = self._call_run_deps(posthook_deps, self.local_env_keys, clean_env_keys_post_deps, env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return']>0: return r + + if verbose: + print (recursion_spaces + ' - Checking post dependencies on other CM scripts:') + + # Check chain of post dependencies on other CM scripts + r = self._call_run_deps(post_deps, self.local_env_keys, clean_env_keys_post_deps, env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return']>0: return r + + + + + + if renew or (not found_cached and num_found_cached_scripts == 0): + # Add more tags to cached tags + # based on meta information of the found script + x = 'script-artifact-' + meta['uid'] + if x not in cached_tags: + cached_tags.append(x) + + # Add all tags from the original CM script + for x in meta.get('tags', []): + if x not in cached_tags: + cached_tags.append(x) + + + if not found_cached and num_found_cached_scripts == 0: + + # If not cached, create cached script artifact and mark as tmp (remove if cache successful) + tmp_tags = ['tmp'] + + # Finalize tmp tags + tmp_tags += [ t for t in cached_tags if not t.startswith("-") ] + + # Check if some variations are missing + # though it should not happen! 
+ for t in variation_tags: + if t.startswith("-"): + continue + x = '_' + t + if x not in tmp_tags: + tmp_tags.append(x) + + # Use update to update the tmp one if already exists + if verbose: + print (recursion_spaces+' - Creating new "cache" script artifact in the CM local repository ...') + print (recursion_spaces+' - Tags: {}'.format(','.join(tmp_tags))) + + if version != '': + cached_meta['version'] = version + + ii = {'action':'update', + 'automation': self.meta['deps']['cache'], + 'search_tags':tmp_tags, + 'tags':','.join(tmp_tags), + 'meta':cached_meta, + 'force':True} + + r = self.cmind.access(ii) + if r['return'] > 0: return r + + remove_tmp_tag = True + + cached_script = r['list'][0] + + cached_path = cached_script.path + cached_meta = cached_script.meta + + cached_uid = cached_meta['uid'] + + # Changing path to CM script artifact for cached output + # to record data and files there + if verbose: + print (recursion_spaces+' - Changing to {}'.format(cached_path)) + + os.chdir(cached_path) + + + + # If found cached and we want to renew it + if found_cached and renew: + cached_path = cached_script.path + cached_meta = cached_script.meta + + cached_uid = cached_meta['uid'] + + # Changing path to CM script artifact for cached output + # to record data and files there + if verbose: + print (recursion_spaces+' - Changing to {}'.format(cached_path)) + + os.chdir(cached_path) + + # Force to finalize script inside cached entry + found_cached = False + remove_tmp_tag = True + + env['CM_RENEW_CACHE_ENTRY']='yes' + + # Prepare files to be cleaned + clean_files = [self.tmp_file_run_state, + self.tmp_file_run_env, + self.tmp_file_ver, + self.tmp_file_env + bat_ext, + self.tmp_file_env_all + bat_ext, + self.tmp_file_state, + self.tmp_file_run + bat_ext] + + if not found_cached and len(meta.get('clean_files', [])) >0: + clean_files = meta['clean_files'] + clean_files + + ################################ + if not found_cached: + if len(warnings)>0: + print ('=================================================') + print ('WARNINGS:') + print ('') + for w in warnings: + print (' '+w) + print ('=================================================') + + # Update default version meta if version is not set + if version == '': + default_version = meta.get('default_version', '') + if default_version != '': + version = default_version + + if version_min != '': + ry = self.cmind.access({'action':'compare_versions', + 'automation':'utils,dc2743f8450541e3', + 'version1':version, + 'version2':version_min}) + if ry['return']>0: return ry + + if ry['comparison'] < 0: + version = version_min + + if version_max != '': + ry = self.cmind.access({'action':'compare_versions', + 'automation':'utils,dc2743f8450541e3', + 'version1':version, + 'version2':version_max}) + if ry['return']>0: return ry + + if ry['comparison'] > 0: + if version_max_usable!='': + version = version_max_usable + else: + version = version_max + + if verbose: + print (recursion_spaces+' - Version is not specified - use either default_version from meta or min/max/usable: {}'.format(version)) + + env['CM_VERSION'] = version + + if 'version-'+version not in cached_tags: cached_tags.append('version-'+version) + + if default_version in versions: + versions_meta = versions[default_version] + r = update_state_from_meta(versions_meta, env, state, deps, post_deps, prehook_deps, posthook_deps, new_env_keys_from_meta, new_state_keys_from_meta, i) + if r['return']>0: return r + + if "add_deps_recursive" in versions_meta: + 
self._merge_dicts_with_tags(add_deps_recursive, versions_meta['add_deps_recursive']) + + # Run chain of docker dependencies if current run cmd is from inside a docker container + docker_deps = [] + if i.get('docker_run_deps'): + docker_meta = meta.get('docker') + if docker_meta: + docker_deps = docker_meta.get('deps', []) + if docker_deps: + docker_deps = [ dep for dep in docker_deps if not dep.get('skip_inside_docker', False) ] + + if len(docker_deps)>0: + + if verbose: + print (recursion_spaces + ' - Checking docker run dependencies on other CM scripts:') + + r = self._call_run_deps(docker_deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, False, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return']>0: return r + + if verbose: + print (recursion_spaces + ' - Processing env after docker run dependencies ...') + + r = update_env_with_values(env) + if r['return']>0: return r + + # Check chain of dependencies on other CM scripts + if len(deps)>0: + if verbose: + print (recursion_spaces + ' - Checking dependencies on other CM scripts:') + + r = self._call_run_deps(deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, False, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return']>0: return r + + if verbose: + print (recursion_spaces + ' - Processing env after dependencies ...') + + r = update_env_with_values(env) + if r['return']>0: return r + + # Clean some output files + clean_tmp_files(clean_files, recursion_spaces) + + # Check if has customize.py + path_to_customize_py = os.path.join(path, 'customize.py') + customize_code = None + + # Prepare common input to prepare and run script + run_script_input = { + 'path': path, + 'bat_ext': bat_ext, + 'os_info': os_info, + 'const': const, + 'state': state, + 'const_state': const_state, + 'reuse_cached': reuse_cached, + 'recursion': recursion, + 'recursion_spaces': recursion_spaces, + 'remembered_selections': remembered_selections, + 'tmp_file_run_state': self.tmp_file_run_state, + 'tmp_file_run_env': self.tmp_file_run_env, + 'tmp_file_state': self.tmp_file_state, + 'tmp_file_run': self.tmp_file_run, + 'local_env_keys': self.local_env_keys, + 'local_env_keys_from_meta': local_env_keys_from_meta, + 'posthook_deps': posthook_deps, + 'add_deps_recursive': add_deps_recursive, + 'remembered_selections': remembered_selections, + 'found_script_tags': found_script_tags, + 'variation_tags_string': variation_tags_string, + 'found_cached': False, + 'debug_script_tags': debug_script_tags, + 'verbose': verbose, + 'meta':meta, + 'self': self + } + + if repro_prefix != '': run_script_input['repro_prefix'] = repro_prefix + if ignore_script_error: run_script_input['ignore_script_error'] = True + + if os.path.isfile(path_to_customize_py): + r=utils.load_python_module({'path':path, 'name':'customize'}) + if r['return']>0: return r + + customize_code = r['code'] + + customize_common_input = { + 'input':i, + 'automation':self, + 'artifact':script_artifact, + 'customize':script_artifact.meta.get('customize',{}), + 'os_info':os_info, + 'recursion_spaces':recursion_spaces, + 'script_tags':script_tags, + 'variation_tags':variation_tags + } + + run_script_input['customize_code'] = customize_code + 
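+            # Illustrative sketch (assumption, not from this patch): a minimal
+            # customize.py loaded into customize_code above typically provides
+            #
+            #   def preprocess(i):
+            #       env = i['env']
+            #       if env.get('CM_TMP_SKIP_EXAMPLE', '') == 'yes':
+            #           return {'return': 0, 'skip': True}
+            #       env['CM_EXAMPLE_DETECTED'] = 'yes'
+            #       return {'return': 0}
+            #
+            # (CM_TMP_SKIP_EXAMPLE and CM_EXAMPLE_DETECTED are hypothetical keys.)
+            # The 'skip' key returned here is honoured by the preprocess handling below.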
run_script_input['customize_common_input'] = customize_common_input + + # Assemble PIP versions + pip_version_string = '' + + pip_version = env.get('CM_VERSION', '') + pip_version_min = env.get('CM_VERSION_MIN', '') + pip_version_max = env.get('CM_VERSION_MAX', '') + + if pip_version != '': + pip_version_string = '=='+pip_version + elif pip_version_min != '' and pip_version_max != '': + pip_version_string = '>='+pip_version_min+',<='+pip_version_max + elif pip_version_min != '': + pip_version_string = '>='+pip_version_min + elif pip_version_max != '': + pip_version_string = '<='+pip_version_max + + env['CM_TMP_PIP_VERSION_STRING'] = pip_version_string + if pip_version_string != '': + if verbose: + print (recursion_spaces+' # potential PIP version string (if needed): '+pip_version_string) + + # Check if pre-process and detect + if 'preprocess' in dir(customize_code) and not fake_run: + + if verbose: + print (recursion_spaces+' - Running preprocess ...') + + # Update env and state with const + utils.merge_dicts({'dict1':env, 'dict2':const, 'append_lists':True, 'append_unique':True}) + utils.merge_dicts({'dict1':state, 'dict2':const_state, 'append_lists':True, 'append_unique':True}) + + run_script_input['run_state'] = run_state + + ii = copy.deepcopy(customize_common_input) + ii['env'] = env + ii['state'] = state + ii['meta'] = meta + ii['run_script_input'] = run_script_input # may need to detect versions in multiple paths + + r = customize_code.preprocess(ii) + if r['return']>0: return r + + # Check if preprocess says to skip this component + skip = r.get('skip', False) + + if skip: + if verbose: + print (recursion_spaces+' - this script is skipped!') + + # Check if script asks to run other dependencies instead of the skipped one + another_script = r.get('script', {}) + + if len(another_script) == 0: + return {'return':0, 'skipped': True} + + if verbose: + print (recursion_spaces+' - another script is executed instead!') + + ii = { + 'action':'run', + 'automation':utils.assemble_cm_object(self.meta['alias'], self.meta['uid']), + 'recursion_spaces':recursion_spaces + extra_recursion_spaces, + 'recursion':True, + 'remembered_selections': remembered_selections, + 'env':env, + 'state':state, + 'const':const, + 'const_state':const_state, + 'save_env':save_env, + 'add_deps_recursive':add_deps_recursive + } + + ii.update(another_script) + + # Return to current path + os.chdir(current_path) + + ############################################################################################################ + return self.cmind.access(ii) + + # If return version + if cache: + if r.get('version','') != '': + cached_tags = [x for x in cached_tags if not x.startswith('version-')] + cached_tags.append('version-' + r['version']) + + if len(r.get('add_extra_cache_tags',[]))>0: + for t in r['add_extra_cache_tags']: + if t not in cached_tags: + cached_tags.append(t) + + + if print_env: + import json + if verbose: + print (json.dumps(env, indent=2, sort_keys=True)) + + # Check chain of pre hook dependencies on other CM scripts + if len(prehook_deps)>0: + if verbose: + print (recursion_spaces + ' - Checking prehook dependencies on other CM scripts:') + + r = self._call_run_deps(prehook_deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return']>0: return r + + if not fake_run: 
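+            # Note on CM_TMP_PIP_VERSION_STRING prepared above (illustrative values):
+            # it follows pip requirement-specifier syntax, e.g.
+            #   CM_VERSION=1.24.4                          ->  '==1.24.4'
+            #   CM_VERSION_MIN=1.20, CM_VERSION_MAX=1.26   ->  '>=1.20,<=1.26'
+            # so a native script can run, for example:
+            #   pip install numpy${CM_TMP_PIP_VERSION_STRING}
+            # (numpy is an assumed package used only for illustration)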
+ env_key_mappings = meta.get("env_key_mappings", {}) + if env_key_mappings: + update_env_keys(env, env_key_mappings) + + run_script_input['meta'] = meta + run_script_input['env'] = env + run_script_input['run_state'] = run_state + run_script_input['recursion'] = recursion + + r = prepare_and_run_script_with_postprocessing(run_script_input) + if r['return']>0: return r + + # If return version + if r.get('version','') != '': + version = r.get('version') + if cache: + cached_tags = [x for x in cached_tags if not x.startswith('version-')] + cached_tags.append('version-' + r['version']) + + if len(r.get('add_extra_cache_tags',[]))>0 and cache: + for t in r['add_extra_cache_tags']: + if t not in cached_tags: + cached_tags.append(t) + + # Check chain of post dependencies on other CM scripts + clean_env_keys_post_deps = meta.get('clean_env_keys_post_deps',[]) + + r = self._run_deps(post_deps, clean_env_keys_post_deps, env, state, const, const_state, add_deps_recursive, recursion_spaces, + remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return']>0: return r + + # Add extra tags from env updated by deps (such as python version and compiler version, etc) + extra_cache_tags_from_env = meta.get('extra_cache_tags_from_env',[]) + for extra_cache_tags in extra_cache_tags_from_env: + key = extra_cache_tags['env'] + prefix = extra_cache_tags.get('prefix','') + + v = env.get(key,'').strip() + if v!='': + for t in v.split(','): + x = 'deps-' + prefix + t + if x not in cached_tags: + cached_tags.append(x) + + + detected_version = env.get('CM_DETECTED_VERSION', env.get('CM_VERSION','')) + dependent_cached_path = env.get('CM_GET_DEPENDENT_CACHED_PATH','') + + ############################################################################################################ + ##################################### Finalize script + + # Force consts in the final new env and state + utils.merge_dicts({'dict1':env, 'dict2':const, 'append_lists':True, 'append_unique':True}) + utils.merge_dicts({'dict1':state, 'dict2':const_state, 'append_lists':True, 'append_unique':True}) + + if i.get('force_new_env_keys', []): + new_env_keys = i['force_new_env_keys'] + else: + new_env_keys = new_env_keys_from_meta + + if i.get('force_new_state_keys', []): + new_state_keys = i['force_new_state_keys'] + else: + new_state_keys = new_state_keys_from_meta + + r = detect_state_diff(env, saved_env, new_env_keys, new_state_keys, state, saved_state) + if r['return']>0: return r + + new_env = r['new_env'] + new_state = r['new_state'] + + utils.merge_dicts({'dict1':saved_env, 'dict2':new_env, 'append_lists':True, 'append_unique':True}) + utils.merge_dicts({'dict1':saved_state, 'dict2':new_state, 'append_lists':True, 'append_unique':True}) + + + + # Restore original env/state and merge env/state + # This is needed since we want to keep original env/state outside this script + # If we delete env and create a new dict, the original one outside this script will be detached + # That's why we just clean all keys in original env/state (used oustide) + # And then copy saved_env (with new_env merged) and saved_state (with new_state merged) + # while getting rid of all temporal updates in env and state inside this script + + for k in list(env.keys()): + del(env[k]) + for k in list(state.keys()): + del(state[k]) + + env.update(saved_env) + state.update(saved_state) + + + + # Prepare env script content (to be saved in cache and in the current path if needed) + 
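+        # Illustrative sketch (assumed meta): new_env_keys act as filters when
+        # detect_state_diff above selects which produced env keys are returned
+        # to the caller, e.g.
+        #
+        #   "new_env_keys": [ "CM_PYTHON_*", "LD_LIBRARY_PATH" ]
+        #
+        # would expose all CM_PYTHON_... keys plus LD_LIBRARY_PATH, while other
+        # temporary keys stay internal to this run.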
env_script = convert_env_to_script(new_env, os_info, start_script = os_info['start_script']) + + # If using cached script artifact, return to default path and then update the cache script artifact + if cache and cached_path!='': + # Check if need to remove tag + if remove_tmp_tag: + # Save state, env and deps for reuse + r = utils.save_json(file_name = os.path.join(cached_path, self.file_with_cached_state), + meta={'new_state':new_state, 'new_env':new_env, 'deps':deps, 'version': version}) + if r['return']>0: return r + + # Save all env + env_all_script = convert_env_to_script(env, os_info, start_script = os_info['start_script']) + + r = record_script(os.path.join(cached_path, self.tmp_file_env_all + bat_ext), + env_all_script, os_info) + if r['return']>0: return r + + # Save env + r = record_script(os.path.join(cached_path, self.tmp_file_env + bat_ext), + env_script, os_info) + if r['return']>0: return r + + # Remove tmp tag from the "cached" arifact to finalize caching + if verbose: + print (recursion_spaces+' - Removing tmp tag in the script cached output {} ...'.format(cached_uid)) + + # Check if version was detected and record in meta) + if detected_version != '': + cached_meta['version'] = detected_version + + if found_script_artifact != '': + cached_meta['associated_script_artifact'] = found_script_artifact + + x = found_script_artifact.find(',') + if x<0: + return {'return':1, 'error':'CM artifact format is wrong "{}" - no comma found'.format(found_script_artifact)} + + cached_meta['associated_script_artifact_uid'] = found_script_artifact[x+1:] + + + # Check if the cached entry is dependent on any other cached entry + if dependent_cached_path != '': + if os.path.isdir(cached_path) and os.path.isdir(dependent_cached_path): + if not os.path.samefile(cached_path, dependent_cached_path): + cached_meta['dependent_cached_path'] = dependent_cached_path + + ii = {'action': 'update', + 'automation': self.meta['deps']['cache'], + 'artifact': cached_uid, + 'meta':cached_meta, + 'replace_lists': True, # To replace tags + 'tags':','.join(cached_tags)} + + r = self.cmind.access(ii) + if r['return']>0: return r + + # Clean tmp files only in current path (do not touch cache - we keep all info there) + script_path = os.getcwd() + os.chdir(current_path) + + shell = i.get('shell', False) +# if not shell: +# shell = i.get('debug', False) + + if not shell and not i.get('dirty', False) and not cache: + clean_tmp_files(clean_files, recursion_spaces) + + # Record new env and new state in the current dir if needed + if save_env or shell: + # Check if script_prefix in the state from other components + where_to_add = len(os_info['start_script']) + + script_prefix = state.get('script_prefix',[]) + if len(script_prefix)>0: + env_script.insert(where_to_add, '\n') + for x in reversed(script_prefix): + env_script.insert(where_to_add, x) + + if shell: + x=['cmd', '.', '','.bat',''] if os_info['platform'] == 'windows' else ['bash', ' ""', '"','.sh','. ./'] + + env_script.append('\n') + env_script.append('echo{}\n'.format(x[1])) + env_script.append('echo {}Working path: {}{}'.format(x[2], script_path, x[2])) + xtmp_run_file = '' + tmp_run_file = 'tmp-run{}'.format(x[3]) + if os.path.isfile(tmp_run_file): + xtmp_run_file = 'Change and run "{}". '.format(tmp_run_file) + + env_script.append('echo {}Running debug shell. 
{}Type exit to quit ...{}\n'.format(x[2], xtmp_run_file, x[2])) + env_script.append('echo{}\n'.format(x[1])) + env_script.append('\n') + env_script.append(x[0]) + + env_file = self.tmp_file_env + bat_ext + + r = record_script(env_file, env_script, os_info) + if r['return']>0: return r + + if shell: + x = env_file if os_info['platform'] == 'windows' else '. ./'+env_file + os.system(x) + + if not version and detected_version: + version = detected_version + + if version: + script_uid = script_artifact.meta.get('uid') + script_alias = script_artifact.meta.get('alias') + script_tags = script_artifact.meta.get('tags') + version_info = {} + version_info_tags = ",".join(script_tags + variation_tags) + version_info[version_info_tags] = {} + version_info[version_info_tags]['script_uid'] = script_uid + version_info[version_info_tags]['script_alias'] = script_alias + version_info[version_info_tags]['version'] = version + version_info[version_info_tags]['parent'] = run_state['parent'] + run_state['version_info'].append(version_info) + script_versions = detected_versions.get(meta['uid'], []) + if not script_versions: + detected_versions[meta['uid']] = [ version ] + else: + script_versions.append(version) + else: + pass # these scripts don't have versions. Should we use cm mlops version here? + + ############################# RETURN + elapsed_time = time.time() - start_time + + if verbose and cached_uid!='': + print (recursion_spaces+' - cache UID: {}'.format(cached_uid)) + + if print_deps: + print_deps_data = self._print_deps(run_state['deps']) + new_state['print_deps'] = print_deps_data + + if print_readme: + readme = self._get_readme(i.get('cmd', ''), run_state['deps']) + with open('readme.md', 'w') as f: + f.write(readme) + + if i.get('dump_version_info'): + r = self._dump_version_info_for_script() + if r['return'] > 0: + return r + + rr = {'return':0, 'env':env, 'new_env':new_env, 'state':state, 'new_state':new_state, 'deps': run_state['deps']} + + # Print output as json to console + if i.get('json', False) or i.get('j', False): + import json + + print ('') + print (json.dumps(rr, indent=2)) + + + + # Check if save json to file + if repro_prefix !='': + dump_repro(repro_prefix, rr, run_state) + + if verbose or show_time: + print (recursion_spaces+' - running time of script "{}": {:.2f} sec.'.format(','.join(found_script_tags), elapsed_time)) + + + if not recursion and show_space: + stop_disk_stats = shutil.disk_usage("/") + + used_disk_space_in_mb = int((start_disk_stats.free - stop_disk_stats.free) / (1024*1024)) + + if used_disk_space_in_mb > 0: + print (recursion_spaces+' - used disk space: {} MB'.format(used_disk_space_in_mb)) + + + # Check if pause (useful if running a given script in a new terminal that may close automatically) + if i.get('pause', False): + print ('') + input ('Press Enter to continue ...') + + # Check if need to print some final info such as path to model, etc + print_env_at_the_end = meta.get('print_env_at_the_end',{}) + if len(print_env_at_the_end)>0: + print ('') + + for p in sorted(print_env_at_the_end): + t = print_env_at_the_end[p] + if t == '': t = 'ENV[{}]'.format(p) + + v = new_env.get(p, None) + + print ('{}: {}'.format(t, str(v))) + + print ('') + + return rr + + ###################################################################################### + def _dump_version_info_for_script(self, output_dir = os.getcwd()): + import json + with open(os.path.join(output_dir, 'version_info.json'), 'w') as f: + f.write(json.dumps(self.run_state['version_info'], indent=2)) + 
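+        # Shape of the dumped version_info.json (illustrative values; keys are
+        # taken from the version_info entries assembled above):
+        #
+        #   [ { "get,python3,shared": { "script_uid": "...",
+        #                               "script_alias": "get-python3",
+        #                               "version": "3.10.12",
+        #                               "parent": null } } ]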
return {'return': 0} + + ###################################################################################### + def _update_state_from_variations(self, i, meta, variation_tags, variations, env, state, deps, post_deps, prehook_deps, posthook_deps, new_env_keys_from_meta, new_state_keys_from_meta, add_deps_recursive, run_state, recursion_spaces, verbose): + + # Save current explicit variations + import copy + explicit_variation_tags=copy.deepcopy(variation_tags) + + # Calculate space + required_disk_space = {} + + # Check if warning + warnings = [] + + # variation_tags get appended by any aliases + r = self._get_variations_with_aliases(variation_tags, variations) + if r['return'] > 0: + return r + variation_tags = r['variation_tags'] + excluded_variation_tags = r['excluded_variation_tags'] + + # Get a dictionary of variation groups + r = self._get_variation_groups(variations) + if r['return'] > 0: + return r + + variation_groups = r['variation_groups'] + + run_state['variation_groups'] = variation_groups + + # Add variation(s) if specified in the "tags" input prefixed by _ + # If there is only 1 default variation, then just use it or substitute from CMD + + default_variation = meta.get('default_variation', '') + + if default_variation and default_variation not in variations: + return {'return': 1, 'error': 'Default variation "{}" is not in the list of variations: "{}" '.format(default_variation, variations.keys())} + + if len(variation_tags) == 0: + if default_variation != '' and default_variation not in excluded_variation_tags: + variation_tags = [default_variation] + + r = self._update_variation_tags_from_variations(variation_tags, variations, variation_groups, excluded_variation_tags) + if r['return'] > 0: + return r + + # variation_tags get appended by any default on variation in groups + r = self._process_variation_tags_in_groups(variation_tags, variation_groups, excluded_variation_tags, variations) + if r['return'] > 0: + return r + if variation_tags != r['variation_tags']: + variation_tags = r['variation_tags'] + + # we need to again process variation tags if any new default variation is added + r = self._update_variation_tags_from_variations(variation_tags, variations, variation_groups, excluded_variation_tags) + if r['return'] > 0: + return r + + + valid_variation_combinations = meta.get('valid_variation_combinations', []) + if valid_variation_combinations: + if not any ( all(t in variation_tags for t in s) for s in valid_variation_combinations): + return {'return': 1, 'error': 'Invalid variation combination "{}" prepared. Valid combinations: "{}" '.format(variation_tags, valid_variation_combinations)} + + invalid_variation_combinations = meta.get('invalid_variation_combinations', []) + if invalid_variation_combinations: + if any ( all(t in variation_tags for t in s) for s in invalid_variation_combinations): + return {'return': 1, 'error': 'Invalid variation combination "{}" prepared. 
Invalid combinations: "{}" '.format(variation_tags, invalid_variation_combinations)}
+
+        variation_tags_string = ''
+        if len(variation_tags)>0:
+            for t in variation_tags:
+                if variation_tags_string != '':
+                    variation_tags_string += ','
+
+                x = '_' + t
+                variation_tags_string += x
+
+        if verbose:
+            print (recursion_spaces+'    Prepared variations: {}'.format(variation_tags_string))
+
+        # Update env and other keys if variations are used
+        if len(variation_tags)>0:
+            for variation_tag in variation_tags:
+                if variation_tag.startswith('~'):
+                    # ignore such a tag (needed for caching only, to differentiate variations)
+                    continue
+
+                if variation_tag.startswith('-'):
+                    # ignore such a tag (needed for caching only, to eliminate variations)
+                    continue
+
+                variation_tag_dynamic_suffix = None
+                if variation_tag not in variations:
+                    if '.' in variation_tag and variation_tag[-1] != '.':
+                        variation_tag_dynamic_suffix = variation_tag[variation_tag.index(".")+1:]
+                        if not variation_tag_dynamic_suffix:
+                            return {'return':1, 'error':'tag {} is not in variations {}'.format(variation_tag, variations.keys())}
+                        variation_tag = self._get_name_for_dynamic_variation_tag(variation_tag)
+                    if variation_tag not in variations:
+                        return {'return':1, 'error':'tag {} is not in variations {}'.format(variation_tag, variations.keys())}
+
+                variation_meta = variations[variation_tag]
+                if variation_tag_dynamic_suffix:
+                    self._update_variation_meta_with_dynamic_suffix(variation_meta, variation_tag_dynamic_suffix)
+
+                r = update_state_from_meta(variation_meta, env, state, deps, post_deps, prehook_deps, posthook_deps, new_env_keys_from_meta, new_state_keys_from_meta, i)
+                if r['return']>0: return r
+
+                if variation_meta.get('script_name', '')!='':
+                    meta['script_name'] = variation_meta['script_name']
+
+                if variation_meta.get('required_disk_space', 0) > 0 and variation_tag not in required_disk_space:
+                    required_disk_space[variation_tag] = variation_meta['required_disk_space']
+
+                if variation_meta.get('warning', '') != '':
+                    x = variation_meta['warning']
+                    if x not in warnings: warnings.append(x)
+
+                adr=get_adr(variation_meta)
+                if adr:
+                    self._merge_dicts_with_tags(add_deps_recursive, adr)
+
+            combined_variations = [ t for t in variations if ',' in t ]
+
+            combined_variations.sort(key=lambda x: x.count(','))
+            ''' By sorting based on the number of variations, users can safely override
+                env and state in a larger combined variation
+            '''
+
+            for combined_variation in combined_variations:
+                v = combined_variation.split(",")
+                all_present = set(v).issubset(set(variation_tags))
+                if all_present:
+
+                    combined_variation_meta = variations[combined_variation]
+
+                    r = update_state_from_meta(combined_variation_meta, env, state, deps, post_deps, prehook_deps, posthook_deps, new_env_keys_from_meta, new_state_keys_from_meta, i)
+                    if r['return']>0: return r
+
+                    adr=get_adr(combined_variation_meta)
+                    if adr:
+                        self._merge_dicts_with_tags(add_deps_recursive, adr)
+
+                    if combined_variation_meta.get('script_name', '')!='':
+                        meta['script_name'] = combined_variation_meta['script_name']
+
+                    if combined_variation_meta.get('required_disk_space', 0) > 0 and combined_variation not in required_disk_space:
+                        required_disk_space[combined_variation] = combined_variation_meta['required_disk_space']
+
+                    if combined_variation_meta.get('warning', '') != '':
+                        x = combined_variation_meta['warning']
+                        if x not in warnings: warnings.append(x)
+
+        # Process the deps again using the updated add_deps_recursive
+        r = update_adr_from_meta(deps, post_deps, prehook_deps,
+                                 posthook_deps, add_deps_recursive)
+        if r['return']>0: return r
+
+        if len(required_disk_space)>0:
+            required_disk_space_sum_mb = sum(list(required_disk_space.values()))
+
+            warnings.append('Required disk space: {} MB'.format(required_disk_space_sum_mb))
+
+        return {'return': 0, 'variation_tags_string': variation_tags_string, 'explicit_variation_tags': explicit_variation_tags, 'warnings':warnings}
+
+    ######################################################################################
+    def _update_variation_tags_from_variations(self, variation_tags, variations, variation_groups, excluded_variation_tags):
+
+        import copy
+        tmp_variation_tags_static = copy.deepcopy(variation_tags)
+        for v_i in range(len(tmp_variation_tags_static)):
+            v = tmp_variation_tags_static[v_i]
+
+            if v not in variations:
+                v_static = self._get_name_for_dynamic_variation_tag(v)
+                tmp_variation_tags_static[v_i] = v_static
+
+        combined_variations = [ t for t in variations if ',' in t ]
+        # We support default_variations in the meta of combined_variations
+        combined_variations.sort(key=lambda x: x.count(','))
+        ''' By sorting based on the number of variations, users can safely override
+            env and state in a larger combined variation
+        '''
+        tmp_combined_variations = {k: False for k in combined_variations}
+
+        # Recursively add any base variations specified
+        if len(variation_tags) > 0:
+            tmp_variations = {k: False for k in variation_tags}
+            while True:
+                for variation_name in variation_tags:
+                    tag_to_append = None
+
+                    # ignore the excluded variations
+                    if variation_name.startswith("~") or variation_name.startswith("-"):
+                        tmp_variations[variation_name] = True
+                        continue
+
+                    if variation_name not in variations:
+                        variation_name = self._get_name_for_dynamic_variation_tag(variation_name)
+
+                    # Base variations are automatically turned on. Only variations outside of any variation group can be added as a base variation.
+                    if "base" in variations[variation_name]:
+                        base_variations = variations[variation_name]["base"]
+                        for base_variation in base_variations:
+                            dynamic_base_variation = False
+                            dynamic_base_variation_already_added = False
+                            if base_variation not in variations:
+                                base_variation_dynamic = self._get_name_for_dynamic_variation_tag(base_variation)
+                                if not base_variation_dynamic or base_variation_dynamic not in variations:
+                                    return {'return': 1, 'error': 'Variation "{}" specified as a base variation of "{}" does not exist'.format(base_variation, variation_name)}
+                                else:
+                                    dynamic_base_variation = True
+                                    base_prefix = base_variation_dynamic.split(".")[0]+"."
+                                    for x in variation_tags:
+                                        if x.startswith(base_prefix):
+                                            dynamic_base_variation_already_added = True
+
+                            if base_variation not in variation_tags and not dynamic_base_variation_already_added:
+                                tag_to_append = base_variation
+
+                            if tag_to_append:
+                                if tag_to_append in excluded_variation_tags:
+                                    return {'return': 1, 'error': 'Variation "{}" specified as a base variation of "{}" is in the excluded list'.format(tag_to_append, variation_name)}
+                                variation_tags.append(tag_to_append)
+                                tmp_variations[tag_to_append] = False
+
+                            tag_to_append = None
+
+                    # default_variations dictionary specifies the default_variation for each variation group.
A default variation in a group is turned on if no other variation from that group is turned on and it is not excluded using the '-' prefix + r = self._get_variation_tags_from_default_variations(variations[variation_name], variations, variation_groups, tmp_variation_tags_static, excluded_variation_tags) + if r['return'] > 0: + return r + + variations_to_add = r['variations_to_add'] + for t in variations_to_add: + tmp_variations[t] = False + variation_tags.append(t) + + tmp_variations[variation_name] = True + + for combined_variation in combined_variations: + if tmp_combined_variations[combined_variation]: + continue + v = combined_variation.split(",") + all_present = set(v).issubset(set(variation_tags)) + if all_present: + combined_variation_meta = variations[combined_variation] + tmp_combined_variations[combined_variation] = True + + r = self._get_variation_tags_from_default_variations(combined_variation_meta, variations, variation_groups, tmp_variation_tags_static, excluded_variation_tags) + if r['return'] > 0: + return r + + variations_to_add = r['variations_to_add'] + for t in variations_to_add: + tmp_variations[t] = False + variation_tags.append(t) + + all_base_processed = True + for variation_name in variation_tags: + if variation_name.startswith("-"): + continue + if variation_name not in variations: + variation_name = self._get_name_for_dynamic_variation_tag(variation_name) + if tmp_variations[variation_name] == False: + all_base_processed = False + break + if all_base_processed: + break + return {'return': 0} + + ###################################################################################### + def _get_variation_tags_from_default_variations(self, variation_meta, variations, variation_groups, tmp_variation_tags_static, excluded_variation_tags): + # default_variations dictionary specifies the default_variation for each variation group. A default variation in a group is turned on if no other variation from that group is turned on and it is not excluded using the '-' prefix + + tmp_variation_tags = [] + if "default_variations" in variation_meta: + default_base_variations = variation_meta["default_variations"] + for default_base_variation in default_base_variations: + tag_to_append = None + + if default_base_variation not in variation_groups: + return {'return': 1, 'error': 'Default variation "{}" is not a valid group. 
Valid groups are "{}" '.format(default_base_variation, variation_groups)} + + unique_allowed_variations = variation_groups[default_base_variation]['variations'] + # add the default only if none of the variations from the current group is selected and it is not being excluded with - prefix + if len(set(unique_allowed_variations) & set(tmp_variation_tags_static)) == 0 and default_base_variations[default_base_variation] not in excluded_variation_tags and default_base_variations[default_base_variation] not in tmp_variation_tags_static: + tag_to_append = default_base_variations[default_base_variation] + + if tag_to_append: + if tag_to_append not in variations: + variation_tag_static = self._get_name_for_dynamic_variation_tag(tag_to_append) + if not variation_tag_static or variation_tag_static not in variations: + return {'return': 1, 'error': 'Invalid variation "{}" specified in default variations for the variation "{}" '.format(tag_to_append, variation_meta)} + tmp_variation_tags.append(tag_to_append) + + return {'return': 0, 'variations_to_add': tmp_variation_tags} + + ############################################################ + def version(self, i): + """ + Print version + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + console = i.get('out') == 'con' + + version = self.__version__ + + if console: + print (version) + + return {'return':0, 'version':version} + + + ############################################################ + def search(self, i): + """ + Overriding the automation search function to filter out scripts not matching the given variation tags + + TBD: add input/output description + """ + + console = i.get('out') == 'con' + + # Check simplified CMD: cm run script "get compiler" + # If artifact has spaces, treat them as tags! 
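+        # Illustrative examples (assumed CLI usage):
+        #
+        #   cm run script "get compiler"     ->  tags=get,compiler
+        #   cm run script --tags=get,compiler,_llvm
+        #       ->  script tags ['get', 'compiler'], variation tags ['llvm']
+        #
+        # Variation tags are the ones prefixed by '_' and are separated out below.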
+ artifact = i.get('artifact','') + if ' ' in artifact: # or ',' in artifact: + del(i['artifact']) + if 'parsed_artifact' in i: del(i['parsed_artifact']) + # Force substitute tags + i['tags']=artifact.replace(' ',',') + + ############################################################################################################ + # Process tags to find script(s) and separate variations + # (not needed to find scripts) + tags_string = i.get('tags','').strip() + + tags = [] if tags_string == '' else tags_string.split(',') + + script_tags = [] + variation_tags = [] + + for t in tags: + t = t.strip() + if t != '': + if t.startswith('_'): + tx = t[1:] + if tx not in variation_tags: + variation_tags.append(tx) + elif t.startswith('-_'): + tx = '-' + t[2:] + if tx not in variation_tags: + variation_tags.append(tx) + else: + script_tags.append(t) + + excluded_tags = [ v[1:] for v in script_tags if v.startswith("-") ] + common = set(script_tags).intersection(set(excluded_tags)) + if common: + return {'return':1, 'error': 'There is common tags {} in the included and excluded lists'.format(common)} + + excluded_variation_tags = [ v[1:] for v in variation_tags if v.startswith("-") ] + common = set(variation_tags).intersection(set(excluded_variation_tags)) + if common: + return {'return':1, 'error': 'There is common variation tags {} in the included and excluded lists'.format(common)} + + ############################################################################################################ + # Find CM script(s) based on thier tags to get their meta (can be more than 1) + # Then check if variations exists inside meta + + i['tags'] = ','.join(script_tags) + + i['out'] = None + i['common'] = True + + r = super(CAutomation,self).search(i) + if r['return']>0: return r + + lst = r['list'] + + r['unfiltered_list'] = lst + + found_scripts = False if len(lst) == 0 else True + + if found_scripts and len(variation_tags)>0: + filtered = [] + + for script_artifact in lst: + meta = script_artifact.meta + variations = meta.get('variations', {}) + + matched = True + for t in variation_tags: + if t.startswith('-'): + t = t[1:] + if t in variations: + continue + matched = False + for s in variations: + if s.endswith('.#'): + if t.startswith(s[:-1]) and t[-1] != '.': + matched = True + break + if not matched: + break + if not matched: + continue + + filtered.append(script_artifact) + + if len(lst) > 0 and not filtered: + warning = [""] + for script in lst: + meta = script.meta + variations = meta.get('variations', {}) + warning.append('variation tags {} are not matching for the found script {} with variations {}\n'.format(variation_tags, meta.get('alias'), variations.keys())) + r['warning'] = "\n".join(warning) + + r['list'] = filtered + + # Print filtered paths if console + if console: + for script in r['list']: + print (script.path) + + # Finalize output + r['script_tags'] = script_tags + r['variation_tags'] = variation_tags + r['found_scripts'] = found_scripts + + return r + + ############################################################ + def test(self, i): + """ + Test automation (TBD) + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or 
CM access function
+                       [ (artifact alias, artifact UID) ] or
+                       [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+          ...
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+          * Output from this automation action
+
+        """
+
+        import json
+
+        # Check parsed automation
+        if 'parsed_automation' not in i:
+           return {'return':1, 'error':'automation is not specified'}
+
+        console = i.get('out') == 'con'
+
+        # Find CM artifact(s)
+        i['out'] = None
+        r = self.search(i)
+
+        if r['return']>0: return r
+
+        lst = r['list']
+        for script_artifact in lst:
+            path = script_artifact.path
+            meta = script_artifact.meta
+            original_meta = script_artifact.original_meta
+
+            alias = meta.get('alias','')
+            uid = meta.get('uid','')
+
+            if console:
+                print ('')
+                print (path)
+                print ('  Test: TBD')
+
+        return {'return':0, 'list': lst}
+
+
+    ############################################################
+    def native_run(self, i):
+        """
+        Run a native script or command via the CM API
+
+        Args:
+          (CM input dict):
+
+            env (dict): environment
+            command (str): command to run
+            (script) (list): initial lines of the native script
+            (script_name) (str): name of the temporary native script
+            ...
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+        """
+
+        env = i.get('env', {})
+        cmd = i.get('command', '')
+
+        script = i.get('script',[])
+
+        # Create temporary script name
+        script_name = i.get('script_name','')
+        if script_name=='':
+            script_name='tmp-native-run.'
+
+            if os.name == 'nt':
+                script_name+='bat'
+            else:
+                script_name+='sh'
+
+        if os.name == 'nt':
+            xcmd = 'call '+script_name
+
+            if len(script)==0:
+                script.append('@echo off')
+                script.append('')
+        else:
+            xcmd = 'chmod 755 '+script_name+' ; ./'+script_name
+
+            if len(script)==0:
+                script.append('#!/bin/bash')
+                script.append('')
+
+        # Assemble env
+        if len(env)>0:
+            for k in env:
+                v=env[k]
+
+                if os.name == 'nt':
+                    script.append('set '+k+'='+v)
+                else:
+                    if ' ' in v: v='"'+v+'"'
+                    script.append('export '+k+'='+v)
+
+            script.append('')
+
+        # Add CMD
+        script.append(cmd)
+
+        # Record script
+        r = utils.save_txt(file_name=script_name, string='\n'.join(script))
+        if r['return']>0: return r
+
+        # Run script
+        rc = os.system(xcmd)
+
+        return {'return':0, 'return_code':rc}
+
+    ############################################################
+    def add(self, i):
+        """
+        Add CM script
+
+        Args:
+          (CM input dict):
+
+            (out) (str): if 'con', output to console
+
+            parsed_artifact (list): prepared in CM CLI or CM access function
+                       [ (artifact alias, artifact UID) ] or
+                       [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+            (tags) (str): tags to find a CM script (CM artifact)
+
+            (script_name) (str): name of the script (it will be copied to the new entry and added to the meta)
+
+            (tags) (string or list): tags to be added to the meta
+
+            (new_tags) (string or list): new tags to be added to the meta (same as tags)
+
+            (json) (bool): if True, record JSON meta instead of YAML
+
+            (meta) (dict): preloaded meta
+
+            (template) (string): template to use (python)
+            (python) (bool): template=python
+            (pytorch) (bool): template=pytorch
+            ...
+ + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + import shutil + + console = i.get('out') == 'con' + + # Try to find script artifact by alias and/or tags + ii = utils.sub_input(i, self.cmind.cfg['artifact_keys']) + + parsed_artifact = i.get('parsed_artifact',[]) + + artifact_obj = parsed_artifact[0] if len(parsed_artifact)>0 else None + artifact_repo = parsed_artifact[1] if len(parsed_artifact)>1 else None + + script_name = '' + if 'script_name' in i: + script_name = i.get('script_name','').strip() + del(i['script_name']) + + if script_name != '' and not os.path.isfile(script_name): + return {'return':1, 'error':'file {} not found'.format(script_name)} + + # Move tags from input to meta of the newly created script artifact + tags_list = utils.convert_tags_to_list(i) + if 'tags' in i: del(i['tags']) + + if len(tags_list)==0: + if console: + x=input('Please specify a combination of unique tags separated by comma for this script: ') + x = x.strip() + if x!='': + tags_list = x.split(',') + + if len(tags_list)==0: + return {'return':1, 'error':'you must specify a combination of unique tags separate by comman using "--new_tags"'} + + # Add placeholder (use common action) + ii['out']='con' + ii['common']=True # Avoid recursion - use internal CM add function to add the script artifact + + # Check template path + template_dir = 'template' + + template = i.get('template','') + + if template == '': + if i.get('python', False): + template = 'python' + elif i.get('pytorch', False): + template = 'pytorch' + + if template!='': + template_dir += '-'+template + + template_path = os.path.join(self.path, template_dir) + + if not os.path.isdir(template_path): + return {'return':1, 'error':'template path {} not found'.format(template_path)} + + # Check if preloaded meta exists + meta = { + 'cache':False +# 20240127: Grigori commented that because newly created script meta looks ugly +# 'new_env_keys':[], +# 'new_state_keys':[], +# 'input_mapping':{}, +# 'docker_input_mapping':{}, +# 'deps':[], +# 'prehook_deps':[], +# 'posthook_deps':[], +# 'post_deps':[], +# 'versions':{}, +# 'variations':{}, +# 'input_description':{} + } + + fmeta = os.path.join(template_path, self.cmind.cfg['file_cmeta']) + + r = utils.load_yaml_and_json(fmeta) + if r['return']==0: + utils.merge_dicts({'dict1':meta, 'dict2':r['meta'], 'append_lists':True, 'append_unique':True}) + + # Check meta from CMD + xmeta = i.get('meta',{}) + + if len(xmeta)>0: + utils.merge_dicts({'dict1':meta, 'dict2':xmeta, 'append_lists':True, 'append_unique':True}) + + meta['automation_alias']=self.meta['alias'] + meta['automation_uid']=self.meta['uid'] + meta['tags']=tags_list + + script_name_base = script_name + script_name_ext = '' + if script_name!='': + # separate name and extension + j=script_name.rfind('.') + if j>=0: + script_name_base = script_name[:j] + script_name_ext = script_name[j:] + + meta['script_name'] = script_name_base + + ii['meta']=meta + ii['action']='add' + + use_yaml = True if not i.get('json',False) else False + + if use_yaml: + ii['yaml']=True + + ii['automation']='script,5b4e0237da074764' + + for k in ['parsed_automation', 'parsed_artifact']: + if k in ii: del ii[k] + + if artifact_repo != None: + artifact = ii.get('artifact','') + ii['artifact'] = utils.assemble_cm_object2(artifact_repo) + ':' + artifact + + r_obj=self.cmind.access(ii) + if r_obj['return']>0: return r_obj + + new_script_path = r_obj['path'] + + if console: + print 
('Created script in {}'.format(new_script_path)) + + # Copy files from template (only if exist) + files = [ + (template_path, 'README-extra.md', ''), + (template_path, 'customize.py', ''), + (template_path, 'main.py', ''), + (template_path, 'requirements.txt', ''), + (template_path, 'install_deps.bat', ''), + (template_path, 'install_deps.sh', ''), + (template_path, 'plot.bat', ''), + (template_path, 'plot.sh', ''), + (template_path, 'analyze.bat', ''), + (template_path, 'analyze.sh', ''), + (template_path, 'validate.bat', ''), + (template_path, 'validate.sh', '') + ] + + if script_name == '': + files += [(template_path, 'run.bat', ''), + (template_path, 'run.sh', '')] + else: + if script_name_ext == '.bat': + files += [(template_path, 'run.sh', script_name_base+'.sh')] + files += [('', script_name, script_name)] + + else: + files += [(template_path, 'run.bat', script_name_base+'.bat')] + files += [('', script_name, script_name_base+'.sh')] + + + for x in files: + path = x[0] + f1 = x[1] + f2 = x[2] + + if f2 == '': + f2 = f1 + + if path!='': + f1 = os.path.join(path, f1) + + if os.path.isfile(f1): + f2 = os.path.join(new_script_path, f2) + + if console: + print (' * Copying {} to {}'.format(f1, f2)) + + shutil.copyfile(f1,f2) + + return r_obj + + ############################################################################## + def _get_name_for_dynamic_variation_tag(script, variation_tag): + ''' + Returns the variation name in meta for the dynamic_variation_tag + ''' + if "." not in variation_tag or variation_tag[-1] == ".": + return None + return variation_tag[:variation_tag.index(".")+1]+"#" + + + ############################################################################## + def _update_variation_meta_with_dynamic_suffix(script, variation_meta, variation_tag_dynamic_suffix): + ''' + Updates the variation meta with dynamic suffix + ''' + for key in variation_meta: + value = variation_meta[key] + + if type(value) is list: #deps,pre_deps... + for item in value: + if type(item) is dict: + for item_key in item: + item_value = item[item_key] + if type(item_value) is dict: #env,default_env inside deps + for item_key2 in item_value: + item_value[item_key2] = item_value[item_key2].replace("#", variation_tag_dynamic_suffix) + elif type(item_value) is list: #names for example + for i,l_item in enumerate(item_value): + if type(l_item) is str: + item_value[i] = l_item.replace("#", variation_tag_dynamic_suffix) + else: + item[item_key] = item[item_key].replace("#", variation_tag_dynamic_suffix) + + elif type(value) is dict: #add_deps, env, .. + for item in value: + item_value = value[item] + if type(item_value) is dict: #deps + for item_key in item_value: + item_value2 = item_value[item_key] + if type(item_value2) is dict: #env,default_env inside deps + for item_key2 in item_value2: + item_value2[item_key2] = item_value2[item_key2].replace("#", variation_tag_dynamic_suffix) + else: + item_value[item_key] = item_value[item_key].replace("#", variation_tag_dynamic_suffix) + else: + if type(item_value) is list: # lists inside env... 
+                            for i,l_item in enumerate(item_value):
+                                if type(l_item) is str:
+                                    item_value[i] = l_item.replace("#", variation_tag_dynamic_suffix)
+                        else:
+                            value[item] = value[item].replace("#", variation_tag_dynamic_suffix)
+
+            else: # scalar value
+                pass # no dynamic update for now
+
+
+    ##############################################################################
+    def _get_variations_with_aliases(script, variation_tags, variations):
+        '''
+        Automatically turn on variation tags which are aliased by any given tag
+        '''
+        import copy
+        tmp_variation_tags=copy.deepcopy(variation_tags)
+
+        excluded_variations = [ k[1:] for k in variation_tags if k.startswith("-") ]
+        for i,e in enumerate(excluded_variations):
+            if e not in variations:
+                dynamic_tag = script._get_name_for_dynamic_variation_tag(e)
+                if dynamic_tag and dynamic_tag in variations:
+                    excluded_variations[i] = dynamic_tag
+
+        for k in variation_tags:
+            if k.startswith("-"):
+                continue
+            if k in variations:
+                variation = variations[k]
+            else:
+                variation = variations[script._get_name_for_dynamic_variation_tag(k)]
+            if 'alias' in variation:
+
+                if variation['alias'] in excluded_variations:
+                    return {'return': 1, 'error': 'Alias "{}" specified for the variation "{}" conflicts with the excluded variation "-{}" '.format(variation['alias'], k, variation['alias'])}
+
+                if variation['alias'] not in variations:
+                    return {'return': 1, 'error': 'Alias "{}" specified for the variation "{}" does not exist '.format(variation['alias'], k)}
+
+                if 'group' in variation:
+                    return {'return': 1, 'error': 'Incompatible combination: (alias, group) specified for the variation "{}" '.format(k)}
+
+                if 'default' in variation:
+                    return {'return': 1, 'error': 'Incompatible combination: (alias, default) specified for the variation "{}" '.format(k)}
+
+                if variation['alias'] not in tmp_variation_tags:
+                    tmp_variation_tags.append(variation['alias'])
+
+        return {'return':0, 'variation_tags': tmp_variation_tags, 'excluded_variation_tags': excluded_variations}
+
+
+
+    ##############################################################################
+    def _get_variation_groups(script, variations):
+
+        groups = {}
+
+        for k in variations:
+            variation = variations[k]
+            if not variation:
+                continue
+            if 'group' in variation:
+                if variation['group'] not in groups:
+                    groups[variation['group']] = {}
+                    groups[variation['group']]['variations'] = []
+                groups[variation['group']]['variations'].append(k)
+                if 'default' in variation:
+                    if 'default' in groups[variation['group']]:
+                        return {'return': 1, 'error': 'Multiple defaults specified for the variation group "{}": "{},{}" '.format(variation['group'], k, groups[variation['group']]['default'])}
+                    groups[variation['group']]['default'] = k
+
+        return {'return': 0, 'variation_groups': groups}
+
+
+    ##############################################################################
+    def _process_variation_tags_in_groups(script, variation_tags, groups, excluded_variations, variations):
+        import copy
+        tmp_variation_tags = copy.deepcopy(variation_tags)
+        tmp_variation_tags_static = copy.deepcopy(variation_tags)
+
+        for v_i in range(len(tmp_variation_tags_static)):
+            v = tmp_variation_tags_static[v_i]
+
+            if v not in variations:
+                v_static = script._get_name_for_dynamic_variation_tag(v)
+                tmp_variation_tags_static[v_i] = v_static
+
+        for k in groups:
+            group = groups[k]
+            unique_allowed_variations = group['variations']
+
+            if len(set(unique_allowed_variations) & set(tmp_variation_tags_static)) > 1:
+                return {'return': 1, 'error': 'Multiple 
variation tags selected for the variation group "{}": {} '.format(k, str(set(unique_allowed_variations) & set(tmp_variation_tags_static)))} + if len(set(unique_allowed_variations) & set(tmp_variation_tags_static)) == 0: + if 'default' in group and group['default'] not in excluded_variations: + tmp_variation_tags.append(group['default']) + + return {'return':0, 'variation_tags': tmp_variation_tags} + + + + + + + ############################################################################## + def _call_run_deps(script, deps, local_env_keys, local_env_keys_from_meta, env, state, const, const_state, + add_deps_recursive, recursion_spaces, remembered_selections, variation_tags_string, found_cached, debug_script_tags='', + verbose=False, show_time=False, extra_recursion_spaces=' ', run_state={'deps':[], 'fake_deps':[], 'parent': None}): + if len(deps) == 0: + return {'return': 0} + + # Check chain of post hook dependencies on other CM scripts + import copy + + # Get local env keys + local_env_keys = copy.deepcopy(local_env_keys) + + if len(local_env_keys_from_meta)>0: + local_env_keys += local_env_keys_from_meta + + r = script._run_deps(deps, local_env_keys, env, state, const, const_state, add_deps_recursive, recursion_spaces, + remembered_selections, variation_tags_string, found_cached, debug_script_tags, + verbose, show_time, extra_recursion_spaces, run_state) + if r['return']>0: return r + + return {'return': 0} + + ############################################################################## + def _run_deps(self, deps, clean_env_keys_deps, env, state, const, const_state, add_deps_recursive, recursion_spaces, + remembered_selections, variation_tags_string='', from_cache=False, debug_script_tags='', + verbose=False, show_time=False, extra_recursion_spaces=' ', run_state={'deps':[], 'fake_deps':[], 'parent': None}): + """ + Runs all the enabled dependencies and pass them env minus local env + """ + + if len(deps)>0: + # Preserve local env + tmp_env = {} + + variation_groups = run_state.get('variation_groups') + + for d in deps: + + if not d.get('tags'): + continue + + if d.get('skip_if_fake_run', False) and env.get('CM_TMP_FAKE_RUN','')=='yes': + continue + + if "enable_if_env" in d: + if not enable_or_skip_script(d["enable_if_env"], env): + continue + + if "skip_if_env" in d: + if enable_or_skip_script(d["skip_if_env"], env): + continue + + if from_cache and not d.get("dynamic", None): + continue + + update_tags_from_env_with_prefix = d.get("update_tags_from_env_with_prefix", {}) + for t in update_tags_from_env_with_prefix: + for key in update_tags_from_env_with_prefix[t]: + if str(env.get(key, '')).strip() != '': + d['tags']+=","+t+str(env[key]) + + for key in clean_env_keys_deps: + if '?' in key or '*' in key: + import fnmatch + for kk in list(env.keys()): + if fnmatch.fnmatch(kk, key): + tmp_env[kk] = env[kk] + del(env[kk]) + elif key in env: + tmp_env[key] = env[key] + del(env[key]) + + import re + for key in list(env.keys()): + value = env[key] + tmp_values = re.findall(r'<<<(.*?)>>>', str(value)) + if tmp_values == []: continue + tmp_env[key] = env[key] + del(env[key]) + + force_env_keys_deps = d.get("force_env_keys", []) + for key in force_env_keys_deps: + if '?' 
in key or '*' in key: + import fnmatch + for kk in list(tmp_env.keys()): + if fnmatch.fnmatch(kk, key): + env[kk] = tmp_env[kk] + elif key in tmp_env: + env[key] = tmp_env[key] + + if d.get("reuse_version", False): + for k in tmp_env: + if k.startswith('CM_VERSION'): + env[k] = tmp_env[k] + + update_tags_from_env = d.get("update_tags_from_env", []) + for t in update_tags_from_env: + if env.get(t, '').strip() != '': + d['tags']+=","+env[t] + + inherit_variation_tags = d.get("inherit_variation_tags", False) + skip_inherit_variation_groups = d.get("skip_inherit_variation_groups", []) + variation_tags_to_be_skipped = [] + if inherit_variation_tags: + if skip_inherit_variation_groups: #skips inheriting variations belonging to given groups + for group in variation_groups: + if group in skip_inherit_variation_groups: + variation_tags_to_be_skipped += variation_groups[group]['variations'] + + variation_tags = variation_tags_string.split(",") + variation_tags = [ x for x in variation_tags if not x.startswith("_") or x[1:] not in set(variation_tags_to_be_skipped) ] + + # handle group in case of dynamic variations + for t_variation in variation_tags_to_be_skipped: + if t_variation.endswith(".#"): + beg = t_variation[:-1] + for m_tag in variation_tags: + if m_tag.startswith("_"+beg): + variation_tags.remove(m_tag) + + deps_tags = d['tags'].split(",") + for tag in deps_tags: + if tag.startswith("-_") or tag.startswith("_-"): + variation_tag = "_" + tag[2:] + if variation_tag in variation_tags: + variation_tags.remove(variation_tag) + new_variation_tags_string = ",".join(variation_tags) + d['tags']+=","+new_variation_tags_string #deps should have non-empty tags + + run_state['deps'].append(d['tags']) + + if not run_state['fake_deps']: + import copy + tmp_run_state_deps = copy.deepcopy(run_state['deps']) + run_state['deps'] = [] + tmp_parent = run_state['parent'] + run_state['parent'] = run_state['script_id']+":"+",".join(run_state['script_variation_tags']) + tmp_script_id = run_state['script_id'] + tmp_script_variation_tags = run_state['script_variation_tags'] + + # Run collective script via CM API: + # Not very efficient but allows logging - can be optimized later + ii = { + 'action':'run', + 'automation':utils.assemble_cm_object(self.meta['alias'], self.meta['uid']), + 'recursion_spaces':recursion_spaces, # + extra_recursion_spaces, + 'recursion':True, + 'remembered_selections': remembered_selections, + 'env':env, + 'state':state, + 'const':const, + 'const_state':const_state, + 'add_deps_recursive':add_deps_recursive, + 'debug_script_tags':debug_script_tags, + 'verbose':verbose, + 'time':show_time, + 'run_state':run_state + + } + + for key in [ "env", "state", "const", "const_state" ]: + ii['local_'+key] = d.get(key, {}) + if d.get(key): + d[key] = {} + + utils.merge_dicts({'dict1':ii, 'dict2':d, 'append_lists':True, 'append_unique':True}) + + r = update_env_with_values(ii['env']) #to update env local to a dependency + if r['return']>0: return r + + r = self.cmind.access(ii) + if r['return']>0: return r + + run_state['deps'] = tmp_run_state_deps + run_state['parent'] = tmp_parent + run_state['script_id'] = tmp_script_id + run_state['script_variation_tags'] = tmp_script_variation_tags + + # Restore local env + env.update(tmp_env) + r = update_env_with_values(env) + if r['return']>0: return r + + return {'return': 0} + + ############################################################################## + def _merge_dicts_with_tags(self, dict1, dict2): + """ + Merges two dictionaries and append any tag 
strings in them + """ + if dict1 == dict2: + return {'return': 0} + for dep in dict1: + if 'tags' in dict1[dep]: + dict1[dep]['tags_list'] = utils.convert_tags_to_list(dict1[dep]) + for dep in dict2: + if 'tags' in dict2[dep]: + dict2[dep]['tags_list'] = utils.convert_tags_to_list(dict2[dep]) + utils.merge_dicts({'dict1':dict1, 'dict2':dict2, 'append_lists':True, 'append_unique':True}) + for dep in dict1: + if 'tags_list' in dict1[dep]: + dict1[dep]['tags'] = ",".join(dict1[dep]['tags_list']) + del(dict1[dep]['tags_list']) + for dep in dict2: + if 'tags_list' in dict2[dep]: + del(dict2[dep]['tags_list']) + + ############################################################################## + def _get_readme(self, cmd_parts, deps): + """ + Outputs a Markdown README file listing the CM run commands for the dependencies + """ + pre = '' + content = pre + heading2 = "## Command to Run\n" + content += heading2 + cmd="cm run script " + for cmd_part in cmd_parts: + cmd = cmd+ " "+cmd_part + content += "\n" + cmd = self._markdown_cmd(cmd) + content = content + cmd + "\n\n" + deps_heading = "## Dependent CM scripts\n" + deps_ = "" + run_cmds = self._get_deps_run_cmds(deps) + i = 1 + for cmd in run_cmds: + deps_ = deps_+ str(i) + ". " + self._markdown_cmd(cmd)+"\n" + i = i+1 + if deps_: + content += deps_heading + content += deps_ + return content + + ############################################################################## + def _markdown_cmd(self, cmd): + """ + Returns a CM command in markdown format + """ + return '```bash\n '+cmd+' \n ```' + + + ############################################################################## + def _print_deps(self, deps): + """ + Prints the CM run commands for the list of CM script dependencies + """ + print_deps_data = [] + run_cmds = self._get_deps_run_cmds(deps) + for cmd in run_cmds: + print_deps_data.append(cmd) + print(cmd) + return print_deps_data + + + ############################################################################## + def _get_deps_run_cmds(self, deps): + """ + Returns the CM run commands for the list of CM script dependencies + """ + run_cmds = [] + for dep_tags in deps: + run_cmds.append("cm run script --tags="+dep_tags) + return run_cmds + + + + + + ############################################################################## + def run_native_script(self, i): + """ + Run native script in a CM script entry + (wrapper around "prepare_and_run_script_with_postprocessing" function) + + Args: + (dict): + + run_script_input (dict): saved input for "prepare_and_run_script_with_postprocessing" function + env (dict): the latest environment for the script + script_name (str): native script name + + Returns: + (dict): Output from "prepare_and_run_script_with_postprocessing" function + + + """ + + import copy + + run_script_input = i['run_script_input'] + script_name = i['script_name'] + env = i.get('env','') + + # Create and work on a copy to avoid contamination + env_copy = copy.deepcopy(run_script_input.get('env',{})) + run_script_input_state_copy = copy.deepcopy(run_script_input.get('state',{})) + script_name_copy = run_script_input.get('script_name','') + + run_script_input['script_name'] = script_name + run_script_input['env'] = env + + r = prepare_and_run_script_with_postprocessing(run_script_input, postprocess="") + + env_tmp = copy.deepcopy(run_script_input['env']) + r['env_tmp'] = env_tmp + + run_script_input['state'] = run_script_input_state_copy + run_script_input['env'] = env_copy + run_script_input['script_name'] = script_name_copy + + 
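+        # r carries the output of prepare_and_run_script_with_postprocessing,
+        # extended with 'env_tmp' above. Illustrative call from customize code
+        # (the script name 'run-custom-helper' is an assumption):
+        #
+        #   r = automation.run_native_script({'run_script_input': run_script_input,
+        #                                     'env': env,
+        #                                     'script_name': 'run-custom-helper'})
+        #   if r['return'] > 0: return r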
return r + + ############################################################################## + def find_file_in_paths(self, i): + """ + Find file name in a list of paths + + Args: + (CM input dict): + + paths (list): list of paths + file_name (str): filename pattern to find + (select) (bool): if True and more than 1 path found, select + (select_default) (bool): if True, select the default one + (recursion_spaces) (str): add space to print + (run_script_input) (dict): prepared dict to run script and detect version + + (detect_version) (bool): if True, attempt to detect version + (env_path) (str): env key to pass path to the script to detect version + (run_script_input) (dict): use this input to run script to detect version + (env) (dict): env to check/force version + + (hook) (func): call this func to skip some artifacts + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + (found_files) (list): paths to files when found + + """ + import copy + + paths = i['paths'] + select = i.get('select',False) + select_default = i.get('select_default', False) + recursion_spaces = i.get('recursion_spaces','') + + hook = i.get('hook', None) + + verbose = i.get('verbose', False) + if not verbose: verbose = i.get('v', False) + + file_name = i.get('file_name', '') + file_name_re = i.get('file_name_re', '') + file_is_re = False + + if file_name_re != '': + file_name = file_name_re + file_is_re = True + + if file_name == '': + raise Exception('file_name or file_name_re not specified in find_artifact') + + found_files = [] + + import glob + import re + + for path in paths: + # May happen that path is in variable but it doesn't exist anymore + if os.path.isdir(path): + if file_is_re: + file_list = [os.path.join(path,f) for f in os.listdir(path) if re.match(file_name, f)] + + for f in file_list: + duplicate = False + for existing in found_files: + if os.path.samefile(existing, f): + duplicate = True + break + if not duplicate: + skip = False + if hook!=None: + r=hook({'file':f}) + if r['return']>0: return r + skip = r['skip'] + if not skip: + found_files.append(f) + + else: + path_to_file = os.path.join(path, file_name) + + file_pattern_suffixes = [ + "", + ".[0-9]", + ".[0-9][0-9]", + "-[0-9]", + "-[0-9][0-9]", + "[0-9]", + "[0-9][0-9]", + "[0-9].[0-9]", + "[0-9][0-9].[0-9]", + "[0-9][0-9].[0-9][0-9]" + ] + + for suff in file_pattern_suffixes: + file_list = glob.glob(path_to_file + suff) + for f in file_list: + duplicate = False + + for existing in found_files: + try: + if os.path.samefile(existing, f): + duplicate = True + break + except Exception as e: + # This function fails on Windows sometimes + # because some files can't be accessed + pass + + if not duplicate: + skip = False + if hook!=None: + r=hook({'file':f}) + if r['return']>0: return r + skip = r['skip'] + if not skip: + found_files.append(f) + + + if select: + # Check and prune versions + if i.get('detect_version', False): + found_paths_with_good_version = [] + found_files_with_good_version = [] + + env = i.get('env', {}) + + run_script_input = i['run_script_input'] + env_path_key = i['env_path_key'] + + version = env.get('CM_VERSION', '') + version_min = env.get('CM_VERSION_MIN', '') + version_max = env.get('CM_VERSION_MAX', '') + + x = '' + + if version != '': x += ' == {}'.format(version) + if version_min != '': x += ' >= {}'.format(version_min) + if version_max != '': x += ' <= {}'.format(version_max) + + if x!='': + print (recursion_spaces + ' - Searching 
for versions: {}'.format(x)) + + new_recursion_spaces = recursion_spaces + ' ' + + + for path_to_file in found_files: + + print ('') + print (recursion_spaces + ' * ' + path_to_file) + + run_script_input['env'] = env + run_script_input['env'][env_path_key] = path_to_file + run_script_input['recursion_spaces'] = new_recursion_spaces + + rx = prepare_and_run_script_with_postprocessing(run_script_input, postprocess="detect_version") + + run_script_input['recursion_spaces'] = recursion_spaces + + if rx['return']>0: + if rx['return'] != 2: + return rx + else: + # Version was detected + detected_version = rx.get('version','') + + if detected_version != '': + if detected_version == -1: + print (recursion_spaces + ' SKIPPED due to incompatibility ...') + else: + ry = check_version_constraints({'detected_version': detected_version, + 'version': version, + 'version_min': version_min, + 'version_max': version_max, + 'cmind':self.cmind}) + if ry['return']>0: return ry + + if not ry['skip']: + found_files_with_good_version.append(path_to_file) + else: + print (recursion_spaces + ' SKIPPED due to version constraints ...') + + found_files = found_files_with_good_version + + # Continue with selection + if len(found_files)>1: + if len(found_files) == 1 or select_default: + selection = 0 + else: + # Select 1 and proceed + print (recursion_spaces+' - More than 1 path found:') + + print ('') + num = 0 + + for file in found_files: + print (recursion_spaces+' {}) {}'.format(num, file)) + num += 1 + + print ('') + x=input(recursion_spaces+' Make your selection or press Enter for 0: ') + + x=x.strip() + if x=='': x='0' + + selection = int(x) + + if selection < 0 or selection >= num: + selection = 0 + + print ('') + print (recursion_spaces+' Selected {}: {}'.format(selection, found_files[selection])) + + found_files = [found_files[selection]] + + return {'return':0, 'found_files':found_files} + + ############################################################################## + def detect_version_using_script(self, i): + """ + Detect version using script + + Args: + (CM input dict): + + (recursion_spaces) (str): add space to print + + run_script_input (dict): use this input to run script to detect version + (env) (dict): env to check/force version + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + 16 if not detected + * (error) (str): error string if return>0 + + (detected_version) (str): detected version + + """ + recursion_spaces = i.get('recursion_spaces','') + + import copy + + detected = False + + env = i.get('env', {}) + + run_script_input = i['run_script_input'] + + version = env.get('CM_VERSION', '') + version_min = env.get('CM_VERSION_MIN', '') + version_max = env.get('CM_VERSION_MAX', '') + + x = '' + + if version != '': x += ' == {}'.format(version) + if version_min != '': x += ' >= {}'.format(version_min) + if version_max != '': x += ' <= {}'.format(version_max) + + if x!='': + print (recursion_spaces + ' - Searching for versions: {}'.format(x)) + + new_recursion_spaces = recursion_spaces + ' ' + + run_script_input['recursion_spaces'] = new_recursion_spaces + run_script_input['env'] = env + + # Prepare run script + rx = prepare_and_run_script_with_postprocessing(run_script_input, postprocess="detect_version") + + run_script_input['recursion_spaces'] = recursion_spaces + + if rx['return'] == 0: + # Version was detected + detected_version = rx.get('version','') + + if detected_version != '': + ry = check_version_constraints({'detected_version': 
detected_version, + 'version': version, + 'version_min': version_min, + 'version_max': version_max, + 'cmind':self.cmind}) + if ry['return']>0: return ry + + if not ry['skip']: + return {'return':0, 'detected_version':detected_version} + + return {'return':16, 'error':'version was not detected'} + + ############################################################################## + def find_artifact(self, i): + """ + Find some artifact (file) by name + + Args: + (CM input dict): + + file_name (str): filename to find + + env (dict): global env + os_info (dict): OS info + + (detect_version) (bool): if True, attempt to detect version + (env_path) (str): env key to pass path to the script to detect version + (run_script_input) (dict): use this input to run script to detect version + + (default_path_env_key) (str): check in default paths from global env + (PATH, PYTHONPATH, LD_LIBRARY_PATH ...) + + (recursion_spaces) (str): add space to print + + (hook) (func): call this func to skip some artifacts + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + error = 16 if artifact not found but no problem + + found_path (list): found path to an artifact + full_path (str): full path to a found artifact + default_path_list (list): list of default paths + + """ + + import copy + + file_name = i['file_name'] + + os_info = i['os_info'] + + env = i['env'] + + env_path_key = i.get('env_path_key', '') + + run_script_input = i.get('run_script_input', {}) + extra_paths = i.get('extra_paths', {}) + + # Create and work on a copy to avoid contamination + env_copy = copy.deepcopy(env) + run_script_input_state_copy = copy.deepcopy(run_script_input.get('state',{})) + + default_path_env_key = i.get('default_path_env_key', '') + recursion_spaces = i.get('recursion_spaces', '') + + hook = i.get('hook', None) + + # Check if forced to search in a specific path or multiple paths + # separated by OS var separator (usually : or ;) + path = env.get('CM_TMP_PATH','') + + if path!='' and env.get('CM_TMP_PATH_IGNORE_NON_EXISTANT','')!='yes': + # Can be a list of paths + path_list_tmp = path.split(os_info['env_separator']) + for path_tmp in path_list_tmp: + if path_tmp.strip()!='' and not os.path.isdir(path_tmp): + return {'return':1, 'error':'path {} doesn\'t exist'.format(path_tmp)} + + # Check if forced path and file name from --input (CM_INPUT - local env - will not be visible for higher-level script) + forced_file = env.get('CM_INPUT','').strip() + if forced_file != '': + if not os.path.isfile(forced_file): + return {'return':1, 'error':'file {} doesn\'t exist'.format(forced_file)} + + file_name = os.path.basename(forced_file) + path = os.path.dirname(forced_file) + + default_path_list = self.get_default_path_list(i) + #[] if default_path_env_key == '' else \ + # os.environ.get(default_path_env_key,'').split(os_info['env_separator']) + + + if path == '': + path_list_tmp = default_path_list + else: + print (recursion_spaces + ' # Requested paths: {}'.format(path)) + path_list_tmp = path.split(os_info['env_separator']) + + # Check soft links + path_list_tmp2 = [] + for path_tmp in path_list_tmp: +# path_tmp_abs = os.path.realpath(os.path.join(path_tmp, file_name)) +# GF: I remarked above code because it doesn't work correcly +# for virtual python - it unsoftlinks virtual python and picks up +# native one from /usr/bin thus making workflows work incorrectly ... 
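+# Illustrative note (hypothetical paths): with a Python virtual environment,
+# os.path.realpath('<venv>/bin/python3') can resolve to '/usr/bin/python3',
+# which is why the plain os.path.join below keeps the symlinked path as-is.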
+ path_tmp_abs = os.path.join(path_tmp, file_name) + + if not path_tmp_abs in path_list_tmp2: + path_list_tmp2.append(path_tmp_abs) + + path_list = [] + for path_tmp in path_list_tmp2: + path_list.append(os.path.dirname(path_tmp)) + + # Check if quiet + select_default = True if env.get('CM_QUIET','') == 'yes' else False + + # Prepare paths to search + r = self.find_file_in_paths({'paths': path_list, + 'file_name': file_name, + 'select': True, + 'select_default': select_default, + 'detect_version': i.get('detect_version', False), + 'env_path_key': env_path_key, + 'env':env_copy, + 'hook':hook, + 'run_script_input': run_script_input, + 'recursion_spaces': recursion_spaces}) + + run_script_input['state'] = run_script_input_state_copy + + if r['return']>0: return r + + found_files = r['found_files'] + + if len(found_files)==0: + return {'return':16, 'error':'{} not found'.format(file_name)} + + # Finalize output + file_path = found_files[0] + found_path = os.path.dirname(file_path) + + if found_path not in default_path_list: + env_key = '+'+default_path_env_key + + paths = env.get(env_key, []) + if found_path not in paths: + paths.insert(0, found_path) + env[env_key] = paths + for extra_path in extra_paths: + epath = os.path.normpath(os.path.join(found_path, "..", extra_path)) + if os.path.exists(epath): + if extra_paths[extra_path] not in env: + env[extra_paths[extra_path]] = [] + env[extra_paths[extra_path]].append(epath) + print () + print (recursion_spaces + ' # Found artifact in {}'.format(file_path)) + + if env_path_key != '': + env[env_path_key] = file_path + + return {'return':0, 'found_path':found_path, + 'found_file_path':file_path, + 'found_file_name':os.path.basename(file_path), + 'default_path_list': default_path_list} + + ############################################################################## + def find_file_deep(self, i): + """ + Find file name in a list of paths + + Args: + (CM input dict): + + paths (list): list of paths + file_name (str): filename pattern to find + (restrict_paths) (list): restrict found paths to these combinations + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + (found_paths) (list): paths to files when found + + """ + + paths = i['paths'] + file_name = i['file_name'] + + restrict_paths = i.get('restrict_paths',[]) + + found_paths = [] + + for p in paths: + if os.path.isdir(p): + p1 = os.listdir(p) + for f in p1: + p2 = os.path.join(p, f) + + if os.path.isdir(p2): + r = self.find_file_deep({'paths':[p2], 'file_name': file_name, 'restrict_paths':restrict_paths}) + if r['return']>0: return r + + found_paths += r['found_paths'] + else: + if f == file_name: + found_paths.append(p) + break + + if len(found_paths) > 0 and len(restrict_paths) > 0: + filtered_found_paths = [] + + for p in found_paths: + for f in restrict_paths: + if f in p: + filtered_found_paths.append(p) + break + + found_paths = filtered_found_paths + + return {'return':0, 'found_paths':found_paths} + + ############################################################################## + def find_file_back(self, i): + """ + Find file name backwards + + Args: + (CM input dict): + + path (str): path to start with + file_name (str): filename or directory to find + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + (found_path) (str): path if found or empty + + """ + + path = i['path'] + file_name = 
i['file_name'] + + found_path = '' + + while path != '': + path_to_file = os.path.join(path, file_name) + if os.path.isfile(path_to_file): + break + + path2 = os.path.dirname(path) + + if path2 == path: + path = '' + break + else: + path = path2 + + return {'return':0, 'found_path':path} + + ############################################################################## + def parse_version(self, i): + """ + Parse version (used in post processing functions) + + Args: + (CM input dict): + + (file_name) (str): filename to get version from (tmp-ver.out by default) + match_text (str): RE match text string + group_number (int): RE group number to get version from + env_key (str): which env key to update + which_env (dict): which env to update + (debug) (boolean): if True, print some debug info + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + version (str): detected version + string (str): full file string + + """ + + file_name = i.get('file_name','') + if file_name == '': file_name = self.tmp_file_ver + + match_text = i['match_text'] + group_number = i['group_number'] + env_key = i['env_key'] + which_env = i['which_env'] + debug = i.get('debug', False) + + r = utils.load_txt(file_name = file_name, + check_if_exists = True, + split = True, + match_text = match_text, + fail_if_no_match = 'version was not detected') + if r['return']>0: + if r.get('string','')!='': + r['error'] += ' ({})'.format(r['string']) + return r + + string = r['string'] + + version = r['match'].group(group_number) + + which_env[env_key] = version + which_env['CM_DETECTED_VERSION'] = version # to be recorded in the cache meta + + return {'return':0, 'version':version, 'string':string} + + ############################################################################## + def update_deps(self, i): + """ + Update deps from pre/post processing + Args: + (CM input dict): + deps (dict): deps dict + update_deps (dict): key matches "names" in deps + Returns: + (CM return dict): + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + deps = i['deps'] + add_deps = i['update_deps'] + update_deps(deps, add_deps, False) + + return {'return':0} + + ############################################################################## + def get_default_path_list(self, i): + default_path_env_key = i.get('default_path_env_key', '') + os_info = i['os_info'] + default_path_list = [] if default_path_env_key == '' else \ + os.environ.get(default_path_env_key,'').split(os_info['env_separator']) + + return default_path_list + + + + ############################################################ + def doc(self, i): + """ + Document CM script. + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + parsed_artifact (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + (repos) (str): list of repositories to search for automations (internal & mlcommons@ck by default) + + (output_dir) (str): output directory (../docs by default) + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + return utils.call_internal_module(self, __file__, 'module_misc', 'doc', i) + + ############################################################ + def gui(self, i): + """ + Run GUI for CM script. 
+
+        Args:
+          (CM input dict):
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+        """
+
+        artifact = i.get('artifact', '')
+        tags = ''
+        if artifact != '':
+            if ' ' in artifact:
+                tags = artifact.replace(' ',',')
+
+        if tags=='':
+            tags = i.get('tags','')
+
+        if 'tags' in i:
+            del(i['tags'])
+
+        i['action']='run'
+        i['artifact']='gui'
+        i['parsed_artifact']=[('gui','605cac42514a4c69')]
+        i['script']=tags.replace(',',' ')
+
+        return self.cmind.access(i)
+
+
+
+    ############################################################
+    def dockerfile(self, i):
+        """
+        Generate Dockerfile for CM script.
+
+        Args:
+          (CM input dict):
+
+          (out) (str): if 'con', output to console
+
+          parsed_artifact (list): prepared in CM CLI or CM access function
+                                  [ (artifact alias, artifact UID) ] or
+                                  [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+          (repos) (str): list of repositories to search for automations (internal & mlcommons@ck by default)
+
+          (output_dir) (str): output directory (./ by default)
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+        """
+
+        return utils.call_internal_module(self, __file__, 'module_misc', 'dockerfile', i)
+
+    ############################################################
+    def docker(self, i):
+        """
+        Run CM script in an automatically-generated container.
+
+        Args:
+          (CM input dict):
+
+          (out) (str): if 'con', output to console
+
+          parsed_artifact (list): prepared in CM CLI or CM access function
+                                  [ (artifact alias, artifact UID) ] or
+                                  [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+          (repos) (str): list of repositories to search for automations (internal & mlcommons@ck by default)
+
+          (output_dir) (str): output directory (./ by default)
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+        """
+
+        return utils.call_internal_module(self, __file__, 'module_misc', 'docker', i)
+
+
+    ##############################################################################
+    def _available_variations(self, i):
+        """
+        Return an error listing the available variations
+
+        Args:
+          (CM input dict):
+
+          meta (dict): meta of the script
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+                                             16 if not detected
+          * (error) (str): error string if return>0
+
+        """
+
+        meta = i['meta']
+
+        list_of_variations = sorted(['_'+v for v in list(meta.get('variations',{}).keys())])
+
+        return {'return':1, 'error':'python package variation is not defined in "{}". Available: {}'.format(meta['alias'],' '.join(list_of_variations))}
+
+    ############################################################
+    def prepare(self, i):
+        """
+        Run CM script with --fake_run only to resolve deps
+        """
+
+        i['fake_run']=True
+
+        return self.run(i)
+
+    ############################################################
+    # Reusable blocks for some scripts
+    def clean_some_tmp_files(self, i):
+        """
+        Clean tmp files
+        """
+
+        env = i.get('env',{})
+
+        cur_work_dir = env.get('CM_TMP_CURRENT_SCRIPT_WORK_PATH','')
+        if cur_work_dir !='' and os.path.isdir(cur_work_dir):
+            for x in ['tmp-run.bat', 'tmp-state.json']:
+                xx = os.path.join(cur_work_dir, x)
+                if os.path.isfile(xx):
+                    os.remove(xx)
+
+        return {'return':0}
+
+
+
+##############################################################################
+def find_cached_script(i):
+    """
+    Internal automation function: find cached script
+
+    Args:
+      (CM input dict): run-time context prepared by the "run" function,
+        including script_tags, found_script_tags, variation_tags,
+        explicit_variation_tags, version/version_min/version_max,
+        extra_cache_tags, new_cache_entry, meta, env, self,
+        skip_remembered_selections, remembered_selections, quiet
+        and recursion_spaces
+
+    Returns:
+      (CM return dict):
+      * return (int): return code == 0 if no error and >0 if error
+      * (error) (str): error string if return>0
+      * cached_tags (list), search_tags (str), found_cached_scripts (list)
+    """
+
+    import copy
+
+    recursion_spaces = i['recursion_spaces']
+    script_tags = i['script_tags']
+    cached_tags = []
+    found_script_tags = i['found_script_tags']
+    variation_tags = i['variation_tags']
+    explicit_variation_tags = i['explicit_variation_tags']
+    version = i['version']
+    version_min = i['version_min']
+    version_max = i['version_max']
+    extra_cache_tags = i['extra_cache_tags']
+    new_cache_entry = i['new_cache_entry']
+    meta = i['meta']
+    env = i['env']
+    self_obj = i['self']
+    skip_remembered_selections = i['skip_remembered_selections']
+    remembered_selections = i['remembered_selections']
+    quiet = i['quiet']
+    search_tags = ''
+
+    verbose = i.get('verbose', False)
+    if not verbose: verbose = i.get('v', False)
+
+    found_cached_scripts = []
+
+    if verbose:
+        print (recursion_spaces + '  - Checking if script execution is already cached ...')
+
+    # Create a search query to find that we already ran this script with the same or similar input
+    # It will be gradually enhanced with more "knowledge" ...
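+    # Illustrative sketch (hypothetical tags, not from a real run): for a
+    # script with tags "get,python", an explicit variation "_shared" and
+    # version "3.9.1", the code below would assemble a cache search query
+    # along the lines of "-tmp,get,python,_shared,version-3.9.1".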
+ if len(script_tags)>0: + for x in script_tags: + if x not in cached_tags: + cached_tags.append(x) + + if len(found_script_tags)>0: + for x in found_script_tags: + if x not in cached_tags: + cached_tags.append(x) + + explicit_cached_tags=copy.deepcopy(cached_tags) + + if len(explicit_variation_tags)>0: + explicit_variation_tags_string = '' + + for t in explicit_variation_tags: + if explicit_variation_tags_string != '': + explicit_variation_tags_string += ',' + if t.startswith("-"): + x = "-_" + t[1:] + else: + x = '_' + t + explicit_variation_tags_string += x + + if x not in explicit_cached_tags: + explicit_cached_tags.append(x) + + if verbose: + print (recursion_spaces+' - Prepared explicit variations: {}'.format(explicit_variation_tags_string)) + + if len(variation_tags)>0: + variation_tags_string = '' + + for t in variation_tags: + if variation_tags_string != '': + variation_tags_string += ',' + if t.startswith("-"): + x = "-_" + t[1:] + else: + x = '_' + t + variation_tags_string += x + + if x not in cached_tags: + cached_tags.append(x) + + if verbose: + print (recursion_spaces+' - Prepared variations: {}'.format(variation_tags_string)) + + # Add version + if version !='': + if 'version-'+version not in cached_tags: + cached_tags.append('version-'+version) + explicit_cached_tags.append('version-'+version) + + # Add extra cache tags (such as "virtual" for python) + if len(extra_cache_tags)>0: + for t in extra_cache_tags: + if t not in cached_tags: + cached_tags.append(t) + explicit_cached_tags.append(t) + + # Add tags from deps (will be also duplicated when creating new cache entry) + extra_cache_tags_from_env = meta.get('extra_cache_tags_from_env',[]) + for extra_cache_tags in extra_cache_tags_from_env: + key = extra_cache_tags['env'] + prefix = extra_cache_tags.get('prefix','') + + v = env.get(key,'').strip() + if v!='': + for t in v.split(','): + x = 'deps-' + prefix + t + if x not in cached_tags: + cached_tags.append(x) + explicit_cached_tags.append(x) + + # Check if already cached + if not new_cache_entry: + search_tags = '-tmp' + if len(cached_tags) >0 : + search_tags += ',' + ','.join(explicit_cached_tags) + + if verbose: + print (recursion_spaces+' - Searching for cached script outputs with the following tags: {}'.format(search_tags)) + + r = self_obj.cmind.access({'action':'find', + 'automation':self_obj.meta['deps']['cache'], + 'tags':search_tags}) + if r['return']>0: return r + + found_cached_scripts = r['list'] + + # Check if selection is remembered + if not skip_remembered_selections and len(found_cached_scripts) > 1: + # Need to add extra cached tags here (since recorded later) + for selection in remembered_selections: + if selection['type'] == 'cache' and set(selection['tags'].split(',')) == set(search_tags.split(',')): + tmp_version_in_cached_script = selection['cached_script'].meta.get('version','') + + skip_cached_script = check_versions(self_obj.cmind, tmp_version_in_cached_script, version_min, version_max) + + if skip_cached_script: + return {'return':2, 'error':'The version of the previously remembered selection for a given script ({}) mismatches the newly requested one'.format(tmp_version_in_cached_script)} + else: + found_cached_scripts = [selection['cached_script']] + if verbose: + print (recursion_spaces + ' - Found remembered selection with tags "{}"!'.format(search_tags)) + break + + + if len(found_cached_scripts) > 0: + selection = 0 + + # Check version ranges ... 
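+        # Descriptive note: a candidate cache entry is pruned below if its
+        # recorded 'dependent_cached_path' no longer exists on disk, or if
+        # its 'version' violates version_min/version_max (see check_versions()).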
+ new_found_cached_scripts = [] + + for cached_script in found_cached_scripts: + skip_cached_script = False + dependent_cached_path = cached_script.meta.get('dependent_cached_path', '') + if dependent_cached_path: + if not os.path.exists(dependent_cached_path): + #Need to rm this cache entry + skip_cached_script = True + continue + + if not skip_cached_script: + cached_script_version = cached_script.meta.get('version', '') + + skip_cached_script = check_versions(self_obj.cmind, cached_script_version, version_min, version_max) + + if not skip_cached_script: + new_found_cached_scripts.append(cached_script) + + found_cached_scripts = new_found_cached_scripts + + return {'return':0, 'cached_tags':cached_tags, 'search_tags':search_tags, 'found_cached_scripts':found_cached_scripts} + + +############################################################################## +def enable_or_skip_script(meta, env): + """ + Internal: enable a dependency based on enable_if_env and skip_if_env meta information + """ + for key in meta: + if key in env: + value = str(env[key]).lower() + + meta_key = [str(v).lower() for v in meta[key]] + + if set(meta_key) & set(["yes", "on", "true", "1"]): + if value not in ["no", "off", "false", "0"]: + continue + elif set(meta_key) & set(["no", "off", "false", "0"]): + if value in ["no", "off", "false", "0"]: + continue + elif value in meta_key: + continue + return False + return True + +############################################################################################################ +def update_env_with_values(env, fail_on_not_found=False): + """ + Update any env key used as part of values in meta + """ + import re + for key in env: + if key.startswith("+") and type(env[key]) != list: + return {'return': 1, 'error': 'List value expected for {} in env'.format(key)} + + value = env[key] + + # Check cases such as --env.CM_SKIP_COMPILE + if type(value)==bool: + env[key] = str(value) + continue + + tmp_values = re.findall(r'<<<(.*?)>>>', str(value)) + + if not tmp_values: + if key == 'CM_GIT_URL' and env.get('CM_GIT_AUTH', "no") == "yes": + if 'CM_GH_TOKEN' in env and '@' not in env['CM_GIT_URL']: + params = {} + params["token"] = env['CM_GH_TOKEN'] + value = get_git_url("token", value, params) + elif 'CM_GIT_SSH' in env: + value = get_git_url("ssh", value) + env[key] = value + + continue + + for tmp_value in tmp_values: + if tmp_value not in env and fail_on_not_found: + return {'return':1, 'error':'variable {} is not in env'.format(tmp_value)} + if tmp_value in env: + value = value.replace("<<<"+tmp_value+">>>", str(env[tmp_value])) + + env[key] = value + + return {'return': 0} + + +############################################################################## +def check_version_constraints(i): + """ + Internal: check version constaints and skip script artifact if constraints are not met + """ + + detected_version = i['detected_version'] + + version = i.get('version', '') + version_min = i.get('version_min', '') + version_max = i.get('version_max', '') + + cmind = i['cmind'] + + skip = False + + if version != '' and version != detected_version: + skip = True + + if not skip and detected_version != '' and version_min != '': + ry = cmind.access({'action':'compare_versions', + 'automation':'utils,dc2743f8450541e3', + 'version1':detected_version, + 'version2':version_min}) + if ry['return']>0: return ry + + if ry['comparison'] < 0: + skip = True + + if not skip and detected_version != '' and version_max != '': + ry = cmind.access({'action':'compare_versions', + 
'automation':'utils,dc2743f8450541e3', + 'version1':detected_version, + 'version2':version_max}) + if ry['return']>0: return ry + + if ry['comparison'] > 0: + skip = True + + return {'return':0, 'skip':skip} + + +############################################################################## +def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"): + """ + Internal: prepare and run script with postprocessing that can be reused for version check + """ + + path = i['path'] + bat_ext = i['bat_ext'] + os_info = i['os_info'] + customize_code = i.get('customize_code', None) + customize_common_input = i.get('customize_common_input',{}) + + env = i.get('env', {}) + const = i.get('const', {}) + state = i.get('state', {}) + const_state = i.get('const_state', {}) + run_state = i.get('run_state', {}) + verbose = i.get('verbose', False) + if not verbose: verbose = i.get('v', False) + + show_time = i.get('time', False) + + recursion = i.get('recursion', False) + found_script_tags = i.get('found_script_tags', []) + debug_script_tags = i.get('debug_script_tags', '') + + meta = i.get('meta',{}) + + reuse_cached = i.get('reused_cached', False) + recursion_spaces = i.get('recursion_spaces', '') + + tmp_file_run_state = i.get('tmp_file_run_state', '') + tmp_file_run_env = i.get('tmp_file_run_env', '') + tmp_file_state = i.get('tmp_file_state', '') + tmp_file_run = i['tmp_file_run'] + local_env_keys = i.get('local_env_keys', []) + local_env_keys_from_meta = i.get('local_env_keys_from_meta', []) + posthook_deps = i.get('posthook_deps', []) + add_deps_recursive = i.get('add_deps_recursive', {}) + recursion_spaces = i['recursion_spaces'] + remembered_selections = i.get('remembered_selections', {}) + variation_tags_string = i.get('variation_tags_string', '') + found_cached = i.get('found_cached', False) + script_automation = i['self'] + + repro_prefix = i.get('repro_prefix', '') + + # Prepare script name + check_if_run_script_exists = False + script_name = i.get('script_name','').strip() + if script_name == '': + script_name = meta.get('script_name','').strip() + if script_name !='': + # Script name was added by user - we need to check that it really exists (on Linux or Windows) + check_if_run_script_exists = True + if script_name == '': + # Here is the default script name - if it doesn't exist, we skip it. + # However, if it's explicitly specified, we check it and report + # if it's missing ... 
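+        # Example of the fallback (hypothetical files): on Linux,
+        # get_script_name() below may prefer 'run-ubuntu-20.04.sh' over
+        # 'run-ubuntu.sh' over plain 'run.sh', depending on CM_HOST_OS_FLAVOR,
+        # CM_HOST_OS_VERSION and CM_HOST_PLATFORM_FLAVOR; on Windows,
+        # 'run.bat' is used instead.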
+ script_name = 'run' + + if bat_ext == '.sh': + run_script = get_script_name(env, path, script_name) + else: + run_script = script_name + bat_ext + + path_to_run_script = os.path.join(path, run_script) + + if check_if_run_script_exists and not os.path.isfile(path_to_run_script): + return {'return':16, 'error':'script {} not found - please add one'.format(path_to_run_script)} + + # Update env and state with const + utils.merge_dicts({'dict1':env, 'dict2':const, 'append_lists':True, 'append_unique':True}) + utils.merge_dicts({'dict1':state, 'dict2':const_state, 'append_lists':True, 'append_unique':True}) + + # Update env with the current path + if os_info['platform'] == 'windows' and ' ' in path: + path = '"' + path + '"' + + cur_dir = os.getcwd() + + env['CM_TMP_CURRENT_SCRIPT_PATH'] = path + env['CM_TMP_CURRENT_SCRIPT_WORK_PATH'] = cur_dir + + # Record state + if tmp_file_state != '': + r = utils.save_json(file_name = tmp_file_state, meta = state) + if r['return']>0: return r + + rr = {'return':0} + + # If batch file exists, run it with current env and state + if os.path.isfile(path_to_run_script) and not reuse_cached: + if tmp_file_run_state != '' and os.path.isfile(tmp_file_run_state): + os.remove(tmp_file_run_state) + if tmp_file_run_env != '' and os.path.isfile(tmp_file_run_env): + os.remove(tmp_file_run_env) + + run_script = tmp_file_run + bat_ext + + if verbose: + print ('') + print (recursion_spaces + ' - Running native script "{}" from temporal script "{}" in "{}" ...'.format(path_to_run_script, run_script, cur_dir)) + print ('') + + print (recursion_spaces + ' ! cd {}'.format(cur_dir)) + print (recursion_spaces + ' ! call {} from {}'.format(path_to_run_script, run_script)) + + + # Prepare env variables + import copy + script = copy.deepcopy(os_info['start_script']) + + # Check if script_prefix in the state from other components + script_prefix = state.get('script_prefix',[]) + if len(script_prefix)>0: +# script = script_prefix + ['\n'] + script + script += script_prefix + ['\n'] + + script += convert_env_to_script(env, os_info) + + # Check if run bash/cmd before running the command (for debugging) + if debug_script_tags !='' and all(item in found_script_tags for item in debug_script_tags.split(',')): + x=['cmd', '.', '','.bat'] if os_info['platform'] == 'windows' else ['bash', ' ""', '"','.sh'] + + script.append('\n') + script.append('echo{}\n'.format(x[1])) + script.append('echo {}Running debug shell. 
Type exit to resume script execution ...{}\n'.format(x[2],x[3],x[2])) + script.append('echo{}\n'.format(x[1])) + script.append('\n') + script.append(x[0]) + + # Append batch file to the tmp script + script.append('\n') + script.append(os_info['run_bat'].replace('${bat_file}', '"'+path_to_run_script+'"') + '\n') + + # Prepare and run script + r = record_script(run_script, script, os_info) + if r['return']>0: return r + + # Run final command + cmd = os_info['run_local_bat_from_python'].replace('${bat_file}', run_script) + + rc = os.system(cmd) + + if rc>0 and not i.get('ignore_script_error', False): + # Check if print files when error + print_files = meta.get('print_files_if_script_error', []) + if len(print_files)>0: + for pr in print_files: + if os.path.isfile(pr): + r = utils.load_txt(file_name = pr) + if r['return'] == 0: + print ("========================================================") + print ("Print file {}:".format(pr)) + print ("") + print (r['string']) + print ("") + + note = ''' +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Note that it may be a portability issue of a third-party tool or a native script +wrapped and unified by this automation recipe (CM script). In such case, +please report this issue with a full log at "https://github.com/mlcommons/ck". +The CM concept is to collaboratively fix such issues inside portable CM scripts +to make existing tools and native scripts more portable, interoperable +and deterministic. Thank you''' + + rr = {'return':2, 'error':'Portable CM script failed (name = {}, return code = {})\n\n{}'.format(meta['alias'], rc, note)} + + if repro_prefix != '': + dump_repro(repro_prefix, rr, run_state) + + return rr + + # Load updated state if exists + if tmp_file_run_state != '' and os.path.isfile(tmp_file_run_state): + r = utils.load_json(file_name = tmp_file_run_state) + if r['return']>0: return r + + updated_state = r['meta'] + + utils.merge_dicts({'dict1':state, 'dict2':updated_state, 'append_lists':True, 'append_unique':True}) + + # Load updated env if exists + if tmp_file_run_env != '' and os.path.isfile(tmp_file_run_env): + r = utils.load_txt(file_name = tmp_file_run_env) + if r['return']>0: return r + + r = utils.convert_env_to_dict(r['string']) + if r['return']>0: return r + + updated_env = r['dict'] + + utils.merge_dicts({'dict1':env, 'dict2':updated_env, 'append_lists':True, 'append_unique':True}) + + + if postprocess != '' and customize_code is not None: + print (recursion_spaces+' ! 
call "{}" from {}'.format(postprocess, customize_code.__file__)) + + if len(posthook_deps)>0 and (postprocess == "postprocess"): + r = script_automation._call_run_deps(posthook_deps, local_env_keys, local_env_keys_from_meta, env, state, const, const_state, + add_deps_recursive, recursion_spaces, remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, ' ', run_state) + if r['return']>0: return r + + if (postprocess == "postprocess") and customize_code is not None and 'postprocess' in dir(customize_code): + rr = run_postprocess(customize_code, customize_common_input, recursion_spaces, env, state, const, + const_state, meta, verbose, i) # i as run_script_input + elif (postprocess == "detect_version") and customize_code is not None and 'detect_version' in dir(customize_code): + rr = run_detect_version(customize_code, customize_common_input, recursion_spaces, env, state, const, + const_state, meta, verbose) + + return rr + +############################################################################## +def run_detect_version(customize_code, customize_common_input, recursion_spaces, env, state, const, const_state, meta, verbose=False): + + if customize_code is not None and 'detect_version' in dir(customize_code): + import copy + + if verbose: + print (recursion_spaces+' - Running detect_version ...') + + # Update env and state with const + utils.merge_dicts({'dict1':env, 'dict2':const, 'append_lists':True, 'append_unique':True}) + utils.merge_dicts({'dict1':state, 'dict2':const_state, 'append_lists':True, 'append_unique':True}) + + ii = copy.deepcopy(customize_common_input) + ii['env'] = env + ii['state'] = state + ii['meta'] = meta + + r = customize_code.detect_version(ii) + return r + + return {'return': 0} + +############################################################################## +def run_postprocess(customize_code, customize_common_input, recursion_spaces, env, state, const, const_state, meta, verbose=False, run_script_input=None): + + if customize_code is not None and 'postprocess' in dir(customize_code): + import copy + + if verbose: + print (recursion_spaces+' - Running postprocess ...') + + # Update env and state with const + utils.merge_dicts({'dict1':env, 'dict2':const, 'append_lists':True, 'append_unique':True}) + utils.merge_dicts({'dict1':state, 'dict2':const_state, 'append_lists':True, 'append_unique':True}) + + ii = copy.deepcopy(customize_common_input) + ii['env'] = env + ii['state'] = state + ii['meta'] = meta + + if run_script_input != None: + ii['run_script_input'] = run_script_input + + r = customize_code.postprocess(ii) + return r + + return {'return': 0} + +############################################################################## +def get_script_name(env, path, script_name = 'run'): + """ + Internal: find the most appropriate run script name for the detected OS + """ + + from os.path import exists + + tmp_suff1 = env.get('CM_HOST_OS_FLAVOR', '') + tmp_suff2 = env.get('CM_HOST_OS_VERSION', '') + tmp_suff3 = env.get('CM_HOST_PLATFORM_FLAVOR', '') + + if exists(os.path.join(path, script_name+'-' + tmp_suff1 + '-'+ tmp_suff2 + '-' + tmp_suff3 + '.sh')): + return script_name+'-' + tmp_suff1 + '-' + tmp_suff2 + '-' + tmp_suff3 + '.sh' + elif exists(os.path.join(path, script_name+'-' + tmp_suff1 + '-' + tmp_suff3 + '.sh')): + return script_name+'-' + tmp_suff1 + '-' + tmp_suff3 + '.sh' + elif exists(os.path.join(path, script_name+'-' + tmp_suff1 + '-' + tmp_suff2 + '.sh')): + return script_name+'-' + tmp_suff1 + '-' + 
tmp_suff2 + '.sh' + elif exists(os.path.join(path, script_name+'-' + tmp_suff1 + '.sh')): + return script_name+'-' + tmp_suff1 + '.sh' + elif exists(os.path.join(path, script_name+'-' + tmp_suff3 + '.sh')): + return script_name+'-' + tmp_suff3 + '.sh' + else: + return script_name+'.sh'; + +############################################################################## +def update_env_keys(env, env_key_mappings): + """ + Internal: convert env keys as per the given mapping + """ + + for key_prefix in env_key_mappings: + for key in list(env): + if key.startswith(key_prefix): + new_key = key.replace(key_prefix, env_key_mappings[key_prefix]) + env[new_key] = env[key] + #del(env[key]) + +############################################################################## +def convert_env_to_script(env, os_info, start_script = []): + """ + Internal: convert env to script for a given platform + """ + + import copy + script = copy.deepcopy(start_script) + + windows = True if os_info['platform'] == 'windows' else False + + for k in sorted(env): + env_value = env[k] + + if windows: + x = env_value + if type(env_value)!=list: + x = [x] + + xx = [] + for v in x: + # If " is already in env value, it means that there was some custom processing to consider special characters + + y=str(v) + + if '"' not in y: + for z in ['|', '&', '>', '<']: + if z in y: + y = '"'+y+'"' + break + xx.append(y) + + env_value = xx if type(env_value)==list else xx[0] + + # Process special env + key = k + + if k.startswith('+'): + # List and append the same key at the end (+PATH, +LD_LIBRARY_PATH, +PYTHONPATH) + key=k[1:] + first = key[0] + env_separator = os_info['env_separator'] + # If key starts with a symbol use it as the list separator (+ CFLAG will use ' ' the + # list separator while +;TEMP will use ';' as the separator) + if not first.isalnum(): + env_separator = first + key=key[1:] + + env_value = env_separator.join(env_value) + \ + env_separator + \ + os_info['env_var'].replace('env_var', key) + + v = os_info['set_env'].replace('${key}', key).replace('${value}', str(env_value)) + + script.append(v) + + return script + +############################################################################## +def record_script(run_script, script, os_info): + """ + Internal: record script and chmod 755 on Linux + """ + + final_script = '\n'.join(script) + + if not final_script.endswith('\n'): + final_script += '\n' + + r = utils.save_txt(file_name=run_script, string=final_script) + if r['return']>0: return r + + if os_info.get('set_exec_file','')!='': + cmd = os_info['set_exec_file'].replace('${file_name}', run_script) + rc = os.system(cmd) + + return {'return':0} + +############################################################################## +def clean_tmp_files(clean_files, recursion_spaces): + """ + Internal: clean tmp files + """ + +# print ('') +# print (recursion_spaces+' - cleaning files {} ...'.format(clean_files)) + + for tmp_file in clean_files: + if os.path.isfile(tmp_file): + os.remove(tmp_file) + + return {'return':0} + +############################################################################## +def update_dep_info(dep, new_info): + """ + Internal: add additional info to a dependency + """ + for info in new_info: + if info == "tags": + tags = dep.get('tags', '') + tags_list = tags.split(",") + new_tags_list = new_info["tags"].split(",") + combined_tags = tags_list + list(set(new_tags_list) - set(tags_list)) + dep['tags'] = ",".join(combined_tags) + else: + dep[info] = new_info[info] + 
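+# Minimal usage sketch for update_deps() below (names and values are
+# hypothetical):
+#
+#   deps = [{'names': ['python'], 'tags': 'get,python'}]
+#   update_deps(deps, {'python': {'version_min': '3.8'}}, fail_error=True)
+#   # -> the matching dep now also carries 'version_min': '3.8'
+#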
+##############################################################################
+def update_deps(deps, add_deps, fail_error=False):
+    """
+    Internal: add deps tags, version etc. by name
+    """
+    #deps_info_to_add = [ "version", "version_min", "version_max", "version_max_usable", "path" ]
+    new_deps_info = {}
+    for new_deps_name in add_deps:
+        dep_found = False
+        for dep in deps:
+            names = dep.get('names',[])
+            if new_deps_name in names:
+                update_dep_info(dep, add_deps[new_deps_name])
+                dep_found = True
+        if not dep_found and fail_error:
+            return {'return':1, 'error':new_deps_name + ' is not one of the dependencies'}
+
+    return {'return':0}
+
+
+##############################################################################
+def append_deps(deps, new_deps):
+    """
+    Internal: add deps from meta
+    """
+
+    for new_dep in new_deps:
+        existing = False
+        new_dep_names = new_dep.get('names',[])
+        if len(new_dep_names)>0:
+            for i in range(len(deps)):
+                dep = deps[i]
+                dep_names = dep.get('names',[])
+                if len(dep_names)>0:
+                    if set(new_dep_names) == set(dep_names):
+                        deps[i] = new_dep
+                        existing = True
+                        break
+        else: #when no name, check for tags
+            new_dep_tags = new_dep.get('tags')
+            new_dep_tags_list = new_dep_tags.split(",")
+            for i in range(len(deps)):
+                dep = deps[i]
+                dep_tags_list = dep.get('tags').split(",")
+                if set(new_dep_tags_list) == set(dep_tags_list):
+                    deps[i] = new_dep
+                    existing = True
+                    break
+
+        if not existing:
+            deps.append(new_dep)
+
+    return {'return':0}
+
+##############################################################################
+def update_deps_from_input(deps, post_deps, prehook_deps, posthook_deps, i):
+    """
+    Internal: update deps from input
+    """
+    add_deps_info_from_input = i.get('ad',{})
+    if not add_deps_info_from_input:
+        add_deps_info_from_input = i.get('add_deps',{})
+    else:
+        utils.merge_dicts({'dict1':add_deps_info_from_input, 'dict2':i.get('add_deps', {}), 'append_lists':True, 'append_unique':True})
+
+    add_deps_recursive_info_from_input = i.get('adr', {})
+    if not add_deps_recursive_info_from_input:
+        add_deps_recursive_info_from_input = i.get('add_deps_recursive', {})
+    else:
+        utils.merge_dicts({'dict1':add_deps_recursive_info_from_input, 'dict2':i.get('add_deps_recursive', {}), 'append_lists':True, 'append_unique':True})
+
+    if add_deps_info_from_input:
+        r1 = update_deps(deps, add_deps_info_from_input, True)
+        r2 = update_deps(post_deps, add_deps_info_from_input, True)
+        r3 = update_deps(prehook_deps, add_deps_info_from_input, True)
+        r4 = update_deps(posthook_deps, add_deps_info_from_input, True)
+        if r1['return']>0 and r2['return']>0 and r3['return']>0 and r4['return']>0: return r1
+    if add_deps_recursive_info_from_input:
+        update_deps(deps, add_deps_recursive_info_from_input)
+        update_deps(post_deps, add_deps_recursive_info_from_input)
+        update_deps(prehook_deps, add_deps_recursive_info_from_input)
+        update_deps(posthook_deps, add_deps_recursive_info_from_input)
+
+    return {'return':0}
+
+
+##############################################################################
+def update_env_from_input_mapping(env, inp, input_mapping):
+    """
+    Internal: update env from input and input_mapping
+    """
+    for key in input_mapping:
+        if key in inp:
+            env[input_mapping[key]] = inp[key]
+
+##############################################################################
+def update_state_from_meta(meta, env, state, deps, post_deps, prehook_deps, posthook_deps, new_env_keys, new_state_keys, i):
+    """
+    Internal: update env and state from meta
+    """
+
+    default_env = 
meta.get('default_env',{}) + for key in default_env: + env.setdefault(key, default_env[key]) + update_env = meta.get('env', {}) + env.update(update_env) + + update_state = meta.get('state', {}) + utils.merge_dicts({'dict1':state, 'dict2':update_state, 'append_lists':True, 'append_unique':True}) + + new_deps = meta.get('deps', []) + if len(new_deps)>0: + append_deps(deps, new_deps) + + new_post_deps = meta.get("post_deps", []) + if len(new_post_deps) > 0: + append_deps(post_deps, new_post_deps) + + new_prehook_deps = meta.get("prehook_deps", []) + if len(new_prehook_deps) > 0: + append_deps(prehook_deps, new_prehook_deps) + + new_posthook_deps = meta.get("posthook_deps", []) + if len(new_posthook_deps) > 0: + append_deps(posthook_deps, new_posthook_deps) + + add_deps_info = meta.get('ad', {}) + if not add_deps_info: + add_deps_info = meta.get('add_deps',{}) + else: + utils.merge_dicts({'dict1':add_deps_info, 'dict2':meta.get('add_deps', {}), 'append_lists':True, 'append_unique':True}) + if add_deps_info: + r1 = update_deps(deps, add_deps_info, True) + r2 = update_deps(post_deps, add_deps_info, True) + r3 = update_deps(prehook_deps, add_deps_info, True) + r4 = update_deps(posthook_deps, add_deps_info, True) + if r1['return']>0 and r2['return']>0 and r3['return'] > 0 and r4['return'] > 0: return r1 + + input_mapping = meta.get('input_mapping', {}) + if input_mapping: + update_env_from_input_mapping(env, i['input'], input_mapping) + + # Possibly restrict this to within docker environment + new_docker_settings = meta.get('docker') + if new_docker_settings: + docker_settings = state.get('docker', {}) + #docker_input_mapping = docker_settings.get('docker_input_mapping', {}) + #new_docker_input_mapping = new_docker_settings.get('docker_input_mapping', {}) + #if new_docker_input_mapping: + # # update_env_from_input_mapping(env, i['input'], docker_input_mapping) + # utils.merge_dicts({'dict1':docker_input_mapping, 'dict2':new_docker_input_mapping, 'append_lists':True, 'append_unique':True}) + utils.merge_dicts({'dict1':docker_settings, 'dict2':new_docker_settings, 'append_lists':True, 'append_unique':True}) + state['docker'] = docker_settings + + new_env_keys_from_meta = meta.get('new_env_keys', []) + if new_env_keys_from_meta: + new_env_keys += new_env_keys_from_meta + + new_state_keys_from_meta = meta.get('new_state_keys', []) + if new_state_keys_from_meta: + new_state_keys += new_state_keys_from_meta + + return {'return':0} + +############################################################################## +def update_adr_from_meta(deps, post_deps, prehook_deps, posthook_deps, add_deps_recursive_info): + """ + Internal: update add_deps_recursive from meta + """ + if add_deps_recursive_info: + update_deps(deps, add_deps_recursive_info) + update_deps(post_deps, add_deps_recursive_info) + update_deps(prehook_deps, add_deps_recursive_info) + update_deps(posthook_deps, add_deps_recursive_info) + + return {'return':0} + +############################################################################## +def get_adr(meta): + add_deps_recursive_info = meta.get('adr', {}) + if not add_deps_recursive_info: + add_deps_recursive_info = meta.get('add_deps_recursive',{}) + else: + utils.merge_dicts({'dict1':add_deps_recursive_info, 'dict2':meta.get('add_deps_recursive', {}), 'append_lists':True, 'append_unique':True}) + return add_deps_recursive_info + +############################################################################## +def detect_state_diff(env, saved_env, new_env_keys, new_state_keys, state, 
saved_state): + """ + Internal: detect diff in env and state + """ + + new_env = {} + new_state = {} + + # Check if leave only specific keys or detect diff automatically + for k in new_env_keys: + if '?' in k or '*' in k: + import fnmatch + for kk in env: + if fnmatch.fnmatch(kk, k): + new_env[kk] = env[kk] + elif k in env: + new_env[k] = env[k] + elif "<<<" in k: + import re + tmp_values = re.findall(r'<<<(.*?)>>>', k) + for tmp_value in tmp_values: + if tmp_value in env: + value = env[tmp_value] + if value in env: + new_env[value] = env[value] + + for k in new_state_keys: + if '?' in k or '*' in k: + import fnmatch + for kk in state: + if fnmatch.fnmatch(kk, k): + new_state[kk] = state[kk] + elif k in state: + new_state[k] = state[k] + elif "<<<" in k: + import re + tmp_values = re.findall(r'<<<(.*?)>>>', k) + for tmp_value in tmp_values: + if tmp_value in state: + value = state[tmp_value] + if value in state: + new_state[value] = state[value] + + return {'return':0, 'env':env, 'new_env':new_env, 'state':state, 'new_state':new_state} + +############################################################################## +def select_script_artifact(lst, text, recursion_spaces, can_skip, script_tags_string, quiet, verbose): + """ + Internal: select script + """ + + string1 = recursion_spaces+' - More than 1 {} found for "{}":'.format(text,script_tags_string) + + # If quiet, select 0 (can be sorted for determinism) + if quiet: + if verbose: + print (string1) + print ('') + print ('Selected default due to "quiet" mode') + + return 0 + + # Select 1 and proceed + print (string1) + + print ('') + num = 0 + + for a in lst: + meta = a.meta + + name = meta.get('name', '') + + s = a.path + if name !='': s = '"'+name+'" '+s + + x = recursion_spaces+' {}) {} ({})'.format(num, s, ','.join(meta['tags'])) + + version = meta.get('version','') + if version!='': + x+=' (Version {})'.format(version) + + print (x) + num+=1 + + print ('') + + s = 'Make your selection or press Enter for 0' + if can_skip: + s += ' or use -1 to skip' + + x = input(recursion_spaces+' '+s+': ') + x = x.strip() + if x == '': x = '0' + + selection = int(x) + + if selection <0 and not can_skip: + selection = 0 + + if selection <0: + + print ('') + print (recursion_spaces+' Skipped') + else: + if selection >= num: + selection = 0 + + print ('') + print (recursion_spaces+' Selected {}: {}'.format(selection, lst[selection].path)) + + return selection + +############################################################################## +def check_versions(cmind, cached_script_version, version_min, version_max): + """ + Internal: check versions of the cached script + """ + skip_cached_script = False + + if cached_script_version != '': + if version_min != '': + ry = cmind.access({'action':'compare_versions', + 'automation':'utils,dc2743f8450541e3', + 'version1':cached_script_version, + 'version2':version_min}) + if ry['return']>0: return ry + + if ry['comparison'] < 0: + skip_cached_script = True + + if not skip_cached_script and version_max != '': + ry = cmind.access({'action':'compare_versions', + 'automation':'utils,dc2743f8450541e3', + 'version1':cached_script_version, + 'version2':version_max}) + if ry['return']>0: return ry + + if ry['comparison'] > 0: + skip_cached_script = True + + return skip_cached_script + +############################################################################## +def get_git_url(get_type, url, params = {}): + from giturlparse import parse + p = parse(url) + if get_type == "ssh": + return p.url2ssh + elif get_type == 
"token": + token = params['token'] + return "https://git:" + token + "@" + p.host + "/" + p.owner + "/" + p.repo + return url + +############################################################################## +def can_write_to_current_directory(): + + import tempfile + + cur_dir = os.getcwd() + +# try: +# tmp_file = tempfile.NamedTemporaryFile(dir = cur_dir) +# except Exception as e: +# return False + + tmp_file_name = next(tempfile._get_candidate_names())+'.tmp' + + tmp_path = os.path.join(cur_dir, tmp_file_name) + + try: + tmp_file = open(tmp_file_name, 'w') + except Exception as e: + return False + + tmp_file.close() + + os.remove(tmp_file_name) + + return True + +###################################################################################### +def dump_repro_start(repro_prefix, ii): + import json + + # Clean reproducibility and experiment files + for f in ['cm-output.json', 'version_info.json', '-input.json', '-info.json', '-output.json', '-run-state.json']: + ff = repro_prefix+f if f.startswith('-') else f + if os.path.isfile(ff): + try: + os.remove(ff) + except: + pass + + try: + with open(repro_prefix+'-input.json', 'w', encoding='utf-8') as f: + json.dump(ii, f, ensure_ascii=False, indent=2) + except: + pass + + # Get some info + info = {} + + try: + import platform + import sys + + info['host_os_name'] = os.name + info['host_system'] = platform.system() + info['host_os_release'] = platform.release() + info['host_machine'] = platform.machine() + info['host_architecture'] = platform.architecture() + info['host_python_version'] = platform.python_version() + info['host_sys_version'] = sys.version + + r = utils.gen_uid() + if r['return']==0: + info['run_uid'] = r['uid'] + + r = utils.get_current_date_time({}) + if r['return']==0: + info['run_iso_datetime'] = r['iso_datetime'] + + with open(repro_prefix+'-info.json', 'w', encoding='utf-8') as f: + json.dump(info, f, ensure_ascii=False, indent=2) + except: + pass + + + # For experiment + cm_output = {} + + cm_output['tmp_test_value']=10.0 + + cm_output['info']=info + cm_output['input']=ii + + try: + with open('cm-output.json', 'w', encoding='utf-8') as f: + json.dump(cm_output, f, ensure_ascii=False, indent=2) + except: + pass + + return {'return': 0} + +###################################################################################### +def dump_repro(repro_prefix, rr, run_state): + import json + import copy + + try: + with open(repro_prefix+'-output.json', 'w', encoding='utf-8') as f: + json.dump(rr, f, ensure_ascii=False, indent=2) + except: + pass + + try: + with open(repro_prefix+'-run-state.json', 'w', encoding='utf-8') as f: + json.dump(run_state, f, ensure_ascii=False, indent=2) + except: + pass + + # For experiment + cm_output = {} + + # Attempt to read + try: + r = utils.load_json('cm-output.json') + if r['return']==0: + cm_output = r['meta'] + except: + pass + + cm_output['output'] = rr + cm_output['state'] = copy.deepcopy(run_state) + + # Try to load version_info.json + version_info = {} + + version_info_orig = {} + + if 'version_info' in cm_output['state']: + version_info_orig = cm_output['state']['version_info'] + del(cm_output['state']['version_info']) + + try: + r = utils.load_json('version_info.json') + if r['return']==0: + version_info_orig += r['meta'] + + for v in version_info_orig: + for key in v: + dep = v[key] + version_info[key] = dep + + except: + pass + + if len(version_info)>0: + cm_output['version_info'] = version_info + + if rr['return'] == 0: + cm_output['acm_ctuning_repro_badge_available'] = True + 
cm_output['acm_ctuning_repro_badge_functional'] = True + + try: + with open('cm-output.json', 'w', encoding='utf-8') as f: + json.dump(cm_output, f, ensure_ascii=False, indent=2, sort_keys=True) + except: + pass + + + return {'return': 0} + + +############################################################################## +# Demo to show how to use CM components independently if needed +if __name__ == "__main__": + import cmind + auto = CAutomation(cmind, __file__) + + r=auto.test({'x':'y'}) + + print (r) diff --git a/automation/script/module_help.py b/automation/script/module_help.py new file mode 100644 index 0000000000..e27d756877 --- /dev/null +++ b/automation/script/module_help.py @@ -0,0 +1,100 @@ +import os +from cmind import utils + +# Pring help about script +def print_help(i): + + meta = i.get('meta', '') + path = i.get('path', '') + + if len(meta)==0 and path=='': + return {'return':0} + + print ('') + print ('Help for this CM script ({},{}):'.format(meta.get('alias',''), meta.get('uid',''))) + + print ('') + print ('Path to this automation recipe: {}'.format(path)) + + variations = meta.get('variations',{}) + if len(variations)>0: + print ('') + print ('Available variations:') + print ('') + for v in sorted(variations): + print (' _'+v) + + input_mapping = meta.get('input_mapping', {}) + if len(input_mapping)>0: + print ('') + print ('Available flags mapped to environment variables:') + print ('') + for k in sorted(input_mapping): + v = input_mapping[k] + + print (' --{} -> --env.{}'.format(k,v)) + + input_description = meta.get('input_description', {}) + if len(input_description)>0: + # Check if has important ones (sort) + sorted_keys = [] + all_keys = sorted(list(input_description.keys())) + + for k in sorted(all_keys, key = lambda x: input_description[x].get('sort',0)): + v = input_description[k] + if v.get('sort',0)>0: + sorted_keys.append(k) + + + print ('') + print ('Available flags (Python API dict keys):') + print ('') + for k in all_keys: + v = input_description[k] + n = v.get('desc','') + + x = ' --'+k + if n!='': x+=' ({})'.format(n) + + print (x) + + if len(sorted_keys)>0: + print ('') + print ('Main flags:') + print ('') + for k in sorted_keys: + v = input_description[k] + n = v.get('desc','') + + x = ' --'+k + + d = None + if 'default' in v: + d = v.get('default','') + + if d!=None: + x+='='+d + + c = v.get('choices',[]) + if len(c)>0: + x+=' {'+','.join(c)+'}' + + if n!='': x+=' ({})'.format(n) + + print (x) + + + + print ('') + x = input ('Would you like to see a Python API with a list of common keys/flags for all scripts including this one (y/N)? 
') + + x = x.strip().lower() + + skip_delayed_help = False if x in ['y','yes'] else True + + r = {'return':0} + + if skip_delayed_help: + r['skip_delayed_help'] = True + + return r diff --git a/automation/script/module_misc.py b/automation/script/module_misc.py new file mode 100644 index 0000000000..91b7873ae9 --- /dev/null +++ b/automation/script/module_misc.py @@ -0,0 +1,1990 @@ +import os +from cmind import utils + +# Meta deps +def process_deps(self_module, meta, meta_url, md_script_readme, key, extra_space='', skip_from_meta=False, skip_if_empty=False): + + x = '' + y = [] + if len(meta.get(key,{}))>0: + x = '***' + + for d in meta[key]: + d_tags = d.get('tags', '') + + z = extra_space+' * '+d_tags + + names = d.get('names', []) + enable_if_env = d.get('enable_if_env', {}) + skip_if_env = d.get('skip_if_env', {}) + + q = '' + + q1 = '' + for e in enable_if_env: + if q1!='': q1 += ' AND ' + q1 += e+' ' + v = enable_if_env[e] + q1 += ' == '+str(v[0]) if len(v)==1 else 'in '+str(v) + if q1!='': q1 = '('+q1+')' + + q2 = '' + for e in skip_if_env: + if q2!='': q2 += ' OR ' + q2 += e+' ' + v = skip_if_env[e] + q2 += ' != '+str(v[0]) if len(v)==1 else 'not in '+str(v) + + if q2!='': q2 = '('+q2+')' + + if q1!='' or q2!='': + q = 'if ' + + if q1!='': q+=q1 + if q2!='': + if q1!='': q+=' AND ' + q+=q2 + + y.append(z) + + if q!='': + y.append(extra_space+' * `'+q+'`') + + if len(names)>0: + y.append(extra_space+' * CM names: `--adr.'+str(names)+'...`') + + + # Attempt to find related CM scripts + r = self_module.cmind.access({'action':'find', + 'automation':'script', + 'tags':d_tags}) + if r['return']==0: + lst = r['list'] + + if len(lst)==0: + y.append(extra_space+' - *Warning: no scripts found*') + else: + for s in lst: + s_repo_meta = s.repo_meta + + s_repo_alias = s_repo_meta.get('alias','') + s_repo_uid = s_repo_meta.get('uid','') + + # Check URL + s_url = '' + s_url_repo = '' + if s_repo_alias == 'internal': + s_url_repo = 'https://github.com/mlcommons/ck/tree/master/cm/cmind/repo' + s_url = s_url_repo+'/script/' + elif '@' in s_repo_alias: + s_url_repo = 'https://github.com/'+s_repo_alias.replace('@','/')+'/tree/master' + if s_repo_meta.get('prefix','')!='': s_url_repo+='/'+s_repo_meta['prefix'] + s_url = s_url_repo+ '/script/' + + s_alias = s.meta['alias'] + y.append(extra_space+' - CM script: [{}]({})'.format(s_alias, s_url+s_alias)) + + z = '' + if not skip_from_meta: + z = ' from [meta]({})'.format(meta_url) + + if not skip_if_empty or len(y)>0: + md_script_readme.append((extra_space+' 1. '+x+'Read "{}" on other CM scripts'+z+x).format(key)) + md_script_readme += y + +############################################################ +def doc(i): + """ + Add CM automation. 
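+    (Note: in practice this function generates and updates README
+    documentation for the CM scripts found in the given repositories.)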
+ + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + parsed_artifact (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + (repos) (str): list of repositories to search for automations (internal & mlcommons@ck by default) + + (output_dir) (str): output directory (../docs by default) + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + self_module = i['self_module'] + + cur_dir = os.getcwd() + + template_file = 'template_list_of_scripts.md' + list_file = 'list_of_scripts.md' + + public_taskforce = '[Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)' + + console = i.get('out') == 'con' + + repos = i.get('repos','') + if repos == '': repos='internal,a4705959af8e447a' + + parsed_artifact = i.get('parsed_artifact',[]) + + if len(parsed_artifact)<1: + parsed_artifact = [('',''), ('','')] + elif len(parsed_artifact)<2: + parsed_artifact.append(('','')) + else: + repos = parsed_artifact[1][0] + + list_of_repos = repos.split(',') if ',' in repos else [repos] + + ii = utils.sub_input(i, self_module.cmind.cfg['artifact_keys'] + ['tags']) + + ii['out'] = None + + # Search for automations in repos + lst = [] + for repo in list_of_repos: + parsed_artifact[1] = ('',repo) if utils.is_cm_uid(repo) else (repo,'') + ii['parsed_artifact'] = parsed_artifact + r = self_module.search(ii) + if r['return']>0: return r + lst += r['list'] + + md = [] + + toc = [] + + toc_category = {} + toc_category_sort = {} + script_meta = {} + urls = {} + + for artifact in sorted(lst, key = lambda x: x.meta.get('alias','')): + + toc_readme = [] + + # Common index for all scripts + md_script = [] + + path = artifact.path + meta = artifact.meta + original_meta = artifact.original_meta + + print ('Documenting {}'.format(path)) + + alias = meta.get('alias','') + uid = meta.get('uid','') + + script_meta[alias] = meta + + name = meta.get('name','') + developers = meta.get('developers','') + + # Check if has tags help otherwise all tags + tags = meta.get('tags_help','').strip() + if tags=='': + tags = meta.get('tags',[]) + else: + tags = tags.split(' ') + + variations = meta.get('variations',{}) + + variation_keys = sorted(list(variations.keys())) + version_keys = sorted(list(meta.get('versions',{}).keys())) + + default_variation = meta.get('default_variation','') + default_version = meta.get('default_version','') + + input_mapping = meta.get('input_mapping', {}) + input_description = meta.get('input_description', {}) + + category = meta.get('category', '').strip() + category_sort = meta.get('category_sort', 0) + if category != '': + if category not in toc_category: + toc_category[category]=[] + + if category not in toc_category_sort or category_sort>0: + toc_category_sort[category]=category_sort + + if alias not in toc_category[category]: + toc_category[category].append(alias) + + repo_path = artifact.repo_path + repo_meta = artifact.repo_meta + + repo_alias = repo_meta.get('alias','') + repo_uid = repo_meta.get('uid','') + + + # Check URL + url = '' + url_repo = '' + if repo_alias == 'internal': + url_repo = 'https://github.com/mlcommons/ck/tree/dev/cm/cmind/repo' + url = url_repo+'/script/' + elif '@' in repo_alias: + url_repo = 'https://github.com/'+repo_alias.replace('@','/')+'/tree/dev' + if 
repo_meta.get('prefix','')!='': url_repo+='/'+repo_meta['prefix'] + url = url_repo+ '/script/' + + if url!='': + url+=alias + + urls[alias]=url + + # Check if there is about doc + path_readme = os.path.join(path, 'README.md') + path_readme_extra = os.path.join(path, 'README-extra.md') + path_readme_about = os.path.join(path, 'README-about.md') + + readme_about = '' + if os.path.isfile(path_readme_about): + r = utils.load_txt(path_readme_about, split = True) + if r['return']>0: return + + s = r['string'] + readme_about = r['list'] + + + ####################################################################### + # Start automatically generated README + md_script_readme = [ +# '
<details>',
+#                        '<summary>Click here to see the table of contents.</summary>',
+#                        '{{CM_README_TOC}}',
+#                        '</details>
', +# '', + 'Automatically generated README for this automation recipe: **{}**'.format(meta['alias']), + ] + + + md_script.append('## '+alias) + md_script.append('') + +# x = 'About' +# md_script_readme.append('___') +# md_script_readme.append('### '+x) +# md_script_readme.append('') +# toc_readme.append(x) + +# x = 'About' +# md_script_readme.append('#### '+x) +# md_script_readme.append('') +# toc_readme.append(' '+x) + + if name!='': + name += '.' + md_script.append('*'+name+'*') + md_script.append('') + +# md_script_readme.append('*'+name+'*') +# md_script_readme.append('') + + + + if os.path.isfile(path_readme): + r = utils.load_txt(path_readme, split = True) + if r['return']>0: return + + s = r['string'] + readme = r['list'] + + if not 'automatically generated' in s.lower(): + found_path_readme_extra = True + + # Attempt to rename to README-extra.md + if os.path.isfile(path_readme_extra): + return {'return':1, 'error':'README.md is not auto-generated and README-extra.md already exists - can\'t rename'} + + os.rename(path_readme, path_readme_extra) + + # Add to Git (if in git) + os.chdir(path) + os.system('git add README-extra.md') + os.chdir(cur_dir) + + + + if category!='': + md_script_readme.append('') + md_script_readme.append('Category: **{}**'.format(category)) + + md_script_readme.append('') + md_script_readme.append('License: **Apache 2.0**') + + + md_script_readme.append('') + + if developers == '': + md_script_readme.append('Maintainers: ' + public_taskforce) + else: + md_script_readme.append('Developers: ' + developers) + + x = '* [{}]({})'.format(alias, url) + if name !='': x+=' *('+name+')*' + toc.append(x) + + + + cm_readme_extra = '[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name={},{}) ] '.format(alias, uid) + + if os.path.isfile(path_readme_extra): + readme_extra_url = url+'/README-extra.md' + + x = '* Notes from the authors, contributors and users: [*GitHub*]({})'.format(readme_extra_url) + md_script.append(x) + + cm_readme_extra += '[ [Notes from the authors, contributors and users](README-extra.md) ] ' + + md_script_readme.append('') + md_script_readme.append('---') + md_script_readme.append('*'+cm_readme_extra.strip()+'*') + + + if readme_about!='': + md_script_readme += ['', '---', ''] + readme_about + + + + x = 'Summary' + md_script_readme.append('') + md_script_readme.append('---') + md_script_readme += [ +# '
', +# 'Click to see the summary', + '#### Summary', + '' + ] + toc_readme.append(x) + + +# if category != '': +# x = 'Category' +# md_script_readme.append('___') +# md_script_readme.append('#### '+x) +# md_script_readme.append(' ') +# md_script_readme.append(category+'.') +# toc_readme.append(x) + +# x = '* Category: *{}*'.format(category + '.') +# md_script_readme.append(x) + + +# x = 'Origin' +# md_script_readme.append('___') +# md_script_readme.append('#### '+x) +# md_script_readme.append('') +# toc_readme.append(x) + + x = '* CM GitHub repository: *[{}]({})*'.format(repo_alias, url_repo) + md_script.append(x) + md_script_readme.append(x) + + + x = '* GitHub directory for this script: *[GitHub]({})*'.format(url) + md_script.append(x) + md_script_readme.append(x) + + + + # Check meta + meta_file = self_module.cmind.cfg['file_cmeta'] + meta_path = os.path.join(path, meta_file) + + meta_file += '.yaml' if os.path.isfile(meta_path+'.yaml') else '.json' + + meta_url = url+'/'+meta_file + + x = '* CM meta description of this script: *[GitHub]({})*'.format(meta_url) + md_script.append(x) + +# x = '* CM automation "script": *[Docs]({})*'.format('https://github.com/octoml/ck/blob/master/docs/list_of_automations.md#script') +# md_script.append(x) +# md_script_readme.append(x) + + if len(variation_keys)>0: + variation_pointer="[,variations]" + variation_pointer2="[variations]" + else: + variation_pointer='' + variation_pointer2='' + + if len(input_mapping)>0: + input_mapping_pointer="[--input_flags]" + else: + input_mapping_pointer='' + + cli_all_tags = '`cm run script --tags={}`'.format(','.join(tags)) + cli_all_tags3 = '`cm run script --tags={}{} {}`'.format(','.join(tags), variation_pointer, input_mapping_pointer) + x = '* CM CLI with all tags: {}*'.format(cli_all_tags) + md_script.append(x) + + cli_help_tags_alternative = '`cmr "{}" --help`'.format(' '.join(tags)) + + cli_all_tags_alternative = '`cmr "{}"`'.format(' '.join(tags)) + cli_all_tags_alternative3 = '`cmr "{} {}" {}`'.format(' '.join(tags), variation_pointer2, input_mapping_pointer) + cli_all_tags_alternative_j = '`cmr "{} {}" {} -j`'.format(' '.join(tags), variation_pointer, input_mapping_pointer) + x = '* CM CLI alternative: {}*'.format(cli_all_tags_alternative) + md_script.append(x) + + cli_all_tags_alternative_docker = '`cm docker script "{}{}" {}`'.format(' '.join(tags), variation_pointer2, input_mapping_pointer) + + +# cli_uid = '`cm run script {} {}`'.format(meta['uid'], input_mapping_pointer) +# x = '* CM CLI with alias and UID: {}*'.format(cli_uid) +# md_script.append(x) + + if len(variation_keys)>0: + x='' + for variation in variation_keys: + if x!='': x+=';  ' + x+='_'+variation + md_script.append('* Variations: *{}*'.format(x)) + + if default_variation!='': + md_script.append('* Default variation: *{}*'.format(default_variation)) + + if len(version_keys)>0: + md_script.append('* Versions: *{}*'.format(';  '.join(version_keys))) + + if default_version!='': + md_script.append('* Default version: *{}*'.format(default_version)) + + + + + + + + md_script.append('') +# md_script_readme.append('') + + # Add extra to README + x = 'Meta description' +# md_script_readme.append('___') +# md_script_readme.append('### '+x) + md_script_readme.append('* CM meta description for this script: *[{}]({})*'.format(meta_file, meta_file)) +# md_script_readme.append('') +# toc_readme.append(x) + + x = 'Tags' +# md_script_readme.append('___') +# md_script_readme.append('### '+x) + md_script_readme.append('* All CM tags to find and reuse this 
script (see in the above meta description): *{}*'.format(','.join(tags)))
+#        md_script_readme.append('')
+#        toc_readme.append(x)
+
+
+        cache = meta.get('cache', False)
+        md_script_readme.append('* Output cached? *{}*'.format(str(cache)))
+
+        md_script_readme.append('* See [pipeline of dependencies]({}) on other CM scripts'.format('#dependencies-on-other-cm-scripts'))
+
+        md_script_readme += ['',
+#                             '</details>
' + ] + + + + # Add usage + x1 = 'Reuse this script in your project' + x1a = 'Install MLCommons CM automation meta-framework' + x1aa = 'Pull CM repository with this automation recipe (CM script)' + x1b = 'Print CM help from the command line' + x2 = 'Customize and run this script from the command line with different variations and flags' + x3 = 'Run this script from Python' + x3a = 'Run this script via GUI' + x4 = 'Run this script via Docker (beta)' + md_script_readme += [ + '', + '---', + '### '+x1, + '', + '#### '+x1a, + '', + '* [Install CM](https://access.cknowledge.org/playground/?action=install)', + '* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)', + '', + '#### '+x1aa, + '', + '```cm pull repo {}```'.format(repo_alias), + '', + '#### '+x1b, + '', + '```{}```'.format(cli_help_tags_alternative), + '', + '#### '+x2, + '', + '{}'.format(cli_all_tags), + '', + '{}'.format(cli_all_tags3), + '', + '*or*', + '', + '{}'.format(cli_all_tags_alternative), + '', + '{}'.format(cli_all_tags_alternative3), + '', +# '3. {}'.format(cli_uid), + ''] + + + x = ' and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.' + if len(variation_keys)>0: + md_script_readme += ['* *See the list of `variations` [here](#variations)'+x+'*', + '' + ] + + if input_description and len(input_description)>0: + x = 'Input Flags' + md_script_readme.append('') + md_script_readme.append('#### '+x) + toc_readme.append(' '+x) + + md_script_readme.append('') + key0 = '' + for key in input_description: + if key0=='': key0=key + + value = input_description[key] + desc = value + + if type(value) == dict: + desc = value['desc'] + + choices = value.get('choices', []) + if len(choices) > 0: + desc+=' {'+','.join(choices)+'}' + + default = value.get('default','') + if default!='': + desc+=' (*'+str(default)+'*)' + + md_script_readme.append('* --**{}**={}'.format(key,desc)) + + md_script_readme.append('') + md_script_readme.append('**Above CLI flags can be used in the Python CM API as follows:**') + md_script_readme.append('') + + x = '```python\nr=cm.access({... , "'+key0+'":...}\n```' + md_script_readme.append(x) + + + + + + md_script_readme += ['#### '+x3, + '', + '
<details>',
+                             '<summary>Click here to expand this section.</summary>',
+                             '',
+                             '```python',
+                             '',
+                             'import cmind',
+                             '',
+                             "r = cmind.access({'action':'run',",
+                             "                  'automation':'script',",
+                             "                  'tags':'{}',".format(','.join(tags)),
+                             "                  'out':'con',",
+                             "                  ...",
+                             "                  (other input keys for this script)",
+                             "                  ...",
+                             "                 })",
+                             "",
+                             "if r['return']>0:",
+                             "    print (r['error'])",
+                             '',
+                             '```',
+                             '',
+                             '</details>
', + '', + + '', + '#### '+x3a, + '', + '```cmr "cm gui" --script="'+','.join(tags)+'"```', + '', + 'Use this [online GUI](https://cKnowledge.org/cm-gui/?tags={}) to generate CM CMD.'.format(','.join(tags)), + '', + '#### '+x4, + '', + '{}'.format(cli_all_tags_alternative_docker), + '' + ] + toc_readme.append(x1) + toc_readme.append(' '+x1a) + toc_readme.append(' '+x1b) + toc_readme.append(' '+x2) + toc_readme.append(' '+x3) + toc_readme.append(' '+x3a) + toc_readme.append(' '+x4) + + x = 'Customization' + md_script_readme.append('___') + md_script_readme.append('### '+x) + md_script_readme.append('') + toc_readme.append(x) + + + + + if len(variation_keys)>0: +# x = 'Variation groups' +# md_script_readme.append('___') +# md_script_readme.append('### '+x) +# toc_readme.append(x) + + variation_groups = {} + default_variations = [] + variation_md = {} + variation_alias = {} + + # Normally should not use anymore. Should use default:true inside individual variations. + default_variation = meta.get('default_variation','') + + for variation_key in sorted(variation_keys): + variation = variations[variation_key] + + alias = variation.get('alias','').strip() + + if alias!='': + aliases = variation_alias.get(alias, []) + if variation_key not in aliases: + aliases.append(variation_key) + variation_alias[alias]=aliases + + # Do not continue this loop if alias + continue + + default = variation.get('default', False) + + if not default: + # Check outdated + if default_variation == variation_key: + default = True + + extra1 = '' + extra2 = '' + if default: + extra1 = '**' + extra2 = '** (default)' + + default_variations.append(variation_key) + + + md_var = [] + + md_var.append('* {}`_{}`{}'.format(extra1, variation_key, extra2)) + + variation_md[variation_key] = md_var + +# md_script_readme+=md_var + + group = variation.get('group','') + + if variation_key.endswith('_'): + group = '*Internal group (variations should not be selected manually)*' + elif group == '': + group = '*No group (any variation can be selected)*' + + if group not in variation_groups: + variation_groups[group]=[] + + variation_groups[group].append(variation_key) + + + x = 'Variations' + md_script_readme.append('') + md_script_readme.append('#### '+x) + toc_readme.append(' '+x) + + variation_groups_order = meta.get('variation_groups_order',[]) + for variation in sorted(variation_groups): + if variation not in variation_groups_order: + variation_groups_order.append(variation) + + for group_key in variation_groups_order: + md_script_readme.append('') + + if not group_key.startswith('*'): + md_script_readme.append(' * Group "**{}**"'.format(group_key)) + else: + md_script_readme.append(' * {}'.format(group_key)) + + + md_script_readme += [ + '
<details>',
+                                     ' <summary>Click here to expand this section.</summary>',
+                                     ''
+                                    ]
+
+            for variation_key in sorted(variation_groups[group_key]):
+                variation = variations[variation_key]
+
+                xmd = variation_md[variation_key]
+
+                aliases = variation_alias.get(variation_key,[])
+                aliases2 = ['_'+v for v in aliases]
+
+                if len(aliases)>0:
+                    xmd.append(' - Aliases: `{}`'.format(','.join(aliases2)))
+
+                if len(variation.get('env',{}))>0:
+                    xmd.append(' - Environment variables:')
+                    for key in variation['env']:
+                        xmd.append('   - *{}*: `{}`'.format(key, variation['env'][key]))
+
+                xmd.append(' - Workflow:')
+
+                for dep in ['deps', 'prehook_deps', 'posthook_deps', 'post_deps']:
+                    process_deps(self_module, variation, meta_url, xmd, dep, ' ', True, True)
+
+                for x in xmd:
+                    md_script_readme.append(' '+x)
+
+            md_script_readme.append('')
+            md_script_readme.append('</details>
') + md_script_readme.append('') + + # Check if has invalid_variation_combinations + vvc = meta.get('invalid_variation_combinations', []) + if len(vvc)>0: + x = 'Unsupported or invalid variation combinations' + md_script_readme.append('') + md_script_readme.append('#### '+x) + md_script_readme.append('') + md_script_readme.append('') + md_script_readme.append('') + toc_readme.append(' '+x) + + for v in vvc: + vv = ['_'+x for x in v] + md_script_readme.append('* `'+','.join(vv)+'`') + + + if len(default_variations)>0: + md_script_readme.append('') + md_script_readme.append('#### Default variations') + md_script_readme.append('') + + dv = ['_'+x for x in sorted(default_variations)] + + md_script_readme.append('`{}`'.format(','.join(dv))) + + + # Check if has valid_variation_combinations + vvc = meta.get('valid_variation_combinations', []) + if len(vvc)>0: + x = 'Valid variation combinations checked by the community' + md_script_readme.append('') + md_script_readme.append('#### '+x) + md_script_readme.append('') + md_script_readme.append('') + md_script_readme.append('') + toc_readme.append(' '+x) + + for v in vvc: + vv = ['_'+x for x in v] + md_script_readme.append('* `'+','.join(vv)+'`') + + + + + + # Check input flags + if input_mapping and len(input_mapping)>0: + x = 'Script flags mapped to environment' + md_script_readme.append('') + md_script_readme.append('#### '+x) + toc_readme.append(' '+x) + + md_script_readme.append('
<details>')
+            md_script_readme.append('<summary>Click here to expand this section.</summary>')
+
+            md_script_readme.append('')
+            key0 = ''
+            for key in sorted(input_mapping):
+                if key0=='': key0=key
+                value = input_mapping[key]
+                md_script_readme.append('* `--{}=value` → `{}=value`'.format(key,value))
+
+            md_script_readme.append('')
+            md_script_readme.append('**The above CLI flags can be used in the Python CM API as follows:**')
+            md_script_readme.append('')
+
+            x = '```python\nr=cm.access({... , "'+key0+'":...})\n```'
+            md_script_readme.append(x)
+
+            md_script_readme.append('')
+            md_script_readme.append('</details>
</details>')
+            md_script_readme.append('')
+
+
+        # Default environment
+        default_env = meta.get('default_env',{})
+
+        x = 'Default environment'
+#        md_script_readme.append('___')
+        md_script_readme.append('#### '+x)
+        toc_readme.append(' '+x)
+
+        md_script_readme.append('')
+        md_script_readme.append('<details>
')
+        md_script_readme.append('<summary>Click here to expand this section.</summary>')
+        md_script_readme.append('')
+        md_script_readme.append('These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.')
+        md_script_readme.append('')
+
+        for key in default_env:
+            value = default_env[key]
+            md_script_readme.append('* {}: `{}`'.format(key,value))
+
+        md_script_readme.append('')
+        md_script_readme.append('</details>
') + md_script_readme.append('') + + + + + + + + if len(version_keys)>0 or default_version!='': + x = 'Versions' +# md_script_readme.append('___') + md_script_readme.append('#### '+x) + toc_readme.append(x) + + if default_version!='': + md_script_readme.append('Default version: `{}`'.format(default_version)) + md_script_readme.append('') + + if len(version_keys)>0: + for version in version_keys: + md_script_readme.append('* `{}`'.format(version)) + + + + # Add workflow + x = 'Dependencies on other CM scripts' + md_script_readme += ['___', + '### '+x, + ''] + toc_readme.append(x) + +# md_script_readme.append('
<details>')
+#        md_script_readme.append('<summary>Click here to expand this section.</summary>')
+
+        md_script_readme.append('')
+
+        # Check customize.py file
+        path_customize = os.path.join(path, 'customize.py')
+        found_customize = False
+        found_customize_preprocess = False
+        found_customize_postprocess = False
+        found_output_env = []
+
+        if os.path.isfile(path_customize):
+            found_customize = True
+
+            r = utils.load_txt(path_customize, split=True)
+            if r['return']>0: return r
+
+            customize = r['string']
+            customize_l = r['list']
+
+            if 'def preprocess(' in customize:
+                found_customize_preprocess = True
+
+            if 'def postprocess(' in customize:
+                found_customize_postprocess = True
+
+            # Heuristic attempt to detect output env keys assigned in customize.py
+            found_postprocess = False
+            for l in customize_l:
+#                if not found_postprocess:
+#                    if 'def postprocess' in l:
+#                        found_postprocess = True
+#                else:
+                j = l.find(' env[')
+                if j>=0:
+                    j1 = l.find(']', j+4)
+                    if j1>=0:
+                        j2 = l.find('=',j1+1)
+                        if j2>=0:
+                            key2 = l[j+5:j1].strip()
+                            key=key2[1:-1]
+
+                            if key.startswith('CM_') and 'TMP' not in key and key not in found_output_env:
+                                found_output_env.append(key)
+
+        process_deps(self_module, meta, meta_url, md_script_readme, 'deps')
+
+        x = ''
+        y = 'customize.py'
+        if found_customize_preprocess:
+            x = '***'
+            y = '['+y+']('+url+'/'+y+')'
+        md_script_readme.append((' 1. '+x+'Run "preprocess" function from {}'+x).format(y))
+
+        process_deps(self_module, meta, meta_url, md_script_readme, 'prehook_deps')
+
+        # Check native run scripts
+        files = os.listdir(path)
+        x = ''
+        y = []
+        for f in sorted(files):
+            if f.startswith('run') and (f.endswith('.sh') or f.endswith('.bat')):
+                x = '***'
+                f_url = url+'/'+f
+                y.append(' * [{}]({})'.format(f, f_url))
+
+        md_script_readme.append(' 1. '+x+'Run native script if it exists'+x)
+        md_script_readme += y
+
+        process_deps(self_module, meta, meta_url, md_script_readme, 'posthook_deps')
+
+        x = ''
+        y = 'customize.py'
+        if found_customize_postprocess:
+            x = '***'
+            y = '['+y+']('+url+'/'+y+')'
+        md_script_readme.append((' 1. '+x+'Run "postprocess" function from {}'+x).format(y))
+
+        process_deps(self_module, meta, meta_url, md_script_readme, 'post_deps')
+        # md_script_readme.append('</details>
') + md_script_readme.append('') + + # New environment + new_env_keys = meta.get('new_env_keys',[]) + + x = 'Script output' + md_script_readme.append('___') + md_script_readme.append('### '+x) + toc_readme.append(x) + + md_script_readme.append(cli_all_tags_alternative_j) + + x = 'New environment keys (filter)' + md_script_readme.append('#### '+x) + toc_readme.append(x) + + md_script_readme.append('') + for key in sorted(new_env_keys): + md_script_readme.append('* `{}`'.format(key)) + + # Pass found_output_env through above filter + found_output_env_filtered = [] + + import fnmatch + + for key in found_output_env: + add = False + + for f in new_env_keys: + if fnmatch.fnmatch(key, f): + add = True + break + + if add: + found_output_env_filtered.append(key) + + x = 'New environment keys auto-detected from customize' + md_script_readme.append('#### '+x) + toc_readme.append(x) + + md_script_readme.append('') + for key in sorted(found_output_env_filtered): + md_script_readme.append('* `{}`'.format(key)) + + + + # Add maintainers +# x = 'Maintainers' +# md_script_readme.append('___') +# md_script_readme.append('### '+x) +# md_script_readme.append('') +# md_script_readme.append('* ' + public_taskforce) +# toc_readme.append(x) + + # Process TOC + toc_readme_string = '\n' + for x in toc_readme: + x2 = x + prefix = '' + + if x.startswith(' '): + prefix = ' ' + x2 = x[1:] + + x2 = x2.lower().replace(' ','-').replace(',','') + toc_readme_string += prefix + '* [{}](#{})\n'.format(x, x2) + + # Add to the total list + md += md_script + + s = '\n'.join(md_script_readme) + + s = s.replace('{{CM_README_EXTRA}}', cm_readme_extra) +# s = s.replace('{{CM_SEE_README_EXTRA}}', cm_see_readme_extra) + s = s.replace('{{CM_README_TOC}}', toc_readme_string) + + r = utils.save_txt(path_readme, s) + if r['return']>0: return r + + # Add to Git (if in git) + os.chdir(path) + os.system('git add README.md') + os.chdir(cur_dir) + + + # Recreate TOC with categories + toc2 = [] + + for category in sorted(toc_category):#, key = lambda x: -toc_category_sort[x]): + toc2.append('### '+category) + toc2.append('') + + for script in sorted(toc_category[category]): + + meta = script_meta[script] + + name = meta.get('name','') + + url = urls[script] + + x = '* [{}]({})'.format(script, url) + if name !='': x+=' *('+name+')*' + + toc2.append(x) + + toc2.append('') + + toc_category_string = '' + for category in sorted(toc_category): + category_link = category.lower().replace(' ','-').replace('/','') + toc_category_string += '* [{}](#{})\n'.format(category, category_link) + + + # Load template + r = utils.load_txt(os.path.join(self_module.path, template_file)) + if r['return']>0: return r + + s = r['string'] + + s = s.replace('{{CM_TOC2}}', '\n'.join(toc2)) + s = s.replace('{{CM_TOC}}', '\n'.join(toc)) +# s = s.replace('{{CM_MAIN}}', '\n'.join(md)) + s = s.replace('{{CM_MAIN}}', '') + s = s.replace('{{CM_TOC_CATEGORIES}}', toc_category_string) + + # Output + output_dir = i.get('output_dir','') + + if output_dir == '': output_dir = '..' 
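+    # By default the generated list_of_scripts.md is written one directory
+    # above the current working directory.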
+ + output_file = os.path.join(output_dir, list_file) + + r = utils.save_txt(output_file, s) + if r['return']>0: return r + + return {'return':0} + + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +def update_path_for_docker(path, mounts, force_path_target=''): + + path_orig = '' + path_target = '' + + if path!='': # and (os.path.isfile(path) or os.path.isdir(path)): + path = os.path.abspath(path) + + path_target = path + path_orig = path + + if os.name == 'nt': + from pathlib import PureWindowsPath, PurePosixPath + + x = PureWindowsPath(path_orig) + path_target = str(PurePosixPath('/', *x.parts[1:])) + + if not path_target.startswith('/'): path_target='/'+path_target + + path_target='/cm-mount'+path_target if force_path_target=='' else force_path_target + + # If file, mount directory + if os.path.isfile(path) or not os.path.isdir(path): + x = os.path.dirname(path_orig) + ':' + os.path.dirname(path_target) + else: + x = path_orig + ':' + path_target + + # CHeck if no duplicates + to_add = True + for y in mounts: + if y.lower()==x.lower(): + to_add = False + break + + if to_add: + mounts.append(x) + + + return (path_orig, path_target) + +############################################################ +def process_inputs(i): + + import copy + + i_run_cmd_arc = i['run_cmd_arc'] + docker_settings = i['docker_settings'] + mounts = i['mounts'] + + # Check if need to update/map/mount inputs and env + i_run_cmd = copy.deepcopy(i_run_cmd_arc) + + + def get_value_using_key_with_dots(d, k): + v = None + j = k.find('.') + if j>=0: + k1 = k[:j] + k2 = k[j+1:] + + if k1 in d: + v = d[k1] + + if '.' in k2: + v, d, k = get_value_using_key_with_dots(v, k2) + else: + d = v + k = k2 + if type(v)==dict: + v = v.get(k2) + else: + v = None + else: + if k == '': + v = d + else: + v = d.get(k) + + return v, d, k + + docker_input_paths = docker_settings.get('input_paths',[]) + if len(i_run_cmd)>0: + for k in docker_input_paths: + v2, i_run_cmd2, k2 = get_value_using_key_with_dots(i_run_cmd, k) + + if v2!=None: + v=i_run_cmd2[k2] + + path_orig, path_target = update_path_for_docker(v, mounts) + + if path_target!='': + i_run_cmd2[k2] = path_target + + return {'return':0, 'run_cmd':i_run_cmd} + + +############################################################ +def regenerate_script_cmd(i): + + script_uid = i['script_uid'] + script_alias = i['script_alias'] + tags = i['tags'] + docker_settings = i['docker_settings'] + fake_run = i.get('fake_run', False) + + i_run_cmd = i['run_cmd'] + + docker_run_cmd_prefix = i['docker_run_cmd_prefix'] + + # Regenerate command from dictionary input + run_cmd = 'cm run script' + + x = '' + + # Check if there are some tags without variation + requested_tags = i_run_cmd.get('tags', []) + + tags_without_variation = False + for t in requested_tags: + if not t.startswith('_'): + tags_without_variation = True + break + + if not tags_without_variation: + # If no tags without variation, add script alias and UID explicitly + if script_uid!='': x=script_uid + if script_alias!='': + if x!='': x=','+x + x = script_alias+x + + if x!='': + run_cmd += ' ' + x + ' ' + + + skip_input_for_fake_run = docker_settings.get('skip_input_for_fake_run', []) + add_quotes_to_keys = docker_settings.get('add_quotes_to_keys', []) + + + def rebuild_flags(i_run_cmd, fake_run, skip_input_for_fake_run, add_quotes_to_keys, key_prefix): + + run_cmd = '' + + keys = list(i_run_cmd.keys()) + + if 'tags' in keys: + # Move tags first + tags_position = keys.index('tags') + del(keys[tags_position]) + keys = ['tags']+keys + + 
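+        # Flatten the (possibly nested) input dictionary back into CLI flags:
+        # nested dicts become dotted keys and lists are passed as --key,=v1,v2.
+        # Illustrative example: {'tags':'run,mlperf', 'env':{'CM_X':'1'}}
+        # regenerates as: --tags=run,mlperf --env.CM_X=1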
for k in keys: + # Assemble long key if dictionary + long_key = key_prefix + if long_key!='': long_key+='.' + long_key+=k + + if fake_run and long_key in skip_input_for_fake_run: + continue + + v = i_run_cmd[k] + + q = '\\"' if long_key in add_quotes_to_keys else '' + + if type(v)==dict: + run_cmd += rebuild_flags(v, fake_run, skip_input_for_fake_run, add_quotes_to_keys, long_key) + elif type(v)==list: + x = '' + for vv in v: + if x != '': x+=',' + x+=q+str(vv)+q + run_cmd+=' --'+long_key+',=' + x + else: + run_cmd+=' --'+long_key+'='+q+str(v)+q + + return run_cmd + + run_cmd += rebuild_flags(i_run_cmd, fake_run, skip_input_for_fake_run, add_quotes_to_keys, '') + + run_cmd = docker_run_cmd_prefix + ' && ' + run_cmd if docker_run_cmd_prefix!='' else run_cmd + + return {'return':0, 'run_cmd_string':run_cmd} + + + +############################################################ +def aux_search(i): + + self_module = i['self_module'] + + inp = i['input'] + + repos = inp.get('repos','') +# Grigori Fursin remarked on 20240412 because this line prevents +# from searching for scripts in other public or private repositories. +# Not sure why we enforce just 2 repositories +# +# if repos == '': repos='internal,a4705959af8e447a' + + parsed_artifact = inp.get('parsed_artifact',[]) + + if len(parsed_artifact)<1: + parsed_artifact = [('',''), ('','')] + elif len(parsed_artifact)<2: + parsed_artifact.append(('','')) + else: + repos = parsed_artifact[1][0] + + list_of_repos = repos.split(',') if ',' in repos else [repos] + + ii = utils.sub_input(inp, self_module.cmind.cfg['artifact_keys'] + ['tags']) + + ii['out'] = None + + # Search for automations in repos + lst = [] + for repo in list_of_repos: + parsed_artifact[1] = ('',repo) if utils.is_cm_uid(repo) else (repo,'') + ii['parsed_artifact'] = parsed_artifact + r = self_module.search(ii) + if r['return']>0: return r + lst += r['list'] + + return {'return':0, 'list':lst} + + +############################################################ +def dockerfile(i): + """ + Add CM automation. + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + parsed_artifact (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + (repos) (str): list of repositories to search for automations (internal & mlcommons@ck by default) + + (output_dir) (str): output directory (./ by default) + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + import copy + + # Check simplified CMD: cm docker script "python app image-classification onnx" + # If artifact has spaces, treat them as tags! 
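+    # The prune_input call below strips all --docker_* control flags so that
+    # the command replayed inside the container matches the user's original
+    # "cm run script" invocation.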
+ self_module = i['self_module'] + self_module.cmind.access({'action':'detect_tags_in_artifact', 'automation':'utils', 'input':i}) + + # Prepare "clean" input to replicate command + r = self_module.cmind.access({'action':'prune_input', 'automation':'utils', 'input':i, 'extra_keys_starts_with':['docker_']}) + i_run_cmd_arc = r['new_input'] + + cur_dir = os.getcwd() + + quiet = i.get('quiet', False) + + console = i.get('out') == 'con' + + cm_repo = i.get('docker_cm_repo', 'mlcommons@ck') + cm_repo_flags = i.get('docker_cm_repo_flags', '') + + # Search for script(s) + r = aux_search({'self_module': self_module, 'input': i}) + if r['return']>0: return r + + lst = r['list'] + + if len(lst)==0: + return {'return':1, 'error':'no scripts were found'} + + + + +# if i.get('cmd'): +# run_cmd = "cm run script " + " ".join( a for a in i['cmd'] if not a.startswith('--docker_') ) +# elif i.get('artifact'): +# run_cmd = "cm run script "+i['artifact'] +# elif i.get('tags'): +# run_cmd = "cm run script \""+" "+" ".join(i['tags']) + "\"" +# else: +# run_cmd = "" +# +# run_cmd = i.get('docker_run_cmd_prefix') + ' && ' + run_cmd if i.get('docker_run_cmd_prefix') else run_cmd + + + + + + env=i.get('env', {}) + state = i.get('state', {}) + script_automation = i['self_module'] + + dockerfile_env=i.get('dockerfile_env', {}) + dockerfile_env['CM_RUN_STATE_DOCKER'] = True + + tags_split = i.get('tags', '').split(",") + variation_tags = [ t[1:] for t in tags_split if t.startswith("_") ] + + for artifact in sorted(lst, key = lambda x: x.meta.get('alias','')): + + meta = artifact.meta + + script_path = artifact.path + + tags = meta.get("tags", []) + tag_string=",".join(tags) + + script_alias = meta.get('alias', '') + script_uid = meta.get('uid', '') + + + variations = meta.get('variations', {}) + docker_settings = meta.get('docker', {}) + state['docker'] = docker_settings + + r = script_automation._update_state_from_variations(i, meta, variation_tags, variations, env, state, deps = [], post_deps = [], prehook_deps = [], posthook_deps = [], new_env_keys_from_meta = [], new_state_keys_from_meta = [], add_deps_recursive = {}, run_state = {}, recursion_spaces='', verbose = False) + if r['return'] > 0: + return r + + docker_settings = state['docker'] + + if not docker_settings.get('run', True): + print("docker.run set to False in _cm.json") + continue + '''run_config_path = os.path.join(script_path,'run_config.yml') + if not os.path.exists(run_config_path): + print("No run_config.yml file present in {}".format(script_path)) + continue + import yaml + with open(run_config_path, 'r') as run_config_file: + run_config = yaml.safe_load(run_config_file) + docker_settings = run_config.get('docker') + if not docker_settings or not docker_settings.get('build') or not run_config.get('run_with_default_inputs'): + print("Run config is not configured for docker run in {}".format(run_config_path)) + continue + ''' + + # Check if need to update/map/mount inputs and env + r = process_inputs({'run_cmd_arc': i_run_cmd_arc, + 'docker_settings': docker_settings, + 'mounts':[]}) + if r['return']>0: return r + + i_run_cmd = r['run_cmd'] + + docker_run_cmd_prefix = i.get('docker_run_cmd_prefix', docker_settings.get('run_cmd_prefix', '')) + + r = regenerate_script_cmd({'script_uid':script_uid, + 'script_alias':script_alias, + 'run_cmd':i_run_cmd, + 'tags':tags, + 'fake_run':True, + 'docker_settings':docker_settings, + 'docker_run_cmd_prefix':docker_run_cmd_prefix}) + if r['return']>0: return r + + run_cmd = r['run_cmd_string'] + + + 
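+        # An explicit --docker_base_image overrides the script's docker settings;
+        # without one, the Dockerfile name is derived from the OS and version
+        # (e.g. ubuntu_22.04.Dockerfile).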
docker_base_image = i.get('docker_base_image', docker_settings.get('base_image')) + docker_os = i.get('docker_os', docker_settings.get('docker_os', 'ubuntu')) + docker_os_version = i.get('docker_os_version', docker_settings.get('docker_os_version', '22.04')) + + docker_cm_repos = i.get('docker_cm_repos', docker_settings.get('cm_repos', '')) + + docker_extra_sys_deps = i.get('docker_extra_sys_deps', '') + + if not docker_base_image: + dockerfilename_suffix = docker_os +'_'+docker_os_version + else: + if os.name == 'nt': + dockerfilename_suffix = docker_base_image.replace('/', '-').replace(':','-') + else: + dockerfilename_suffix = docker_base_image.split("/") + dockerfilename_suffix = dockerfilename_suffix[len(dockerfilename_suffix) - 1] + + fake_run_deps = i.get('fake_run_deps', docker_settings.get('fake_run_deps', False)) + docker_run_final_cmds = docker_settings.get('docker_run_final_cmds', []) + + r = check_gh_token(i, docker_settings, quiet) + if r['return'] >0 : return r + gh_token = r['gh_token'] + i['docker_gh_token'] = gh_token # To pass to docker function if needed + + if i.get('docker_real_run', docker_settings.get('docker_real_run',False)): + fake_run_option = " " + fake_run_deps = False + else: + fake_run_option = " --fake_run" + + docker_copy_files = i.get('docker_copy_files', docker_settings.get('copy_files', [])) + + env['CM_DOCKER_PRE_RUN_COMMANDS'] = docker_run_final_cmds + + docker_path = i.get('docker_path', '').strip() + if docker_path == '': + docker_path = script_path + + dockerfile_path = os.path.join(docker_path, 'dockerfiles', dockerfilename_suffix +'.Dockerfile') + + if i.get('print_deps'): + cm_input = {'action': 'run', + 'automation': 'script', + 'tags': f'{tag_string}', + 'print_deps': True, + 'quiet': True, + 'silent': True, + 'fake_run': True, + 'fake_deps': True + } + r = self_module.cmind.access(cm_input) + if r['return'] > 0: + return r + print_deps = r['new_state']['print_deps'] + comments = [ "#RUN " + dep for dep in print_deps ] + comments.append("") + comments.append("# Run CM workflow") + else: + comments = [] + + cm_docker_input = {'action': 'run', + 'automation': 'script', + 'tags': 'build,dockerfile', + 'cm_repo': cm_repo, + 'cm_repo_flags': cm_repo_flags, + 'docker_base_image': docker_base_image, + 'docker_os': docker_os, + 'docker_os_version': docker_os_version, + 'file_path': dockerfile_path, + 'fake_run_option': fake_run_option, + 'comments': comments, + 'run_cmd': f'{run_cmd} --quiet', + 'script_tags': f'{tag_string}', + 'copy_files': docker_copy_files, + 'quiet': True, + 'env': env, + 'dockerfile_env': dockerfile_env, + 'v': i.get('v', False), + 'fake_docker_deps': fake_run_deps, + 'print_deps': True, + 'real_run': True + } + + if docker_cm_repos != '': + cm_docker_input['cm_repos'] = docker_cm_repos + + if gh_token != '': + cm_docker_input['gh_token'] = gh_token + + if docker_extra_sys_deps != '': + cm_docker_input['extra_sys_deps'] = docker_extra_sys_deps + + r = self_module.cmind.access(cm_docker_input) + if r['return'] > 0: + return r + + print ('') + print ("Dockerfile generated at " + dockerfile_path) + + return {'return':0} + +def get_container_path(value): + path_split = value.split(os.sep) + if len(path_split) == 1: + return value + + new_value = '' + if "cache" in path_split and "local" in path_split: + new_path_split = [ "", "home", "cmuser" ] + repo_entry_index = path_split.index("local") + new_path_split += path_split[repo_entry_index:] + return "/".join(new_path_split) + + return value + + 
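+# Illustrative example (not part of the original code): get_container_path()
+# above remaps host paths that live inside the local CM cache under the
+# container user's home directory, e.g. on a POSIX host:
+#
+#   get_container_path('/home/user/CM/repos/local/cache/abc123')
+#   -> '/home/cmuser/local/cache/abc123'
+#
+# Paths without both 'local' and 'cache' components are returned unchanged.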
+############################################################ +def docker(i): + """ + CM automation to run CM scripts via Docker + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + (docker_path) (str): where to create or find Dockerfile + (docker_gh_token) (str): GitHub token for private repositories + (docker_save_script) (str): if !='' name of script to save docker command + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + import copy + import re + + quiet = i.get('quiet', False) + + detached = i.get('docker_detached', '') + if detached=='': + detached = i.get('docker_dt', '') + if detached=='': + detached='no' + + interactive = i.get('docker_interactive', '') + if interactive == '': + interactive = i.get('docker_it', '') + + verbose = i.get('v', False) + show_time = i.get('show_time', False) + + # Check simplified CMD: cm docker script "python app image-classification onnx" + # If artifact has spaces, treat them as tags! + self_module = i['self_module'] + self_module.cmind.access({'action':'detect_tags_in_artifact', 'automation':'utils', 'input':i}) + + # Prepare "clean" input to replicate command + r = self_module.cmind.access({'action':'prune_input', 'automation':'utils', 'input':i, 'extra_keys_starts_with':['docker_']}) + i_run_cmd_arc = r['new_input'] + + noregenerate_docker_file = i.get('docker_noregenerate', False) + + if not noregenerate_docker_file: + r = utils.call_internal_module(self_module, __file__, 'module_misc', 'dockerfile', i) + if r['return']>0: return r + + cur_dir = os.getcwd() + + console = i.get('out') == 'con' + + # Search for script(s) + r = aux_search({'self_module': self_module, 'input': i}) + if r['return']>0: return r + + lst = r['list'] + + if len(lst)==0: + return {'return':1, 'error':'no scripts were found'} + + env=i.get('env', {}) + env['CM_RUN_STATE_DOCKER'] = False + script_automation = i['self_module'] + state = i.get('state', {}) + + tags_split = i.get('tags', '').split(",") + variation_tags = [ t[1:] for t in tags_split if t.startswith("_") ] + + docker_cache = i.get('docker_cache', "yes") + if docker_cache in ["no", False, "False" ]: + if 'CM_DOCKER_CACHE' not in env: + env['CM_DOCKER_CACHE'] = docker_cache + + image_repo = i.get('docker_image_repo','') + if image_repo == '': + image_repo = 'cknowledge' + + for artifact in sorted(lst, key = lambda x: x.meta.get('alias','')): + + meta = artifact.meta + + if i.get('help',False): + return utils.call_internal_module(self_module, __file__, 'module_help', 'print_help', {'meta':meta, 'path':artifact.path}) + + script_path = artifact.path + + tags = meta.get("tags", []) + tag_string=",".join(tags) + + script_alias = meta.get('alias', '') + script_uid = meta.get('uid', '') + + + mounts = copy.deepcopy(i.get('docker_mounts', [])) + + '''run_config_path = os.path.join(script_path,'run_config.yml') + if not os.path.exists(run_config_path): + print("No run_config.yml file present in {}".format(script_path)) + continue + import yaml + with open(run_config_path, 'r') as run_config_file: + run_config = yaml.safe_load(run_config_file) + ''' + + variations = meta.get('variations', {}) + docker_settings = meta.get('docker', {}) + state['docker'] = docker_settings + + r = script_automation._update_state_from_variations(i, meta, variation_tags, variations, env, state, deps = [], post_deps = [], prehook_deps = [], posthook_deps = [], new_env_keys_from_meta = [], new_state_keys_from_meta = [], 
add_deps_recursive = {}, run_state = {}, recursion_spaces='', verbose = False) + if r['return'] > 0: + return r + + docker_settings = state['docker'] + + if not docker_settings.get('run', True): + print("docker.run set to False in _cm.json") + continue + ''' + if not docker_settings or not docker_settings.get('build') or not run_config.get('run_with_default_inputs'): + print("Run config is not configured for docker run in {}".format(run_config_path)) + continue + ''' + + # Check if need to update/map/mount inputs and env + r = process_inputs({'run_cmd_arc': i_run_cmd_arc, + 'docker_settings': docker_settings, + 'mounts':mounts}) + if r['return']>0: return r + + i_run_cmd = r['run_cmd'] + + # Check if need to mount home directory + current_path_target = '/cm-mount/current' + if docker_settings.get('mount_current_dir','')=='yes': + update_path_for_docker('.', mounts, force_path_target=current_path_target) + + + _os = i.get('docker_os', docker_settings.get('docker_os', 'ubuntu')) + version = i.get('docker_os_version', docker_settings.get('docker_os_version', '22.04')) + + deps = docker_settings.get('deps', []) + if deps: + # Todo: Support state, const and add_deps_recursive + run_state = {'deps':[], 'fake_deps':[], 'parent': None} + run_state['script_id'] = script_alias + "," + script_uid + run_state['script_variation_tags'] = variation_tags + r = script_automation._run_deps(deps, [], env, {}, {}, {}, {}, '', {}, '', False, '', verbose, show_time, ' ', run_state) + if r['return'] > 0: + return r + + for key in docker_settings.get('mounts', []): + mounts.append(key) + + # Updating environment variables from CM input based on input_mapping from meta + input_mapping = meta.get('input_mapping', {}) + + for c_input in input_mapping: + if c_input in i: + env[input_mapping[c_input]] = i[c_input] + + # Updating environment variables from CM input based on docker_input_mapping from meta + + docker_input_mapping = docker_settings.get('docker_input_mapping', {}) + + for c_input in docker_input_mapping: + if c_input in i: + env[docker_input_mapping[c_input]] = i[c_input] + + container_env_string = '' # env keys corresponding to container mounts are explicitly passed to the container run cmd + for index in range(len(mounts)): + mount = mounts[index] + + # Since windows may have 2 :, we search from the right + j = mount.rfind(':') + if j>0: + mount_parts = [mount[:j], mount[j+1:]] + else: + return {'return':1, 'error': 'Can\'t find separator : in a mount string: {}'.format(mount)} + +# mount_parts = mount.split(":") +# if len(mount_parts) != 2: +# return {'return': 1, 'error': f'Invalid mount specified in docker settings'} + + host_mount = mount_parts[0] + new_host_mount = host_mount + container_mount = mount_parts[1] + new_container_mount = container_mount + + tmp_values = re.findall(r'\${{ (.*?) }}', str(host_mount)) + skip = False + if tmp_values: + for tmp_value in tmp_values: + if tmp_value in env: + new_host_mount = env[tmp_value] + else:# we skip those mounts + mounts[index] = None + skip = True + break + + tmp_values = re.findall(r'\${{ (.*?) 
}}', str(container_mount)) + if tmp_values: + for tmp_value in tmp_values: + if tmp_value in env: + new_container_mount = get_container_path(env[tmp_value]) + container_env_string += " --env.{}={} ".format(tmp_value, new_container_mount) + else:# we skip those mounts + mounts[index] = None + skip = True + break + + if skip: + continue + mounts[index] = new_host_mount+":"+new_container_mount + + mounts = list(filter(lambda item: item is not None, mounts)) + + mount_string = "" if len(mounts)==0 else ",".join(mounts) + + #check for proxy settings and pass onto the docker + proxy_keys = [ "ftp_proxy", "FTP_PROXY", "http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY", "socks_proxy", "SOCKS_PROXY", "GH_TOKEN" ] + + if env.get('+ CM_DOCKER_BUILD_ARGS', []) == []: + env['+ CM_DOCKER_BUILD_ARGS'] = [] + + for key in proxy_keys: + if os.environ.get(key, '') != '': + value = os.environ[key] + container_env_string += " --env.{}={} ".format(key, value) + env['+ CM_DOCKER_BUILD_ARGS'].append("{}={}".format(key, value)) + + docker_use_host_group_id = i.get('docker_use_host_group_id', docker_settings.get('use_host_group_id')) + if docker_use_host_group_id and os.name != 'nt': + env['+ CM_DOCKER_BUILD_ARGS'].append("{}={}".format('CM_ADD_DOCKER_GROUP_ID', '\\"-g $(id -g $USER) -o\\"')) + + docker_base_image = i.get('docker_base_image', docker_settings.get('base_image')) + docker_os = i.get('docker_os', docker_settings.get('docker_os', 'ubuntu')) + docker_os_version = i.get('docker_os_version', docker_settings.get('docker_os_version', '22.04')) + + if not docker_base_image: + dockerfilename_suffix = docker_os +'_'+docker_os_version + else: + if os.name == 'nt': + dockerfilename_suffix = docker_base_image.replace('/', '-').replace(':','-') + else: + dockerfilename_suffix = docker_base_image.split("/") + dockerfilename_suffix = dockerfilename_suffix[len(dockerfilename_suffix) - 1] + + + cm_repo=i.get('docker_cm_repo', 'mlcommons@ck') + + docker_path = i.get('docker_path', '').strip() + if docker_path == '': + docker_path = script_path + + dockerfile_path = os.path.join(docker_path, 'dockerfiles', dockerfilename_suffix +'.Dockerfile') + + docker_skip_run_cmd = i.get('docker_skip_run_cmd', docker_settings.get('skip_run_cmd', False)) #skips docker run cmd and gives an interactive shell to the user + + docker_pre_run_cmds = i.get('docker_pre_run_cmds', []) + docker_settings.get('pre_run_cmds', []) + + docker_run_cmd_prefix = i.get('docker_run_cmd_prefix', docker_settings.get('run_cmd_prefix', '')) + + all_gpus = i.get('docker_all_gpus', docker_settings.get('all_gpus')) + + device = i.get('docker_device', docker_settings.get('device')) + + r = check_gh_token(i, docker_settings, quiet) + if r['return'] >0 : return r + gh_token = r['gh_token'] + + + port_maps = i.get('docker_port_maps', docker_settings.get('port_maps', [])) + + shm_size = i.get('docker_shm_size', docker_settings.get('shm_size', '')) + + extra_run_args = i.get('docker_extra_run_args', docker_settings.get('extra_run_args', '')) + + if detached == '': + detached = docker_settings.get('detached', '') + + if interactive == '': + interactive = docker_settings.get('interactive', '') + +# # Regenerate run_cmd +# if i.get('cmd'): +# run_cmd = "cm run script " + " ".join( a for a in i['cmd'] if not a.startswith('--docker_') ) +# elif i.get('artifact'): +# run_cmd = "cm run script "+i['artifact'] +# elif i.get('tags'): +# run_cmd = "cm run script \""+" "+" ".join(i['tags']) + "\"" +# else: +# run_cmd = "" + + + + r = 
regenerate_script_cmd({'script_uid':script_uid, + 'script_alias':script_alias, + 'tags':tags, + 'run_cmd':i_run_cmd, + 'docker_settings':docker_settings, + 'docker_run_cmd_prefix':i.get('docker_run_cmd_prefix','')}) + if r['return']>0: return r + + run_cmd = r['run_cmd_string'] + ' ' + container_env_string + ' --docker_run_deps ' + + env['CM_RUN_STATE_DOCKER'] = True + + if docker_settings.get('mount_current_dir','')=='yes': + run_cmd = 'cd '+current_path_target+' && '+run_cmd + + final_run_cmd = run_cmd if docker_skip_run_cmd not in [ 'yes', True, 'True' ] else 'cm version' + + print ('') + print ('CM command line regenerated to be used inside Docker:') + print ('') + print (final_run_cmd) + print ('') + + + cm_docker_input = {'action': 'run', + 'automation': 'script', + 'tags': 'run,docker,container', + 'recreate': 'yes', + 'docker_base_image': docker_base_image, + 'docker_os': docker_os, + 'docker_os_version': docker_os_version, + 'cm_repo': cm_repo, + 'env': env, + 'image_repo': image_repo, + 'interactive': interactive, + 'mounts': mounts, + 'image_name': 'cm-script-'+script_alias, +# 'image_tag': script_alias, + 'detached': detached, + 'script_tags': f'{tag_string}', + 'run_cmd': final_run_cmd, + 'v': i.get('v', False), + 'quiet': True, + 'pre_run_cmds': docker_pre_run_cmds, + 'real_run': True, + 'add_deps_recursive': { + 'build-docker-image': { + 'dockerfile': dockerfile_path + } + } + } + + if all_gpus: + cm_docker_input['all_gpus'] = True + + if device: + cm_docker_input['device'] = device + + if gh_token != '': + cm_docker_input['gh_token'] = gh_token + + if port_maps: + cm_docker_input['port_maps'] = port_maps + + if shm_size != '': + cm_docker_input['shm_size'] = shm_size + + if extra_run_args != '': + cm_docker_input['extra_run_args'] = extra_run_args + + if i.get('docker_save_script', ''): + cm_docker_input['save_script'] = i['docker_save_script'] + + print ('') + + r = self_module.cmind.access(cm_docker_input) + if r['return'] > 0: + return r + + + return {'return':0} + +############################################################ +def check_gh_token(i, docker_settings, quiet): + gh_token = i.get('docker_gh_token', '') + + if docker_settings.get('gh_token_required', False) and gh_token == '': + rx = {'return':1, 'error':'GH token is required but not provided. 
Use --docker_gh_token to set it'} + + if quiet: + return rx + + print ('') + gh_token = input ('Enter GitHub token to access private CM repositories required for this CM script: ') + + if gh_token == '': + return rx + + return {'return':0, 'gh_token': gh_token} diff --git a/automation/script/template-ae-python/README-extra.md b/automation/script/template-ae-python/README-extra.md new file mode 100644 index 0000000000..05e53dc1a0 --- /dev/null +++ b/automation/script/template-ae-python/README-extra.md @@ -0,0 +1,2 @@ +# CM script to run and reproduce experiments + diff --git a/automation/script/template-ae-python/_cm.yaml b/automation/script/template-ae-python/_cm.yaml new file mode 100644 index 0000000000..8019b3647e --- /dev/null +++ b/automation/script/template-ae-python/_cm.yaml @@ -0,0 +1,38 @@ +cache: false + +deps: + # Detect host OS features + - tags: detect,os + + # Detect/install python + - tags: get,python + names: + - python + - python3 + +script_name: run + +input_mapping: + experiment: CM_EXPERIMENT + +default_env: + CM_EXPERIMENT: '1' + +variations: + install_deps: + script_name: install_deps + + run: + script_name: run + + reproduce: + script_name: reproduce + + plot: + script_name: plot + + analyze: + script_name: analyze + + validate: + script_name: validate diff --git a/automation/script/template-ae-python/analyze.bat b/automation/script/template-ae-python/analyze.bat new file mode 100644 index 0000000000..7e786771ae --- /dev/null +++ b/automation/script/template-ae-python/analyze.bat @@ -0,0 +1,12 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +rem echo. +rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/analyze.sh b/automation/script/template-ae-python/analyze.sh new file mode 100644 index 0000000000..630c3db3dd --- /dev/null +++ b/automation/script/template-ae-python/analyze.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" + +#echo "" +#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +#test $? -eq 0 || exit 1 diff --git a/automation/script/template-ae-python/customize.py b/automation/script/template-ae-python/customize.py new file mode 100644 index 0000000000..d12f9b3e1d --- /dev/null +++ b/automation/script/template-ae-python/customize.py @@ -0,0 +1,22 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/automation/script/template-ae-python/install_deps.bat b/automation/script/template-ae-python/install_deps.bat new file mode 100644 index 0000000000..47f7e7ce26 --- /dev/null +++ b/automation/script/template-ae-python/install_deps.bat @@ -0,0 +1,18 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( + + echo. + echo Installing requirements.txt ... + echo. 
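+  rem %CM_PYTHON_BIN_WITH_PATH% points to the Python interpreter resolved by
+  rem the "get,python" dependency declared in _cm.yaml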
+ + %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% +) diff --git a/automation/script/template-ae-python/install_deps.sh b/automation/script/template-ae-python/install_deps.sh new file mode 100644 index 0000000000..cb7c44c2bc --- /dev/null +++ b/automation/script/template-ae-python/install_deps.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" + +if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then + echo "" + echo "Installing requirements.txt ..." + echo "" + + ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + test $? -eq 0 || exit 1 +fi diff --git a/automation/script/template-ae-python/main.py b/automation/script/template-ae-python/main.py new file mode 100644 index 0000000000..d851f1450f --- /dev/null +++ b/automation/script/template-ae-python/main.py @@ -0,0 +1,10 @@ +import os + +if __name__ == "__main__": + + print ('') + print ('Main script:') + print ('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT',''))) + print ('') + + exit(0) diff --git a/automation/script/template-ae-python/plot.bat b/automation/script/template-ae-python/plot.bat new file mode 100644 index 0000000000..7e786771ae --- /dev/null +++ b/automation/script/template-ae-python/plot.bat @@ -0,0 +1,12 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +rem echo. +rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/plot.sh b/automation/script/template-ae-python/plot.sh new file mode 100644 index 0000000000..630c3db3dd --- /dev/null +++ b/automation/script/template-ae-python/plot.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" + +#echo "" +#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +#test $? -eq 0 || exit 1 diff --git a/automation/script/template-ae-python/reproduce.bat b/automation/script/template-ae-python/reproduce.bat new file mode 100644 index 0000000000..7e786771ae --- /dev/null +++ b/automation/script/template-ae-python/reproduce.bat @@ -0,0 +1,12 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +rem echo. +rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/reproduce.sh b/automation/script/template-ae-python/reproduce.sh new file mode 100644 index 0000000000..630c3db3dd --- /dev/null +++ b/automation/script/template-ae-python/reproduce.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" + +#echo "" +#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +#test $? 
-eq 0 || exit 1 diff --git a/automation/script/template-ae-python/run.bat b/automation/script/template-ae-python/run.bat new file mode 100644 index 0000000000..6c1274ce64 --- /dev/null +++ b/automation/script/template-ae-python/run.bat @@ -0,0 +1,12 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +echo. +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/run.sh b/automation/script/template-ae-python/run.sh new file mode 100644 index 0000000000..2150b45dcd --- /dev/null +++ b/automation/script/template-ae-python/run.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" + +echo "" +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +test $? -eq 0 || exit 1 diff --git a/automation/script/template-ae-python/validate.bat b/automation/script/template-ae-python/validate.bat new file mode 100644 index 0000000000..7e786771ae --- /dev/null +++ b/automation/script/template-ae-python/validate.bat @@ -0,0 +1,12 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +rem echo. +rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/validate.sh b/automation/script/template-ae-python/validate.sh new file mode 100644 index 0000000000..630c3db3dd --- /dev/null +++ b/automation/script/template-ae-python/validate.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" + +#echo "" +#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +#test $? 
-eq 0 || exit 1 diff --git a/automation/script/template-python/README-extra.md b/automation/script/template-python/README-extra.md new file mode 100644 index 0000000000..582991f6d2 --- /dev/null +++ b/automation/script/template-python/README-extra.md @@ -0,0 +1 @@ +# CM script diff --git a/automation/script/template-python/_cm.yaml b/automation/script/template-python/_cm.yaml new file mode 100644 index 0000000000..adbb8d4e7c --- /dev/null +++ b/automation/script/template-python/_cm.yaml @@ -0,0 +1,23 @@ +cache: false + +deps: + # Detect host OS features + - tags: detect,os + + # Detect/install python + - tags: get,python + names: + - python + - python3 + +input_mapping: + var1: CM_VAR1 + req: PIP_REQUIREMENTS + +default_env: + CM_VAR1: 'something' + +variations: + req: + env: + PIP_REQUIREMENTS: True diff --git a/automation/script/template-python/customize.py b/automation/script/template-python/customize.py new file mode 100644 index 0000000000..10214b87df --- /dev/null +++ b/automation/script/template-python/customize.py @@ -0,0 +1,30 @@ +from cmind import utils +import os + +def preprocess(i): + + print ('') + print ('Preprocessing ...') + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + print (' ENV CM_VAR1: {}'.format(env.get('CM_VAR1',''))) + + return {'return':0} + +def postprocess(i): + + print ('') + print ('Postprocessing ...') + + env = i['env'] + + return {'return':0} diff --git a/automation/script/template-python/main.py b/automation/script/template-python/main.py new file mode 100644 index 0000000000..9ba7bb751d --- /dev/null +++ b/automation/script/template-python/main.py @@ -0,0 +1,10 @@ +import os + +if __name__ == "__main__": + + print ('') + print ('Main script:') + print ('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1',''))) + print ('') + + exit(0) diff --git a/automation/script/template-python/requirements.txt b/automation/script/template-python/requirements.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/script/template-python/run.bat b/automation/script/template-python/run.bat new file mode 100644 index 0000000000..f9e1264bc8 --- /dev/null +++ b/automation/script/template-python/run.bat @@ -0,0 +1,25 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV PIP_REQUIREMENTS: %PIP_REQUIREMENTS% +echo ENV CM_VAR1: %CM_VAR1% + +if "%PIP_REQUIREMENTS%" == "True" ( + if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( + + echo. + echo Installing requirements.txt ... + echo. + + %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + ) +) + +echo. +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-python/run.sh b/automation/script/template-python/run.sh new file mode 100644 index 0000000000..a1a6aec2e2 --- /dev/null +++ b/automation/script/template-python/run.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV PIP_REQUIREMENTS: ${PIP_REQUIREMENTS}" +echo "ENV CM_VAR1: ${CM_VAR1}" + +if [ "${PIP_REQUIREMENTS}" == "True" ]; then + if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then + echo "" + echo "Installing requirements.txt ..." 
+ echo "" + + ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + test $? -eq 0 || exit 1 + fi +fi + +echo "" +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +test $? -eq 0 || exit 1 diff --git a/automation/script/template-pytorch/README-extra.md b/automation/script/template-pytorch/README-extra.md new file mode 100644 index 0000000000..582991f6d2 --- /dev/null +++ b/automation/script/template-pytorch/README-extra.md @@ -0,0 +1 @@ +# CM script diff --git a/automation/script/template-pytorch/_cm.yaml b/automation/script/template-pytorch/_cm.yaml new file mode 100644 index 0000000000..eaff95e47d --- /dev/null +++ b/automation/script/template-pytorch/_cm.yaml @@ -0,0 +1,42 @@ +cache: false + +deps: + # Detect host OS features + - tags: detect,os + + # Detect/install python + - tags: get,python + names: + - python + - python3 + + - tags: get,generic-python-lib,_torch + skip_if_env: + USE_CUDA: + - yes + + - tags: get,generic-python-lib,_torch_cuda + enable_if_env: + USE_CUDA: + - yes + + - tags: get,generic-python-lib,_package.numpy + + +input_mapping: + var1: CM_VAR1 + req: PIP_REQUIREMENTS + +default_env: + CM_VAR1: 'something' + +variations: + req: + env: + PIP_REQUIREMENTS: True + + cuda: + env: + USE_CUDA: yes + deps: + - tags: get,cuda diff --git a/automation/script/template-pytorch/customize.py b/automation/script/template-pytorch/customize.py new file mode 100644 index 0000000000..10214b87df --- /dev/null +++ b/automation/script/template-pytorch/customize.py @@ -0,0 +1,30 @@ +from cmind import utils +import os + +def preprocess(i): + + print ('') + print ('Preprocessing ...') + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + print (' ENV CM_VAR1: {}'.format(env.get('CM_VAR1',''))) + + return {'return':0} + +def postprocess(i): + + print ('') + print ('Postprocessing ...') + + env = i['env'] + + return {'return':0} diff --git a/automation/script/template-pytorch/main.py b/automation/script/template-pytorch/main.py new file mode 100644 index 0000000000..3e49da450f --- /dev/null +++ b/automation/script/template-pytorch/main.py @@ -0,0 +1,15 @@ +import os + +import torch + +if __name__ == "__main__": + + print ('') + print ('Main script:') + print ('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1',''))) + print ('ENV USE_CUDA: {}'.format(os.environ.get('USE_CUDA',''))) + print ('') + print ('PyTorch version: {}'.format(torch.__version__)) + print ('') + + exit(0) diff --git a/automation/script/template-pytorch/requirements.txt b/automation/script/template-pytorch/requirements.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/script/template-pytorch/run.bat b/automation/script/template-pytorch/run.bat new file mode 100644 index 0000000000..f9e1264bc8 --- /dev/null +++ b/automation/script/template-pytorch/run.bat @@ -0,0 +1,25 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV PIP_REQUIREMENTS: %PIP_REQUIREMENTS% +echo ENV CM_VAR1: %CM_VAR1% + +if "%PIP_REQUIREMENTS%" == "True" ( + if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( + + echo. + echo Installing requirements.txt ... + echo. + + %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + ) +) + +echo. 
+%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-pytorch/run.sh b/automation/script/template-pytorch/run.sh new file mode 100644 index 0000000000..a1a6aec2e2 --- /dev/null +++ b/automation/script/template-pytorch/run.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV PIP_REQUIREMENTS: ${PIP_REQUIREMENTS}" +echo "ENV CM_VAR1: ${CM_VAR1}" + +if [ "${PIP_REQUIREMENTS}" == "True" ]; then + if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then + echo "" + echo "Installing requirements.txt ..." + echo "" + + ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + test $? -eq 0 || exit 1 + fi +fi + +echo "" +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +test $? -eq 0 || exit 1 diff --git a/automation/script/template/README-extra.md b/automation/script/template/README-extra.md new file mode 100644 index 0000000000..582991f6d2 --- /dev/null +++ b/automation/script/template/README-extra.md @@ -0,0 +1 @@ +# CM script diff --git a/automation/script/template/customize.py b/automation/script/template/customize.py new file mode 100644 index 0000000000..d12f9b3e1d --- /dev/null +++ b/automation/script/template/customize.py @@ -0,0 +1,22 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/automation/script/template/run.bat b/automation/script/template/run.bat new file mode 100644 index 0000000000..648302ca71 --- /dev/null +++ b/automation/script/template/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/automation/script/template/run.sh b/automation/script/template/run.sh new file mode 100644 index 0000000000..3a584c10cf --- /dev/null +++ b/automation/script/template/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" diff --git a/automation/script/template_list_of_scripts.md b/automation/script/template_list_of_scripts.md new file mode 100644 index 0000000000..9158bc8e16 --- /dev/null +++ b/automation/script/template_list_of_scripts.md @@ -0,0 +1,52 @@ +[ [Back to index](README.md) ] + + + +This is an automatically generated list of portable and reusable automation recipes (CM scripts) +with a [human-friendly interface (CM)](https://github.com/mlcommons/ck) +to run a growing number of ad-hoc MLPerf, MLOps, and DevOps scripts +from [MLCommons projects](https://github.com/mlcommons/ck/tree/master/cm-mlops/script) +and [research papers](https://www.youtube.com/watch?v=7zpeIVwICa4) +in a unified way on any operating system with any software and hardware +natively or inside containers. 
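+
+As an illustration, any recipe in this list can also be invoked from Python in a few lines
+(a minimal sketch assuming the `cmind` package is installed; the `detect,os` tags are just
+one example, used as a dependency throughout this repository):
+
+```python
+import cmind
+
+# Run an automation recipe by its tags via the CM Python API
+# (roughly equivalent to `cm run script --tags=detect,os` on the CLI).
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'detect,os',
+                  'out': 'con'})
+
+# CM functions report errors via the 'return'/'error' keys instead of raising exceptions.
+if r['return'] > 0:
+    print(r['error'])
+```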
+
+Click on any automation recipe below to learn how to run and reuse it
+via the CM command line, Python API, or GUI.
+
+CM scripts can easily be chained together into automation workflows using `deps` and `tags` keys
+while automatically updating all environment variables and paths
+for a given task and platform [using simple JSON or YAML](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml).
+
+
+*Note that CM is a community project being developed and extended by [MLCommons members and individual contributors](../CONTRIBUTING.md) -
+ you can find the source code of CM scripts maintained by MLCommons [here](../cm-mlops/script).
+ Please join the [Discord server](https://discord.gg/JjWNWXKxwT) to participate in collaborative development or provide your feedback.*
+
+
+# License
+
+[Apache 2.0](LICENSE.md)
+
+
+# Copyright
+
+2022-2024 [MLCommons](https://mlcommons.org)
+
+
+
+
+
+# List of CM scripts by categories
+
+{{CM_TOC_CATEGORIES}}
+
+{{CM_TOC2}}
+
+# List of all sorted CM scripts
+
+{{CM_TOC}}
+
+
+{{CM_MAIN}}
diff --git a/automation/utils/README.md b/automation/utils/README.md
new file mode 100644
index 0000000000..9a844c6566
--- /dev/null
+++ b/automation/utils/README.md
@@ -0,0 +1,387 @@
+*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!*
+
+### Automation actions
+
+#### test
+
+ * CM CLI: ```cm test utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15))
+ * CM CLI with UID: ```cm test utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cm.access({
+ 'action':'test'
+ 'automation':'utils,dc2743f8450541e3'
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### get_host_os_info
+
+ * CM CLI: ```cm get_host_os_info utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54))
+ * CM CLI with UID: ```cm get_host_os_info utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cm.access({
+ 'action':'get_host_os_info'
+ 'automation':'utils,dc2743f8450541e3'
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### download_file
+
+ * CM CLI: ```cm download_file utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156))
+ * CM CLI with UID: ```cm download_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cm.access({
+ 'action':'download_file'
+ 'automation':'utils,dc2743f8450541e3'
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### unzip_file
+
+ * CM CLI: ```cm unzip_file utils``` ([add flags
(dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265)) + * CM CLI with UID: ```cm unzip_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'unzip_file' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### compare_versions + + * CM CLI: ```cm compare_versions utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343)) + * CM CLI with UID: ```cm compare_versions utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'compare_versions' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### json2yaml + + * CM CLI: ```cm json2yaml utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391)) + * CM CLI with UID: ```cm json2yaml utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'json2yaml' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### yaml2json + + * CM CLI: ```cm yaml2json utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429)) + * CM CLI with UID: ```cm yaml2json utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'yaml2json' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### sort_json + + * CM CLI: ```cm sort_json utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467)) + * CM CLI with UID: ```cm sort_json utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'sort_json' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### dos2unix + + * CM CLI: ```cm dos2unix utils``` ([add flags (dict keys) from this 
API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504)) + * CM CLI with UID: ```cm dos2unix utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'dos2unix' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### replace_string_in_file + + * CM CLI: ```cm replace_string_in_file utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541)) + * CM CLI with UID: ```cm replace_string_in_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'replace_string_in_file' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### create_toc_from_md + + * CM CLI: ```cm create_toc_from_md utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591)) + * CM CLI with UID: ```cm create_toc_from_md utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'create_toc_from_md' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### copy_to_clipboard + + * CM CLI: ```cm copy_to_clipboard utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659)) + * CM CLI with UID: ```cm copy_to_clipboard utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'copy_to_clipboard' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### list_files_recursively + + * CM CLI: ```cm list_files_recursively utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737)) + * CM CLI with UID: ```cm list_files_recursively utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'list_files_recursively' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737) + ```python + }) + if r['return']>0: + print(r['error']) + ``` 
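+
+All of the Python fragments on this page follow the same calling pattern; assembled into one
+self-contained, runnable sketch (assuming the `cmind` package is installed), it looks like this:
+
+```python
+import cmind
+
+# Call one of the utils actions documented on this page; extra flags
+# from the linked module.py APIs are passed as additional dict keys.
+r = cmind.access({'action': 'get_host_os_info',
+                  'automation': 'utils,dc2743f8450541e3',
+                  'out': 'con'})
+
+# Every CM function returns a dict with 'return' (0 on success) and 'error' on failure.
+if r['return'] > 0:
+    print(r['error'])
+else:
+    print(r['info']['platform'], r['info']['bits'])
+```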
+ +#### generate_secret + + * CM CLI: ```cm generate_secret utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770)) + * CM CLI with UID: ```cm generate_secret utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'generate_secret' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### detect_tags_in_artifact + + * CM CLI: ```cm detect_tags_in_artifact utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793)) + * CM CLI with UID: ```cm detect_tags_in_artifact utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'detect_tags_in_artifact' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### prune_input + + * CM CLI: ```cm prune_input utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822)) + * CM CLI with UID: ```cm prune_input utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'prune_input' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### uid + + * CM CLI: ```cm uid utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864)) + * CM CLI with UID: ```cm uid utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'uid' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### system + + * CM CLI: ```cm system utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891)) + * CM CLI with UID: ```cm system utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'system' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### load_cfg + + * CM 
CLI: ```cm load_cfg utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969)) + * CM CLI with UID: ```cm load_cfg utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'load_cfg' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/automation/utils/_cm.json b/automation/utils/_cm.json new file mode 100644 index 0000000000..f2dc9c5b66 --- /dev/null +++ b/automation/utils/_cm.json @@ -0,0 +1,12 @@ +{ + "alias": "utils", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "desc": "Accessing various CM utils", + "developers": "[Grigori Fursin](https://cKnowledge.org/gfursin)", + "sort": 800, + "tags": [ + "automation" + ], + "uid": "dc2743f8450541e3" +} diff --git a/automation/utils/module.py b/automation/utils/module.py new file mode 100644 index 0000000000..084431a39b --- /dev/null +++ b/automation/utils/module.py @@ -0,0 +1,986 @@ +import os + +from cmind.automation import Automation +from cmind import utils + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... 
+ + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print (json.dumps(i, indent=2)) + + return {'return':0} + + ############################################################################## + def get_host_os_info(self, i): + """ + Get some host platform name (currently windows or linux) and OS bits + + Args: + (CM input dict): + + (bits) (str): force host platform bits + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * info (dict): + * platform (str): "windows", "linux" or "darwin" + * bat_ext (str): ".bat" or ".sh" + * bits (str): 32 or 64 bits + * python_bits 9str): python bits + + """ + + import os + import platform + import struct + + info = {} + + pbits = str(8 * struct.calcsize("P")) + + if platform.system().lower().startswith('win'): + platform = 'windows' + info['bat_ext']='.bat' + info['set_env']='set ${key}=${value}' + info['env_separator']=';' + info['env_var']='%env_var%' + info['bat_rem']='rem ${rem}' + info['run_local_bat']='call ${bat_file}' + info['run_local_bat_from_python']='call ${bat_file}' + info['run_bat']='call ${bat_file}' + info['start_script']=['@echo off', ''] + info['env']={ + "CM_WINDOWS":"yes" + } + else: + if platform.system().lower().startswith('darwin'): + platform = 'darwin' + else: + platform = 'linux' + + info['bat_ext']='.sh' + info['set_env']='export ${key}="${value}"' + info['env_separator']=':' + info['env_var']='${env_var}' + info['set_exec_file']='chmod 755 "${file_name}"' + info['bat_rem']='# ${rem}' + info['run_local_bat']='. ./${bat_file}' + info['run_local_bat_from_python']='bash -c ". ./${bat_file}"' + info['run_bat']='. 
${bat_file}' + info['start_script']=['#!/bin/bash', ''] + info['env']={} + + info['platform'] = platform + + obits = i.get('bits', '') + if obits == '': + obits = '32' + if platform == 'windows': + # Trying to get fast way to detect bits + if os.environ.get('ProgramW6432', '') != '' or os.environ.get('ProgramFiles(x86)', '') != '': # pragma: no cover + obits = '64' + else: + # On Linux use first getconf LONG_BIT and if doesn't work use python bits + + obits = pbits + + r = utils.gen_tmp_file({}) + if r['return'] > 0: + return r + + fn = r['file_name'] + + cmd = 'getconf LONG_BIT > '+fn + rx = os.system(cmd) + + if rx == 0: + r = utils.load_txt(file_name = fn, remove_after_read = True) + + if r['return'] == 0: + s = r['string'].strip() + if len(s) > 0 and len(s) < 4: + obits = s + else: + if os.path.isfile(fn): os.remove(fn) + + info['bits'] = obits + info['python_bits'] = pbits + + return {'return': 0, 'info': info} + + ############################################################################## + def download_file(self, i): + """ + Download file using requests + + Args: + (CM input dict): + + url (str): URL with file + (filename) (str): explicit file name + (path) (str): path to record file (or current if empty) + (chunk_size) (int): chunck size in bytes (65536 by default) + (text) (str): print text before downloaded status ("Downloaded: " by default) + (verify) (bool): verify SSL certificate if True (True by default) + can be switched by global env CM_UTILS_DOWNLOAD_VERIFY_SSL = no + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * filename (str): file name + * path (str): path to file + * size (int): file size + + """ + + import requests + import time + import sys + from urllib import parse + + # Get URL + url = i['url'] + + # Check file name + file_name = i.get('filename','') + if file_name == '': + parsed_url = parse.urlparse(url) + file_name = os.path.basename(parsed_url.path) + + # Check path + path = i.get('path','') + if path is None or path=='': + path = os.getcwd() + + # Output file + path_to_file = os.path.join(path, file_name) + + if os.path.isfile(path_to_file): + os.remove(path_to_file) + + print ('Downloading to {}'.format(path_to_file)) + print ('') + + # Download + size = -1 + downloaded = 0 + chunk_size = i.get('chunk_size', 65536) + + text = i.get('text','Downloaded: ') + + if 'CM_UTILS_DOWNLOAD_VERIFY_SSL' in os.environ: + verify = os.environ['CM_UTILS_DOWNLOAD_VERIFY_SSL'] == 'yes' + else: + verify = i.get('verify', True) + + try: + with requests.get(url, stream=True, allow_redirects=True, verify=verify) as download: + download.raise_for_status() + + size_string = download.headers.get('Content-Length') + + if size_string is None: + transfer_encoding = download.headers.get('Transfer-Encoding', '') + if transfer_encoding != 'chunked': + return {'return':1, 'error':'did not receive file'} + else: + size_string = "0" + + size = int(size_string) + + with open(path_to_file, 'wb') as output: + for chunk in download.iter_content(chunk_size = chunk_size): + + if chunk: + output.write(chunk) + if size == 0: + continue + downloaded+=1 + percent = downloaded * chunk_size * 100 / size + + sys.stdout.write("\r{}{:3.0f}%".format(text, percent)) + sys.stdout.flush() + + sys.stdout.write("\r{}{:3.0f}%".format(text, 100)) + sys.stdout.flush() + + except Exception as e: + return {'return':1, 'error':format(e)} + + print ('') + if size == 0: + file_stats=os.stat(path_to_file) + size = 
file_stats.st_size + + return {'return': 0, 'filename':file_name, 'path': path_to_file, 'size':size} + + ############################################################################## + def unzip_file(self, i): + """ + Unzip file + + Args: + (CM input dict): + + filename (str): explicit file name + (path) (str): path where to unzip file (current path otherwise) + (strip_folders) (int): strip first folders + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + import zipfile + + # Check file name + file_name = i['filename'] + + if not os.path.isfile(file_name): + return {'return':1, 'error':'file {} not found'.format(file_name)} + + console = i.get('out') == 'con' + + # Attempt to read cmr.json + file_name_handle = open(file_name, 'rb') + file_name_zip = zipfile.ZipFile(file_name_handle) + + info_files=file_name_zip.infolist() + + path=i.get('path','') + if path is None or path=='': + path=os.getcwd() + + strip_folders = i.get('strip_folders',0) + + # Unpacking zip + for info in info_files: + f = info.filename + permissions = info.external_attr + + if not f.startswith('..') and not f.startswith('/') and not f.startswith('\\'): + f_zip = f + + if strip_folders>0: + fsplit = f.split('/') # Zip standard on all OS + f = '/'.join(fsplit[strip_folders:]) + + file_path = os.path.join(path, f) + + if f.endswith('/'): + # create directory + if not os.path.exists(file_path): + os.makedirs(file_path) + else: + dir_name = os.path.dirname(file_path) + if not os.path.exists(dir_name): + os.makedirs(dir_name) + + # extract file + file_out = open(file_path, 'wb') + file_out.write(file_name_zip.read(f_zip)) + file_out.close() + + if permissions > 0xffff: + os.chmod(file_path, permissions >> 16) + + file_name_zip.close() + file_name_handle.close() + + return {'return':0} + + ############################################################################## + def compare_versions(self, i): + """ + Compare versions + + Args: + + version1 (str): version 1 + version2 (str): version 2 + + Returns: + (CM return dict): + + * comparison (int): 1 - version 1 > version 2 + 0 - version 1 == version 2 + -1 - version 1 < version 2 + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + version1 = i['version1'] + version2 = i['version2'] + + l_version1 = version1.split('.') + l_version2 = version2.split('.') + + # 3.9.6 vs 3.9 + # 3.9 vs 3.9.6 + + i_version1 = [int(v) if v.isdigit() else v for v in l_version1] + i_version2 = [int(v) if v.isdigit() else v for v in l_version2] + + comparison = 0 + + for index in range(max(len(i_version1), len(i_version2))): + v1 = i_version1[index] if index < len(i_version1) else 0 + v2 = i_version2[index] if index < len(i_version2) else 0 + + if v1 > v2: + comparison = 1 + break + elif v1 < v2: + comparison = -1 + break + + return {'return':0, 'comparison': comparison} + + ############################################################################## + def json2yaml(self, i): + """ + Convert JSON file to YAML + + Args: + + input (str): input file (.json) + (output) (str): output file (.yaml) + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + input_file = i.get('input','') + + if input_file == '': + return {'return':1, 'error':'please specify --input={json file}'} + + output_file = i.get('output','') + + r = utils.load_json(input_file, 
check_if_exists = True)
+        if r['return']>0: return r
+
+        meta = r['meta']
+
+        if output_file=='':
+            output_file = input_file[:-5] if input_file.endswith('.json') else input_file
+            output_file+='.yaml'
+
+        r = utils.save_yaml(output_file, meta)
+        if r['return']>0: return r
+
+        return {'return':0}
+
+    ##############################################################################
+    def yaml2json(self, i):
+        """
+        Convert YAML file to JSON
+
+        Args:
+
+           input (str): input file (.yaml)
+           (output) (str): output file (.json)
+
+        Returns:
+           (CM return dict):
+
+           * return (int): return code == 0 if no error and >0 if error
+           * (error) (str): error string if return>0
+        """
+
+        input_file = i.get('input','')
+
+        if input_file == '':
+            return {'return':1, 'error':'please specify --input={yaml file}'}
+
+        output_file = i.get('output','')
+
+        r = utils.load_yaml(input_file, check_if_exists = True)
+        if r['return']>0: return r
+
+        meta = r['meta']
+
+        if output_file=='':
+            output_file = input_file[:-5] if input_file.endswith('.yaml') else input_file
+            output_file+='.json'
+
+        r = utils.save_json(output_file, meta)
+        if r['return']>0: return r
+
+        return {'return':0}
+
+    ##############################################################################
+    def sort_json(self, i):
+        """
+        Sort keys in a JSON file
+
+        Args:
+
+           input (str): input file (.json)
+           (output) (str): output file
+
+        Returns:
+           (CM return dict):
+
+           * return (int): return code == 0 if no error and >0 if error
+           * (error) (str): error string if return>0
+        """
+
+        input_file = i.get('input','')
+
+        if input_file == '':
+            return {'return':1, 'error':'please specify --input={json file}'}
+
+        r = utils.load_json(input_file, check_if_exists = True)
+        if r['return']>0: return r
+
+        meta = r['meta']
+
+        output_file = i.get('output','')
+
+        if output_file=='':
+            output_file = input_file
+
+        r = utils.save_json(output_file, meta, sort_keys=True)
+        if r['return']>0: return r
+
+        return {'return':0}
+
+    ##############################################################################
+    def dos2unix(self, i):
+        """
+        Convert DOS file to UNIX (remove \r)
+
+        Args:
+
+           input (str): input file (.txt)
+           (output) (str): output file
+
+        Returns:
+           (CM return dict):
+
+           * return (int): return code == 0 if no error and >0 if error
+           * (error) (str): error string if return>0
+        """
+
+        input_file = i.get('input','')
+
+        if input_file == '':
+            return {'return':1, 'error':'please specify --input={txt file}'}
+
+        r = utils.load_txt(input_file, check_if_exists = True)
+        if r['return']>0: return r
+
+        s = r['string'].replace('\r','')
+
+        output_file = i.get('output','')
+
+        if output_file=='':
+            output_file = input_file
+
+        r = utils.save_txt(output_file, s)
+        if r['return']>0: return r
+
+        return {'return':0}
+
+    ##############################################################################
+    def replace_string_in_file(self, i):
+        """
+        Replace a string in a file (line endings are normalized by removing \r)
+
+        Args:
+
+           input (str): input file (.txt)
+           (output) (str): output file
+           string (str): string to replace
+           replacement (str): replacement string
+
+        Returns:
+           (CM return dict):
+
+           * return (int): return code == 0 if no error and >0 if error
+           * (error) (str): error string if return>0
+
+           (update) (bool): True if file was updated
+        """
+
+        input_file = i.get('input', '')
+        if input_file == '':
+            return {'return':1, 'error':'please specify --input={txt file}'}
+
+        string = i.get('string', '')
+        if string == '':
+            return {'return':1, 'error':'please specify --string={string to replace}'}
+
+        replacement = i.get('replacement', '')
+        if replacement == '':
+            return {'return':1, 'error':'please specify --replacement={replacement string}'}
+
+        output_file = i.get('output','')
+
+        if output_file=='':
+            output_file = input_file
+
+        r = utils.load_txt(input_file, check_if_exists = True)
+        if r['return']>0: return r
+
+        s = r['string'].replace('\r','')
+
+        s = s.replace(string, replacement)
+
+        r = utils.save_txt(output_file, s)
+        if r['return']>0: return r
+
+        return {'return':0}
+
+    ##############################################################################
+    def create_toc_from_md(self, i):
+        """
+        Create a table of contents (TOC) from a Markdown file
+
+        Args:
+
+           input (str): input file (.md)
+           (output) (str): output file (input + '.toc' by default)
+
+        Returns:
+           (CM return dict):
+
+           * return (int): return code == 0 if no error and >0 if error
+           * (error) (str): error string if return>0
+        """
+
+        input_file = i.get('input', '')
+        if input_file == '':
+            return {'return':1, 'error':'please specify --input={md file}'}
+
+        output_file = i.get('output','')
+
+        if output_file=='':
+            output_file = input_file + '.toc'
+
+        r = utils.load_txt(input_file, check_if_exists = True)
+        if r['return']>0: return r
+
+        lines = r['string'].split('\n')
+
+        toc = []
+
+        # Wrap the TOC in a collapsible HTML block
+        toc.append('<details>')
+        toc.append('<summary>Click here to see the table of contents.</summary>')
+        toc.append('')
+
+        for line in lines:
+            line = line.strip()
+
+            if line.startswith('#'):
+                j = line.find(' ')
+                if j>=0:
+                    title = line[j:].strip()
+
+                    x = title.lower().replace(' ','-')
+
+                    # Strip Markdown emphasis and punctuation to build the anchor
+                    for k in range(0,2):
+                        if x.startswith('*'):
+                            x=x[1:]
+                        if x.endswith('*'):
+                            x=x[:-1]
+
+                    for z in [':', '+', '.', '(', ')', ',']:
+                        x = x.replace(z, '')
+
+                    y = ' '*(2*(j-1)) + '* ['+title+'](#'+x+')'
+
+                    toc.append(y)
+
+        toc.append('')
+        toc.append('</details>
') + + r = utils.save_txt(output_file, '\n'.join(toc)+'\n') + if r['return']>0: return r + + return {'return':0} + + ############################################################################## + def copy_to_clipboard(self, i): + """ + Copy string to a clipboard + + Args: + + string (str): string to copy to a clipboard + (add_quotes) (bool): add quotes to the string in a clipboard + (skip_fail) (bool): if True, do not fail + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + s = i.get('string','') + + if i.get('add_quotes',False): s='"'+s+'"' + + failed = False + warning = '' + + # Try to load pyperclip (seems to work fine on Windows) + try: + import pyperclip + except Exception as e: + warning = format(e) + failed = True + pass + + if not failed: + pyperclip.copy(s) + else: + failed = False + + # Try to load Tkinter + try: + from Tkinter import Tk + except ImportError as e: + warning = format(e) + failed = True + pass + + if failed: + failed = False + try: + from tkinter import Tk + except ImportError as e: + warning = format(e) + failed = True + pass + + if not failed: + # Copy to clipboard + try: + r = Tk() + r.withdraw() + r.clipboard_clear() + r.clipboard_append(s) + r.update() + r.destroy() + except Exception as e: + failed = True + warning = format(e) + + rr = {'return':0} + + if failed: + if not i.get('skip_fail',False): + return {'return':1, 'error':warning} + + rr['warning']=warning + + return rr + + ############################################################################## + def list_files_recursively(self, i): + """ + List files and concatenate into string separate by comma + + Args: + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + files = os.walk('.') + + s = '' + + for (dir_path, dir_names, file_names) in files: + for f in file_names: + if s!='': s+=',' + + if dir_path=='.': + dir_path2='' + else: + dir_path2=dir_path[2:].replace('\\','/')+'/' + + s+=dir_path2+f + + print (s) + + return {'return':0} + + ############################################################################## + def generate_secret(self, i): + """ + Generate secret for web apps + + Args: + + Returns: + (CM return dict): + + secret (str): secret + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + import secrets + s = secrets.token_urlsafe(16) + + print (s) + + return {'return':0, 'secret': s} + + ############################################################################## + def detect_tags_in_artifact(self, i): + """ + Detect if there are tags in an artifact name (spaces) and update input + + Args: + + input (dict) : original input + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + inp = i['input'] + + artifact = inp.get('artifact','') + if artifact == '.': + del(inp['artifact']) + elif ' ' in artifact: # or ',' in artifact: + del(inp['artifact']) + if 'parsed_artifact' in inp: del(inp['parsed_artifact']) + # Force substitute tags + inp['tags']=artifact.replace(' ',',') + + return {'return':0} + + ############################################################################## + def prune_input(self, i): + """ + Leave only input keys and remove the rest (to regenerate CM commands) + + Args: + + input (dict) : original input + 
(extra_keys_starts_with) (list): remove keys that starts + with the ones from this list + + Returns: + (CM return dict): + + new_input (dict): pruned input + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + import copy + + inp = i['input'] + extra_keys = i.get('extra_keys_starts_with',[]) + + i_run_cmd_arc = copy.deepcopy(inp) + for k in inp: + remove = False + if k in ['action', 'automation', 'cmd', 'out', 'parsed_automation', 'parsed_artifact', 'self_module']: + remove = True + if not remove: + for ek in extra_keys: + if k.startswith(ek): + remove = True + break + + if remove: + del(i_run_cmd_arc[k]) + + return {'return':0, 'new_input':i_run_cmd_arc} + + + ############################################################################## + def uid(self, i): + """ + Generate CM UID. + + Args: + (CM input dict): empty dict + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * uid (str): CM UID + """ + + console = i.get('out') == 'con' + + r = utils.gen_uid() + + if console: + print (r['uid']) + + return r + + + ############################################################################## + def system(self, i): + """ + Run system command and redirect output to string. + + Args: + (CM input dict): + + * cmd (str): command line + * (path) (str): go to this directory and return back to current + * (stdout) (str): stdout file + * (stderr) (str): stderr file + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * ret (int): return code + * std (str): stdout + stderr + * stdout (str): stdout + * stderr (str): stderr + """ + + cmd = i['cmd'] + + if cmd == '': + return {'return':1, 'error': 'cmd is empty'} + + path = i.get('path','') + if path!='' and os.path.isdir(path): + cur_dir = os.getcwd() + os.chdir(path) + + if i.get('stdout','')!='': + fn1=i['stdout'] + fn1_delete = False + else: + r = utils.gen_tmp_file({}) + if r['return'] > 0: return r + fn1 = r['file_name'] + fn1_delete = True + + if i.get('stderr','')!='': + fn2=i['stderr'] + fn2_delete = False + else: + r = utils.gen_tmp_file({}) + if r['return'] > 0: return r + fn2 = r['file_name'] + fn2_delete = True + + cmd += ' > '+fn1 + ' 2> '+fn2 + rx = os.system(cmd) + + std = '' + stdout = '' + stderr = '' + + if os.path.isfile(fn1): + r = utils.load_txt(file_name = fn1, remove_after_read = fn1_delete) + if r['return'] == 0: stdout = r['string'].strip() + + if os.path.isfile(fn2): + r = utils.load_txt(file_name = fn2, remove_after_read = fn2_delete) + if r['return'] == 0: stderr = r['string'].strip() + + std = stdout + if stderr!='': + if std!='': std+='\n' + std+=stderr + + if path!='' and os.path.isdir(path): + os.chdir(cur_dir) + + return {'return':0, 'ret':rx, 'stdout':stdout, 'stderr':stderr, 'std':std} + + ############################################################ + def load_cfg(self, i): + """ + Load configuration artifacts and files + + Args: + (CM input dict): + + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + return utils.call_internal_module(self, __file__, 'module_cfg', 'load_cfg', i) diff --git a/automation/utils/module_cfg.py b/automation/utils/module_cfg.py new file mode 100644 index 0000000000..b70f2145cf --- /dev/null +++ b/automation/utils/module_cfg.py @@ -0,0 +1,225 @@ 
+import os +import cmind +import copy + +base_path={} +base_path_meta={} + +################################################################################## +def load_cfg(i): + + tags = i.get('tags','') + artifact = i.get('artifact','') + + key = i.get('key', '') + key_end = i.get('key_end', []) + + ii={'action':'find', + 'automation':'cfg'} + if artifact!='': + ii['artifact']=artifact + elif tags!='': + ii['tags']=tags + + r=cmind.access(ii) + if r['return']>0: return r + + lst = r['list'] + + prune = i.get('prune',{}) + prune_key = prune.get('key', '') + prune_key_uid = prune.get('key_uid', '') + prune_meta_key = prune.get('meta_key', '') + prune_meta_key_uid = prune.get('meta_key_uid', '') + prune_uid = prune.get('uid', '') + prune_list = prune.get('list',[]) + + # Checking individual files inside CM entry + selection = [] + + if i.get('skip_files', False): + for l in lst: + meta = l.meta + full_path = l.path + + meta['full_path']=full_path + + add = True + + if prune_key!='' and prune_key_uid!='': + if prune_key_uid not in meta.get(prune_key, []): + add = False + + if add: + selection.append(meta) + else: + for l in lst: + path = l.path + + main_meta = l.meta + + skip = False + + if prune_meta_key!='' and prune_meta_key_uid!='': + if prune_meta_key_uid not in main_meta.get(prune_meta_key, []): + skip = True + + if skip: + continue + + all_tags = main_meta.get('tags',[]) + + files = os.listdir(path) + + for f in files: + if key!='' and not f.startswith(key): + continue + + if f.startswith('_') or (not f.endswith('.json') and not f.endswith('.yaml')): + continue + + if len(key_end)>0: + skip = True + for ke in key_end: + if f.endswith(ke): + skip = False + break + if skip: + continue + + full_path = os.path.join(path, f) + + full_path_without_ext = full_path[:-5] + + r = cmind.utils.load_yaml_and_json(full_path_without_ext) + if r['return']>0: + print ('Warning: problem loading file {}'.format(full_path)) + else: + meta = r['meta'] + + # Check base + r = process_base(meta, full_path) + if r['return']>0: return r + meta = r['meta'] + + uid = meta['uid'] + + # Check pruning + add = True + + if len(prune)>0: + if prune_uid!='' and uid != prune_uid: + add = False + + if add and len(prune_list)>0 and uid not in prune_list: + add = False + + if add and prune_key!='' and prune_key_uid!='' and prune_key_uid != meta.get(prune_key, None): + add = False + + if add: + meta['full_path']=full_path + + add_all_tags = copy.deepcopy(all_tags) + + name = meta.get('name','') + if name=='': + name = ' '.join(meta.get('tags',[])) + name = name.strip() + meta['name'] = name + + file_tags = meta.get('tags', '').strip() + if file_tags=='': + if name!='': + add_all_tags += [v.lower() for v in name.split(' ')] + else: + add_all_tags += file_tags.split(',') + + meta['all_tags']=add_all_tags + + meta['main_meta']=main_meta + + selection.append(meta) + + return {'return':0, 'lst':lst, 'selection':selection} + +################################################################################## +def process_base(meta, full_path): + + global base_path, base_path_meta + + _base = meta.get('_base', '') + if _base != '': + name = '' + + filename = _base + full_path_base = os.path.dirname(full_path) + + if not filename.endswith('.yaml') and not filename.endswith('.json'): + return {'return':1, 'error':'_base file {} in {} must be .yaml or .json'.format(filename, full_path)} + + if ':' in _base: + x = _base.split(':') + name = x[0] + + full_path_base = base_path.get(name, '') + if full_path_base == '': + + # Find artifact 
+                r = cmind.access({'action':'find',
+                                  'automation':'cfg',
+                                  'artifact':name})
+                if r['return']>0: return r
+
+                lst = r['list']
+
+                if len(lst)==0:
+                    return {'return':1, 'error':'_base artifact {} not found in {}'.format(name, full_path)}
+
+                full_path_base = lst[0].path
+
+                base_path[name] = full_path_base
+
+            filename = x[1]
+
+        # Load base
+        path = os.path.join(full_path_base, filename)
+
+        if not os.path.isfile(path):
+            return {'return':1, 'error':'_base file {} not found in {}'.format(filename, full_path)}
+
+        if path in base_path_meta:
+            base = copy.deepcopy(base_path_meta[path])
+        else:
+            path_without_ext = path[:-5]
+
+            r = cmind.utils.load_yaml_and_json(path_without_ext)
+            if r['return']>0: return r
+
+            base = r['meta']
+
+            base_path_meta[path]=copy.deepcopy(base)
+
+        for k in meta:
+            v = meta[k]
+
+            if k not in base:
+                base[k]=v
+            else:
+                if isinstance(v, str):
+                    # Concatenate a few special string keys; overwrite the rest
+                    if k in ['tags','name']:
+                        base[k] += meta[k]
+                    else:
+                        base[k] = meta[k]
+
+                elif isinstance(v, list):
+                    for vv in v:
+                        base[k].append(vv)
+                elif isinstance(v, dict):
+                    # Plain dicts have no 'merge' method; do a shallow update
+                    base[k].update(v)
+
+        meta = base
+
+    return {'return':0, 'meta':meta}
diff --git a/cfg/benchmark-hardware-compute/_cm.json b/cfg/benchmark-hardware-compute/_cm.json
new file mode 100644
index 0000000000..6877b34a7e
--- /dev/null
+++ b/cfg/benchmark-hardware-compute/_cm.json
@@ -0,0 +1,10 @@
+{
+  "alias": "benchmark-hardware-compute",
+  "automation_alias": "cfg",
+  "automation_uid": "88dce9c160324c5d",
+  "tags": [
+    "benchmark",
+    "compute"
+  ],
+  "uid": "ca67f372e7294afd"
+}
diff --git a/cfg/benchmark-hardware-compute/amd-cpu-x64.json b/cfg/benchmark-hardware-compute/amd-cpu-x64.json
new file mode 100644
index 0000000000..53f295d729
--- /dev/null
+++ b/cfg/benchmark-hardware-compute/amd-cpu-x64.json
@@ -0,0 +1,6 @@
+{
+  "uid": "cdfd424c32734e38",
+  "name": "AMD - x64",
+  "tags": "cpu,x64,generic,amd",
+  "mlperf_inference_device": "cpu"
+}
diff --git a/cfg/benchmark-hardware-compute/amd-gpu.json b/cfg/benchmark-hardware-compute/amd-gpu.json
new file mode 100644
index 0000000000..d70e1d1554
--- /dev/null
+++ b/cfg/benchmark-hardware-compute/amd-gpu.json
@@ -0,0 +1,6 @@
+{
+  "uid": "d8f06040f7294319",
+  "name": "AMD - GPU",
+  "tags": "gpu,amd",
+  "mlperf_inference_device": "rocm"
+}
diff --git a/cfg/benchmark-hardware-compute/generic-cpu-arm64.json b/cfg/benchmark-hardware-compute/generic-cpu-arm64.json
new file mode 100644
index 0000000000..7af318b27b
--- /dev/null
+++ b/cfg/benchmark-hardware-compute/generic-cpu-arm64.json
@@ -0,0 +1,6 @@
+{
+  "uid": "357a972e79614903",
+  "name": "Arm - AArch64",
+  "tags": "cpu,arm64,aarch64,generic",
+  "mlperf_inference_device": "cpu"
+}
diff --git a/cfg/benchmark-hardware-compute/google-tpu.json b/cfg/benchmark-hardware-compute/google-tpu.json
new file mode 100644
index 0000000000..2bb4d22cf5
--- /dev/null
+++ b/cfg/benchmark-hardware-compute/google-tpu.json
@@ -0,0 +1,6 @@
+{
+  "uid": "b3be7ac9ef954f5a",
+  "name": "Google - TPU",
+  "tags": "tpu,google",
+  "mlperf_inference_device": "tpu"
+}
diff --git a/cfg/benchmark-hardware-compute/habana-gaudi.json b/cfg/benchmark-hardware-compute/habana-gaudi.json
new file mode 100644
index 0000000000..b6caa96554
--- /dev/null
+++ b/cfg/benchmark-hardware-compute/habana-gaudi.json
@@ -0,0 +1,6 @@
+{
+  "uid": "a42388a2a8cd412c",
+  "name": "Intel/Habana - Gaudi 2",
+  "tags": "gaudi,habana",
+  "mlperf_inference_device": "gaudi"
+}
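A minimal usage sketch for the loader above; it assumes cmind is installed, this repository has been registered with CM (e.g. via "cm pull repo mlcommons@cm4mlops"), and the script runs from the repository root. The sys.path manipulation and the selected tags are illustrative only.

import sys
sys.path.insert(0, 'automation/utils')   # location of module_cfg.py in this repository

import module_cfg

# Select every cfg entry tagged "benchmark,compute" (the JSON files below)
# and keep only the files whose meta sets 'mlperf_inference_device' to 'cpu'.
r = module_cfg.load_cfg({'tags': 'benchmark,compute',
                         'prune': {'key': 'mlperf_inference_device',
                                   'key_uid': 'cpu'}})
if r['return'] > 0:
    print('Error: {}'.format(r.get('error', '')))
else:
    for meta in r['selection']:
        print(meta['uid'], meta['name'], meta['full_path'])

diff --git a/cfg/benchmark-hardware-compute/intel-cpu-x64.json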
b/cfg/benchmark-hardware-compute/intel-cpu-x64.json new file mode 100644 index 0000000000..2e8ab51c4a --- /dev/null +++ b/cfg/benchmark-hardware-compute/intel-cpu-x64.json @@ -0,0 +1,6 @@ +{ + "uid": "ee8c568e0ac44f2b", + "name": "Intel - x64", + "tags": "cpu,x64,generic,intel", + "mlperf_inference_device": "cpu" +} diff --git a/cfg/benchmark-hardware-compute/nvidia-gpu-jetson-orin.yaml b/cfg/benchmark-hardware-compute/nvidia-gpu-jetson-orin.yaml new file mode 100644 index 0000000000..d8b9787c65 --- /dev/null +++ b/cfg/benchmark-hardware-compute/nvidia-gpu-jetson-orin.yaml @@ -0,0 +1,7 @@ +uid: fe379ecd1e054a00 + +tags: gpu,nvidia,jetson,orin + +name: "Nvidia - GPU - Jetson Orin" + +mlperf_inference_device: cuda diff --git a/cfg/benchmark-hardware-compute/nvidia-gpu.json b/cfg/benchmark-hardware-compute/nvidia-gpu.json new file mode 100644 index 0000000000..5bc7582532 --- /dev/null +++ b/cfg/benchmark-hardware-compute/nvidia-gpu.json @@ -0,0 +1,6 @@ +{ + "uid": "fe379ecd1e054a00", + "name": "Nvidia - GPU", + "tags": "gpu,nvidia", + "mlperf_inference_device": "cuda" +} diff --git a/cfg/benchmark-hardware-compute/qualcomm-ai100.json b/cfg/benchmark-hardware-compute/qualcomm-ai100.json new file mode 100644 index 0000000000..aa84e57351 --- /dev/null +++ b/cfg/benchmark-hardware-compute/qualcomm-ai100.json @@ -0,0 +1,6 @@ +{ + "uid": "d2ae645066664463", + "name": "Qualcomm - AI 100", + "tags": "accelerator,acc,qualcomm,ai,100,ai-100", + "mlperf_inference_device": "qaic" +} diff --git a/cfg/benchmark-hardware-compute/stm-32L4R5ZIT6U-NUCLEO-L4R5ZI.yaml b/cfg/benchmark-hardware-compute/stm-32L4R5ZIT6U-NUCLEO-L4R5ZI.yaml new file mode 100644 index 0000000000..c6d06e9b43 --- /dev/null +++ b/cfg/benchmark-hardware-compute/stm-32L4R5ZIT6U-NUCLEO-L4R5ZI.yaml @@ -0,0 +1,5 @@ +uid: 2cd26d4f92ca4b85 + +tags: stm,stm32,stm32l4r5zit6u,nucleo,l4r5zi + +name: "STM32L4R5ZIT6U - NUCLEO-L4R5ZI" diff --git a/cfg/benchmark-list/_cm.json b/cfg/benchmark-list/_cm.json new file mode 100644 index 0000000000..533c86271a --- /dev/null +++ b/cfg/benchmark-list/_cm.json @@ -0,0 +1,10 @@ +{ + "alias": "benchmark-list", + "automation_alias": "cfg", + "automation_uid": "88dce9c160324c5d", + "tags": [ + "benchmark", + "list" + ], + "uid": "15291dfc4f904146" +} diff --git a/cfg/benchmark-list/loadgen-cpp.yaml b/cfg/benchmark-list/loadgen-cpp.yaml new file mode 100644 index 0000000000..5a3f75a85a --- /dev/null +++ b/cfg/benchmark-list/loadgen-cpp.yaml @@ -0,0 +1,19 @@ +uid: f594dc94b2714713 + +tags: benchmark,run,loadgen,cpp + +name: "ML models with LoadGen (C++; Linux/MacOS/Windows) - dev" + +urls: +- name: "GitHub dev page" + url: "https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-cpp" + +supported_compute: +- cpu,x64 +- gpu,nvidia + +script_name: run-mlperf-inference-app,4a5d5b13fd7e4ac8 + +bench_input: + mlperf_inference_implementation: mil + \ No newline at end of file diff --git a/cfg/benchmark-list/loadgen-python.yaml b/cfg/benchmark-list/loadgen-python.yaml new file mode 100644 index 0000000000..0ac5805022 --- /dev/null +++ b/cfg/benchmark-list/loadgen-python.yaml @@ -0,0 +1,16 @@ +uid: 0d6b54eb27d1454e + +tags: benchmark,run,loadgen,python + +name: "ML models with LoadGen (Python; Linux/MacOS/Windows) - dev" + +urls: +- name: "GitHub dev page" + url: "https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-loadgen-generic-python" + +supported_compute: +- cpu,x64 +- cpu,arm64 +- gpu,nvidia + +script_name: app-loadgen-generic-python,d3d949cc361747a6 diff --git 
a/cfg/benchmark-list/mlperf-abtf.yaml b/cfg/benchmark-list/mlperf-abtf.yaml new file mode 100644 index 0000000000..a01edcbde1 --- /dev/null +++ b/cfg/benchmark-list/mlperf-abtf.yaml @@ -0,0 +1,18 @@ +uid: 94f0faaa0c61445d + +tags: benchmark,run,mlperf,abtf,mlperf-abtf + +name: "MLPerf ABTF - dev" + +urls: +- name: "Announcement" + url: "https://mlcommons.org/2023/05/avcc-and-mlcommons-join-forces-to-develop-an-automotive-industry-standard/" +- name: "MLCommons CM automation (under development)" + url: "https://access.cknowledge.org/playground/?action=scripts" + +supported_compute: +- cpu,x64 +- cpu,arm64 +- gpu,nvidia + +script_name: test-abtf-ssd-pytorch,91bfc4333b054c21 diff --git a/cfg/benchmark-list/mlperf-inference.yaml b/cfg/benchmark-list/mlperf-inference.yaml new file mode 100644 index 0000000000..e57764a486 --- /dev/null +++ b/cfg/benchmark-list/mlperf-inference.yaml @@ -0,0 +1,28 @@ +uid: 39877bb63fb54725 + +tags: benchmark,run,mlperf,inference,mlperf-inference + +name: "MLPerf inference" + +urls: +- name: "Official page" + url: "https://mlcommons.org/benchmarks/inference" +- name: "GitHub dev page" + url: "https://github.com/mlcommons/inference" +- name: "ArXiv paper" + url: "https://arxiv.org/abs/1911.02549" +- name: "MLCommons CM automation for MLPerf inference" + url: "https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference" + +script_name: run-mlperf-inference-app,4a5d5b13fd7e4ac8 + +skip_extra_urls: true + +supported_compute: +- cpu,x64 +- cpu,arm64 +- gpu,nvidia +- gpu,amd +- accelerator,acc,qualcomm,ai,100,ai-100 +- tpu,google +- gaudi,habana diff --git a/cfg/benchmark-list/mlperf-mobile.yaml b/cfg/benchmark-list/mlperf-mobile.yaml new file mode 100644 index 0000000000..85771a44d9 --- /dev/null +++ b/cfg/benchmark-list/mlperf-mobile.yaml @@ -0,0 +1,14 @@ +uid: 8b2ed0897bd74267 + +tags: benchmark,run,mlperf,mobile,mlperf-mobile + +name: "MLPerf mobile" + +urls: +- name: "Official page" + url: "https://mlcommons.org/benchmarks/inference-mobile/" +- name: "GitHub page for mobile app" + url: "https://github.com/mlcommons/mobile_app_open" + +supported_compute: +- cpu,arm64 diff --git a/cfg/benchmark-list/mlperf-tiny.yaml b/cfg/benchmark-list/mlperf-tiny.yaml new file mode 100644 index 0000000000..d6aeccabc5 --- /dev/null +++ b/cfg/benchmark-list/mlperf-tiny.yaml @@ -0,0 +1,16 @@ +uid: 28870394c19c4c37 + +tags: benchmark,run,mlperf,tiny,mlperf-tiny + +name: "MLPerf tiny" + +urls: +- name: "Official page" + url: "https://mlcommons.org/benchmarks/inference-tiny" +- name: "GitHub dev page" + url: "https://github.com/mlcommons/tiny" +- name: "MLCommons CM automation (under development)" + url: "https://github.com/mlcommons/ck/blob/master/docs/tutorials/reproduce-mlperf-tiny.md" + +supported_compute: +- stm32 diff --git a/cfg/benchmark-list/mlperf-training.yaml b/cfg/benchmark-list/mlperf-training.yaml new file mode 100644 index 0000000000..8b95de4f73 --- /dev/null +++ b/cfg/benchmark-list/mlperf-training.yaml @@ -0,0 +1,18 @@ +uid: 59311e6098c14b22 + +tags: benchmark,run,mlperf,training,mlperf-training + +name: "MLPerf training" + +urls: +- name: "Official page" + url: "https://mlcommons.org/benchmarks/training" +- name: "GitHub dev page" + url: "https://github.com/mlcommons/training" +- name: "MLCommons CM automation (under development)" + url: "https://github.com/mlcommons/ck/blob/master/docs/tutorials/reproduce-mlperf-training.md" + +supported_compute: +- cpu,x64 +- gpu,nvidia +- tpu,google diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml 
b/cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml
new file mode 100644
index 0000000000..334bd4d94c
--- /dev/null
+++ b/cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml
@@ -0,0 +1,45 @@
+alias: benchmark-run-mlperf-inference-v3.1
+uid: 8eb42e27ec984185
+
+automation_alias: cfg
+automation_uid: 88dce9c160324c5d
+
+tags:
+- benchmark
+- run
+- mlperf
+- inference
+- v3.1
+
+name: "MLPerf inference - v3.1"
+
+supported_compute:
+- ee8c568e0ac44f2b
+- fe379ecd1e054a00
+
+bench_uid: 39877bb63fb54725
+
+view_dimensions:
+- - input.device
+  - "MLPerf device"
+- - input.implementation
+  - "MLPerf implementation"
+- - input.backend
+  - "MLPerf backend"
+- - input.model
+  - "MLPerf model"
+- - input.precision
+  - "Model precision"
+- - input.scenario
+  - "MLPerf scenario"
+- - input.host_os
+  - "Host OS"
+- - output.state.cm-mlperf-inference-results-last.performance
+  - "Got performance"
+  - "tick"
+- - output.state.cm-mlperf-inference-results-last.accuracy
+  - "Got accuracy"
+  - "tick"
+- - output.state.cm-mlperf-inference-results-last.power
+  - "Got energy"
+  - "tick"
diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-input.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-input.json
new file mode 100644
index 0000000000..d1f187f498
--- /dev/null
+++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-input.json
@@ -0,0 +1,54 @@
+{
+  "action": "run",
+  "automation": "script",
+  "tags": "run-mlperf-inference,_r4.0,_performance-only,_short",
+  "division": "open",
+  "category": "datacenter",
+  "device": "qaic",
+  "model": "bert-99.9",
+  "precision": "float16",
+  "implementation": "qualcomm",
+  "backend": "glow",
+  "scenario": "Offline",
+  "execution_mode": "test",
+  "power": "no",
+  "adr": {
+    "python": {
+      "version_min": "3.8"
+    }
+  },
+  "clean": true,
+  "compliance": "no",
+  "j": true,
+  "jf": "run-0eeb9799b12b488f",
+  "quiet": true,
+  "time": true,
+  "host_os": "linux",
+  "cmd": [
+    "--tags=run-mlperf-inference,_r4.0,_performance-only,_short",
+    "--division=open",
+    "--category=datacenter",
+    "--device=qaic",
+    "--model=bert-99.9",
+    "--precision=float16",
+    "--implementation=qualcomm",
+    "--backend=glow",
+    "--scenario=Offline",
+    "--execution_mode=test",
+    "--power=no",
+    "--adr.python.version_min=3.8",
+    "--clean",
+    "--compliance=no",
+    "--j",
+    "--quiet",
+    "--time",
+    "--host_os=linux"
+  ],
+  "out": "con",
+  "parsed_automation": [
+    [
+      "script",
+      "5b4e0237da074764"
+    ]
+  ]
+}
\ No newline at end of file
diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-meta.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-meta.json
new file mode 100644
index 0000000000..a9243fe3ce
--- /dev/null
+++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-meta.json
@@ -0,0 +1,9 @@
+{
+  "uid": "800fe1b33ca443da",
+  "compute_uid": "d2ae645066664463",
+  "bench_uid": "39877bb63fb54725",
+  "date_time": "2024-02-20T15:25:03.786139",
+  "functional": true,
+  "reproduced": true,
+  "support_docker": true
+}
diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-output.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-output.json
new file mode 100644
index 0000000000..a07a992e76
--- /dev/null
+++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-output.json
@@ -0,0 +1,11 @@
+{
+  "return": 0,
+  "env": {},
+  "new_env": {},
+  "state": {
+    "cm-mlperf-inference-results-last": {
+      "performance": "tested-will-be-added-in-v4.0",
+      "performance_valid": true
+    }
+  }
+}
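The view_dimensions list above appears to address the run records stored next to it: the component before the first dot selects the run-<uid>-input.json or run-<uid>-output.json file, and the remaining components walk into the parsed JSON. A small sketch of that lookup, using a hypothetical helper name and assuming it runs inside this cfg directory:

import json

# Hypothetical helper: resolve one 'view_dimensions' path such as
# 'input.device' or 'output.state.cm-mlperf-inference-results-last.performance'
# against the run-<uid>-{input,output}.json files in this directory.
def get_dimension(run_prefix, dimension):
    parts = dimension.split('.')
    with open('{}-{}.json'.format(run_prefix, parts[0])) as f:
        value = json.load(f)
    for key in parts[1:]:                 # walk the remaining path components
        value = value.get(key) if isinstance(value, dict) else None
    return value

print(get_dimension('run-005147815bf840b8', 'input.device'))  # -> qaic
print(get_dimension('run-005147815bf840b8',
                    'output.state.cm-mlperf-inference-results-last.performance'))  # -> tested-will-be-added-in-v4.0

\ No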
newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-input.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-input.json new file mode 100644 index 0000000000..1fe11d6d51 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-input.json @@ -0,0 +1,55 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_performance-only,_short", + "division": "open", + "category": "edge", + "device": "cpu", + "model": "bert-99", + "precision": "int8", + "implementation": "reference", + "backend": "deepsparse", + "scenario": "Offline", + "execution_mode": "test", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "clean": true, + "compliance": "no", + "j": true, + "jf": "run-0eeb9799b12b488f", + "quiet": true, + "time": true, + "host_os": "linux", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_performance-only,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=bert-99", + "--precision=int8", + "--implementation=reference", + "--backend=deepsparse", + "--scenario=Offline", + "--execution_mode=test", + "--power=no", + "--adr.python.version_min=3.8", + "--clean", + "--compliance=no", + "--j", + "--jf=run-0eeb9799b12b488f", + "--quiet", + "--time", + "--host_os=linux" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-meta.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-meta.json new file mode 100644 index 0000000000..dbd58de078 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "12242042335e4bc8", + "compute_uid": "ee8c568e0ac44f2b", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-20T15:15:53.984671", + "functional": true, + "reproduced": true, + "support_docker": true +} diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-output.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-output.json new file mode 100644 index 0000000000..519ddf3a3b --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-output.json @@ -0,0 +1,137 @@ +{ + "return": 0, + "env": {}, + "new_env": {}, + "state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "12.4548", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "64039368", + "max latency (ns)": "802905050", + "mean latency (ns)": "372956875", + "50.00 percentile latency (ns)": "378435867", + "90.00 percentile latency (ns)": "802905050", + "95.00 percentile latency (ns)": "802905050", + "97.00 percentile latency (ns)": "802905050", + "99.00 percentile latency (ns)": "802905050", + "99.90 percentile latency (ns)": "802905050", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + 
"performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "https://github.com/mlcommons/inference_results_v2.1/raw/master/open/NeuralMagic/code/bert/deepsparse/models/oBERT-Large_95sparse_block4_qat.onnx.tar.xz", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-reference-cpu-deepsparse-vdefault-default_config": { + "bert-99": { + "Offline": { + "performance": "12.455", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "12.455", + "performance_valid": true + } + }, + "new_state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "12.4548", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "64039368", + "max latency (ns)": "802905050", + "mean latency (ns)": "372956875", + "50.00 percentile latency (ns)": "378435867", + "90.00 percentile latency (ns)": "802905050", + "95.00 percentile latency (ns)": "802905050", + "97.00 percentile latency (ns)": "802905050", + "99.00 percentile latency (ns)": "802905050", + "99.90 percentile latency (ns)": "802905050", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "https://github.com/mlcommons/inference_results_v2.1/raw/master/open/NeuralMagic/code/bert/deepsparse/models/oBERT-Large_95sparse_block4_qat.onnx.tar.xz", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-reference-cpu-deepsparse-vdefault-default_config": { + "bert-99": { + "Offline": { + "performance": "12.455", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "12.455", + "performance_valid": true + } + }, + "deps": [ + "detect,os", + "detect,cpu", + "get,python3", + "get,mlcommons,inference,src", + "get,sut,description", + "get,mlperf,inference,results,dir", + "install,pip-package,for-cmind-python,_package.tabulate", + "get,mlperf,inference,utils" + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-input.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-input.json new file mode 100644 index 0000000000..b02bb76950 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-input.json @@ -0,0 +1,55 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_performance-only,_short", + "division": "open", + "category": 
"edge", + "device": "cpu", + "model": "bert-99.9", + "precision": "float32", + "implementation": "reference", + "backend": "onnxruntime", + "scenario": "Offline", + "execution_mode": "test", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "clean": true, + "compliance": "no", + "j": true, + "jf": "run-52c1d43172664ed0", + "quiet": true, + "time": true, + "host_os": "linux", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_performance-only,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=bert-99.9", + "--precision=float32", + "--implementation=reference", + "--backend=onnxruntime", + "--scenario=Offline", + "--execution_mode=test", + "--power=no", + "--adr.python.version_min=3.8", + "--clean", + "--compliance=no", + "--j", + "--jf=run-52c1d43172664ed0", + "--quiet", + "--time", + "--host_os=linux" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-meta.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-meta.json new file mode 100644 index 0000000000..7b7b419f34 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "52c1d43172664ed0", + "compute_uid": "ee8c568e0ac44f2b", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-20T15:04:13.424211", + "functional": true, + "reproduced": true, + "support_docker": true +} diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-output.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-output.json new file mode 100644 index 0000000000..c250f0c626 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-output.json @@ -0,0 +1,137 @@ +{ + "return": 0, + "env": {}, + "new_env": {}, + "state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.615377", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "4705323615", + "max latency (ns)": "16250190121", + "mean latency (ns)": "10456508889", + "50.00 percentile latency (ns)": "10133038152", + "90.00 percentile latency (ns)": "16250190121", + "95.00 percentile latency (ns)": "16250190121", + "97.00 percentile latency (ns)": "16250190121", + "99.00 percentile latency (ns)": "16250190121", + "99.90 percentile latency (ns)": "16250190121", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "https://armi.in/files/model.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + 
"ip_172_31_89_56-reference-cpu-onnxruntime-v1.17.0-default_config": { + "bert-99.9": { + "Offline": { + "performance": "0.615", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.615", + "performance_valid": true + } + }, + "new_state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.615377", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "4705323615", + "max latency (ns)": "16250190121", + "mean latency (ns)": "10456508889", + "50.00 percentile latency (ns)": "10133038152", + "90.00 percentile latency (ns)": "16250190121", + "95.00 percentile latency (ns)": "16250190121", + "97.00 percentile latency (ns)": "16250190121", + "99.00 percentile latency (ns)": "16250190121", + "99.90 percentile latency (ns)": "16250190121", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "https://armi.in/files/model.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-reference-cpu-onnxruntime-v1.17.0-default_config": { + "bert-99.9": { + "Offline": { + "performance": "0.615", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.615", + "performance_valid": true + } + }, + "deps": [ + "detect,os", + "detect,cpu", + "get,python3", + "get,mlcommons,inference,src", + "get,sut,description", + "get,mlperf,inference,results,dir", + "install,pip-package,for-cmind-python,_package.tabulate", + "get,mlperf,inference,utils" + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-input.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-input.json new file mode 100644 index 0000000000..2addebee94 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-input.json @@ -0,0 +1,56 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_submission,_short", + "division": "open", + "category": "edge", + "device": "cuda", + "model": "bert-99", + "host_os": "linux", + "precision": "float32", + "implementation": "nvidia-original", + "backend": "tensorrt", + "scenario": "Offline", + "execution_mode": "test", + "submitter": "CTuning", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "compliance": "no", + "j": true, + "time": true, + "clean": true, + "quiet": true, + "jf": "mlperf-inference-results", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_submission,_short", + "--division=open", + "--category=edge", + "--device=cuda", + "--model=bert-99", + "--precision=float32", + 
"--implementation=nvidia-original", + "--backend=tensorrt", + "--scenario=Offline", + "--execution_mode=test", + "--submitter=CTuning", + "--power=no", + "--adr.python.version_min=3.8", + "--compliance=no", + "--j", + "--time", + "--clean", + "--quiet", + "--jf=mlperf-inference-results" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-meta.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-meta.json new file mode 100644 index 0000000000..0e5dcba611 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "66cce585ff0242bc", + "compute_uid": "fe379ecd1e054a00", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-20T16:23:59.000629", + "functional": true, + "reproduced": true, + "support_docker": true +} diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-input.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-input.json new file mode 100644 index 0000000000..82a0cc826b --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-input.json @@ -0,0 +1,56 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_submission,_short", + "division": "open", + "category": "edge", + "device": "cpu", + "model": "bert-99", + "host_os": "linux", + "precision": "float32", + "implementation": "reference", + "backend": "tf", + "scenario": "Offline", + "execution_mode": "test", + "submitter": "CTuning", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "compliance": "no", + "j": true, + "time": true, + "clean": true, + "quiet": true, + "jf": "mlperf-inference-results", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_submission,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=bert-99", + "--precision=float32", + "--implementation=reference", + "--backend=tf", + "--scenario=Offline", + "--execution_mode=test", + "--submitter=CTuning", + "--power=no", + "--adr.python.version_min=3.8", + "--compliance=no", + "--j", + "--time", + "--clean", + "--quiet", + "--jf=mlperf-inference-results" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-meta.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-meta.json new file mode 100644 index 0000000000..3bde194aba --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "6a07cf881dee462a", + "compute_uid": "ee8c568e0ac44f2b", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-20T15:33:11.932584", + "functional": false, + "reproduced": false, + "support_docker": false +} diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-input.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-input.json new file mode 100644 index 0000000000..de6e2b2c93 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-input.json @@ -0,0 +1,55 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_performance-only,_short", + "division": "open", + "category": "edge", + "device": "cuda", + "model": "bert-99", + "precision": "float32", + "implementation": "reference", + "backend": "onnxruntime", + 
"scenario": "Offline", + "execution_mode": "test", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "clean": true, + "compliance": "no", + "j": true, + "jf": "run-7d80f464b2274742", + "quiet": true, + "time": true, + "host_os": "linux", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_performance-only,_short", + "--division=open", + "--category=edge", + "--device=cuda", + "--model=bert-99", + "--precision=float32", + "--implementation=reference", + "--backend=onnxruntime", + "--scenario=Offline", + "--execution_mode=test", + "--power=no", + "--adr.python.version_min=3.8", + "--clean", + "--compliance=no", + "--j", + "--jf=run-7d80f464b2274742", + "--quiet", + "--time", + "--host_os=linux" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-meta.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-meta.json new file mode 100644 index 0000000000..eadf7f2014 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-meta.json @@ -0,0 +1,10 @@ +{ + "uid": "7d80f464b2274742", + "compute_uid": "fe379ecd1e054a00", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-20T16:04:27.903539", + "notes":"ONNX 1.15.0 worked; ONNX 1.17.0 did not work", + "functional": true, + "reproduced": true, + "support_docker": false +} diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-output.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-output.json new file mode 100644 index 0000000000..5d8f74da15 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-output.json @@ -0,0 +1,137 @@ +{ + "return": 0, + "env": {}, + "new_env": {}, + "state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "13.1969", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "295840204", + "max latency (ns)": "757755274", + "mean latency (ns)": "521501098", + "50.00 percentile latency (ns)": "497153427", + "90.00 percentile latency (ns)": "757755274", + "95.00 percentile latency (ns)": "757755274", + "97.00 percentile latency (ns)": "757755274", + "99.00 percentile latency (ns)": "757755274", + "99.90 percentile latency (ns)": "757755274", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "https://armi.in/files/model.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-reference-gpu-onnxruntime-v1.15.0-default_config": { + "bert-99": { + "Offline": { + "performance": "13.197", + 
"performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "13.197", + "performance_valid": true + } + }, + "new_state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "13.1969", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "295840204", + "max latency (ns)": "757755274", + "mean latency (ns)": "521501098", + "50.00 percentile latency (ns)": "497153427", + "90.00 percentile latency (ns)": "757755274", + "95.00 percentile latency (ns)": "757755274", + "97.00 percentile latency (ns)": "757755274", + "99.00 percentile latency (ns)": "757755274", + "99.90 percentile latency (ns)": "757755274", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "https://armi.in/files/model.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-reference-gpu-onnxruntime-v1.15.0-default_config": { + "bert-99": { + "Offline": { + "performance": "13.197", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "13.197", + "performance_valid": true + } + }, + "deps": [ + "detect,os", + "detect,cpu", + "get,python3", + "get,mlcommons,inference,src", + "get,sut,description", + "get,mlperf,inference,results,dir", + "install,pip-package,for-cmind-python,_package.tabulate", + "get,mlperf,inference,utils" + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-input.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-input.json new file mode 100644 index 0000000000..c72a9f6a27 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-input.json @@ -0,0 +1,56 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_submission,_short", + "division": "open", + "category": "edge", + "host_os": "linux", + "device": "cpu", + "model": "retinanet", + "precision": "float32", + "implementation": "reference", + "backend": "onnxruntime", + "scenario": "Offline", + "execution_mode": "test", + "submitter": "CTuning", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "compliance": "no", + "j": true, + "time": true, + "clean": true, + "quiet": true, + "jf": "mlperf-inference-results", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_submission,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=retinanet", + "--precision=float32", + "--implementation=reference", + "--backend=onnxruntime", + "--scenario=Offline", + "--execution_mode=test", + "--submitter=CTuning", + "--power=no", + 
"--adr.python.version_min=3.8", + "--compliance=no", + "--j", + "--time", + "--clean", + "--quiet", + "--jf=mlperf-inference-results" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-meta.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-meta.json new file mode 100644 index 0000000000..2b86368970 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "7f094c244ebb4985", + "compute_uid": "ee8c568e0ac44f2b", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-18", + "functional": true, + "reproduced": true, + "support_docker": true +} diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-output.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-output.json new file mode 100644 index 0000000000..cae36b057d --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-output.json @@ -0,0 +1,146 @@ +{ + "return": 0, + "env": {}, + "new_env": {}, + "state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.808629", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "963485100", + "max latency (ns)": "12366604800", + "mean latency (ns)": "5961694610", + "50.00 percentile latency (ns)": "6164791100", + "90.00 percentile latency (ns)": "12366604800", + "95.00 percentile latency (ns)": "12366604800", + "97.00 percentile latency (ns)": "12366604800", + "99.00 percentile latency (ns)": "12366604800", + "99.90 percentile latency (ns)": "12366604800", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "64" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "resnext50_32x4d_fpn.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "no" + }, + "cm-mlperf-inference-results": { + "FGG_LENOVO_P14S-reference-cpu-onnxruntime-v1.16.0-default_config": { + "retinanet": { + "Offline": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + }, + "new_state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.808629", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "963485100", + "max latency (ns)": "12366604800", + "mean latency (ns)": "5961694610", + "50.00 percentile latency 
(ns)": "6164791100", + "90.00 percentile latency (ns)": "12366604800", + "95.00 percentile latency (ns)": "12366604800", + "97.00 percentile latency (ns)": "12366604800", + "99.00 percentile latency (ns)": "12366604800", + "99.90 percentile latency (ns)": "12366604800", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "64" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "resnext50_32x4d_fpn.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "no" + }, + "cm-mlperf-inference-results": { + "FGG_LENOVO_P14S-reference-cpu-onnxruntime-v1.16.0-default_config": { + "retinanet": { + "Offline": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + }, + "deps": [ + "detect,os", + "detect,cpu", + "get,python3", + "get,mlcommons,inference,src", + "get,sut,description", + "get,mlperf,inference,results,dir", + "install,pip-package,for-cmind-python,_package.tabulate", + "get,mlperf,inference,utils", + "generate,mlperf,inference,submission" + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985.md b/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985.md new file mode 100644 index 0000000000..6b58ae634a --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985.md @@ -0,0 +1 @@ +TBD1 diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-input.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-input.json new file mode 100644 index 0000000000..fb7e74af53 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-input.json @@ -0,0 +1,53 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_performance-only,_short", + "division": "open", + "category": "edge", + "device": "cpu", + "model": "bert-99", + "precision": "uint8", + "implementation": "intel-original", + "backend": "pytorch", + "scenario": "Offline", + "execution_mode": "test", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "clean": true, + "compliance": "no", + "j": true, + "jf": "run-d8c0f02f52bf49ae", + "time": true, + "host_os": "linux", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_performance-only,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=bert-99", + "--precision=uint8", + "--implementation=intel-original", + "--backend=pytorch", + "--scenario=Offline", + "--execution_mode=test", + "--power=no", + "--adr.python.version_min=3.8", + "--clean", + "--compliance=no", + "--j", + "--jf=run-d8c0f02f52bf49ae", + "--time", + "--host_os=linux" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ 
No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-meta.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-meta.json new file mode 100644 index 0000000000..adf9c9f9f1 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "d5b6b5af6d794045", + "compute_uid": "ee8c568e0ac44f2b", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-20T16:18:34.632335", + "functional": true, + "reproduced": true, + "support_docker": true +} diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-input.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-input.json new file mode 100644 index 0000000000..d23c116653 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-input.json @@ -0,0 +1,53 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_performance-only,_short", + "division": "open", + "category": "edge", + "device": "cpu", + "model": "retinanet", + "precision": "float32", + "implementation": "mil", + "backend": "onnxruntime", + "scenario": "Offline", + "execution_mode": "test", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "clean": true, + "compliance": "no", + "j": true, + "jf": "run-d8c0f02f52bf49ae", + "time": true, + "host_os": "linux", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_performance-only,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=retinanet", + "--precision=float32", + "--implementation=mil", + "--backend=onnxruntime", + "--scenario=Offline", + "--execution_mode=test", + "--power=no", + "--adr.python.version_min=3.8", + "--clean", + "--compliance=no", + "--j", + "--jf=run-d8c0f02f52bf49ae", + "--time", + "--host_os=linux" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-meta.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-meta.json new file mode 100644 index 0000000000..b0269fa051 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-meta.json @@ -0,0 +1,10 @@ +{ + "uid": "d8c0f02f52bf49ae", + "compute_uid": "ee8c568e0ac44f2b", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-20T15:39:15.255021", + "notes":"Used clang 14 installed via apt; LLVM 16.0.4 couldn't find llvmgold plugin - need to check ...", + "functional": false, + "reproduced": false, + "support_docker": false +} diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-output.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-output.json new file mode 100644 index 0000000000..784796ecc2 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-output.json @@ -0,0 +1,137 @@ +{ + "return": 0, + "env": {}, + "new_env": {}, + "state": { + "app_mlperf_inference_log_summary": { + "sut name": "QueueSUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.452945", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "2550773320", + "max latency (ns)": "22077722147", + "mean latency (ns)": "12323786694", + "50.00 percentile latency (ns)": "13414914364", + "90.00 percentile latency (ns)": "22077722147", + "95.00 percentile latency (ns)": "22077722147", + 
"97.00 percentile latency (ns)": "22077722147", + "99.00 percentile latency (ns)": "22077722147", + "99.90 percentile latency (ns)": "22077722147", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "64" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "resnext50_32x4d_fpn.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "no" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-cpp-cpu-onnxruntime-vdefault-default_config": { + "retinanet": { + "Offline": { + "performance": "0.453", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.453", + "performance_valid": true + } + }, + "new_state": { + "app_mlperf_inference_log_summary": { + "sut name": "QueueSUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.452945", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "2550773320", + "max latency (ns)": "22077722147", + "mean latency (ns)": "12323786694", + "50.00 percentile latency (ns)": "13414914364", + "90.00 percentile latency (ns)": "22077722147", + "95.00 percentile latency (ns)": "22077722147", + "97.00 percentile latency (ns)": "22077722147", + "99.00 percentile latency (ns)": "22077722147", + "99.90 percentile latency (ns)": "22077722147", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "64" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "resnext50_32x4d_fpn.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "no" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-cpp-cpu-onnxruntime-vdefault-default_config": { + "retinanet": { + "Offline": { + "performance": "0.453", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.453", + "performance_valid": true + } + }, + "deps": [ + "detect,os", + "detect,cpu", + "get,python3", + "get,mlcommons,inference,src", + "get,sut,description", + "get,mlperf,inference,results,dir", + "install,pip-package,for-cmind-python,_package.tabulate", + "get,mlperf,inference,utils" + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-input.json 
b/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-input.json new file mode 100644 index 0000000000..9eabe5cb60 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-input.json @@ -0,0 +1,56 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_submission,_short", + "division": "open", + "category": "edge", + "device": "cpu", + "model": "resnet50", + "host_os": "windows", + "precision": "float32", + "implementation": "reference", + "backend": "onnxruntime", + "scenario": "Offline", + "execution_mode": "test", + "submitter": "CTuning", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "compliance": "no", + "j": true, + "time": true, + "clean": true, + "quiet": true, + "jf": "mlperf-inference-results", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_submission,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=retinanet", + "--precision=float32", + "--implementation=reference", + "--backend=onnxruntime", + "--scenario=Offline", + "--execution_mode=test", + "--submitter=CTuning", + "--power=no", + "--adr.python.version_min=3.8", + "--compliance=no", + "--j", + "--time", + "--clean", + "--quiet", + "--jf=mlperf-inference-results" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-meta.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-meta.json new file mode 100644 index 0000000000..45eb699b96 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "df843c22cbf54aaf", + "compute_uid": "fe379ecd1e054a00", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-18", + "functional": true, + "reproduced": true, + "support_docker": true +} diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-output.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-output.json new file mode 100644 index 0000000000..cae36b057d --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-output.json @@ -0,0 +1,146 @@ +{ + "return": 0, + "env": {}, + "new_env": {}, + "state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.808629", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "963485100", + "max latency (ns)": "12366604800", + "mean latency (ns)": "5961694610", + "50.00 percentile latency (ns)": "6164791100", + "90.00 percentile latency (ns)": "12366604800", + "95.00 percentile latency (ns)": "12366604800", + "97.00 percentile latency (ns)": "12366604800", + "99.00 percentile latency (ns)": "12366604800", + "99.90 percentile latency (ns)": "12366604800", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + 
"performance_issue_same_index": "0", + "performance_sample_count": "64" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "resnext50_32x4d_fpn.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "no" + }, + "cm-mlperf-inference-results": { + "FGG_LENOVO_P14S-reference-cpu-onnxruntime-v1.16.0-default_config": { + "retinanet": { + "Offline": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + }, + "new_state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.808629", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "963485100", + "max latency (ns)": "12366604800", + "mean latency (ns)": "5961694610", + "50.00 percentile latency (ns)": "6164791100", + "90.00 percentile latency (ns)": "12366604800", + "95.00 percentile latency (ns)": "12366604800", + "97.00 percentile latency (ns)": "12366604800", + "99.00 percentile latency (ns)": "12366604800", + "99.90 percentile latency (ns)": "12366604800", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "64" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "resnext50_32x4d_fpn.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "no" + }, + "cm-mlperf-inference-results": { + "FGG_LENOVO_P14S-reference-cpu-onnxruntime-v1.16.0-default_config": { + "retinanet": { + "Offline": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + }, + "deps": [ + "detect,os", + "detect,cpu", + "get,python3", + "get,mlcommons,inference,src", + "get,sut,description", + "get,mlperf,inference,results,dir", + "install,pip-package,for-cmind-python,_package.tabulate", + "get,mlperf,inference,utils", + "generate,mlperf,inference,submission" + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf.md b/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf.md new file mode 100644 index 0000000000..97635650c3 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf.md @@ -0,0 +1 @@ +TBD2 diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-input.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-input.json new file mode 100644 index 0000000000..68cf51d221 --- /dev/null +++ 
b/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-input.json @@ -0,0 +1,55 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_performance-only,_short", + "division": "open", + "category": "edge", + "device": "cpu", + "model": "bert-99", + "precision": "float32", + "implementation": "reference", + "backend": "pytorch", + "scenario": "Offline", + "execution_mode": "test", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "clean": true, + "compliance": "no", + "j": true, + "jf": "run-f05147815bf840b8", + "quiet": true, + "time": true, + "host_os": "linux", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_performance-only,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=bert-99", + "--precision=float32", + "--implementation=reference", + "--backend=pytorch", + "--scenario=Offline", + "--execution_mode=test", + "--power=no", + "--adr.python.version_min=3.8", + "--clean", + "--compliance=no", + "--j", + "--jf=run-f05147815bf840b8", + "--quiet", + "--time", + "--host_os=linux" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-meta.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-meta.json new file mode 100644 index 0000000000..45eb699b96 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "df843c22cbf54aaf", + "compute_uid": "fe379ecd1e054a00", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-18", + "functional": true, + "reproduced": true, + "support_docker": true +} diff --git a/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-output.json b/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-output.json new file mode 100644 index 0000000000..627e18889a --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-output.json @@ -0,0 +1,137 @@ +{ + "return": 0, + "env": {}, + "new_env": {}, + "state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.771384", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "1409122219", + "max latency (ns)": "12963712908", + "mean latency (ns)": "7203424157", + "50.00 percentile latency (ns)": "7862607410", + "90.00 percentile latency (ns)": "12963712908", + "95.00 percentile latency (ns)": "12963712908", + "97.00 percentile latency (ns)": "12963712908", + "99.00 percentile latency (ns)": "12963712908", + "99.90 percentile latency (ns)": "12963712908", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": 
"https://zenodo.org/record/3733896/files/model.pytorch", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-reference-cpu-pytorch-v2.1.0-default_config": { + "bert-99": { + "Offline": { + "performance": "0.771", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.771", + "performance_valid": true + } + }, + "new_state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.771384", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "1409122219", + "max latency (ns)": "12963712908", + "mean latency (ns)": "7203424157", + "50.00 percentile latency (ns)": "7862607410", + "90.00 percentile latency (ns)": "12963712908", + "95.00 percentile latency (ns)": "12963712908", + "97.00 percentile latency (ns)": "12963712908", + "99.00 percentile latency (ns)": "12963712908", + "99.90 percentile latency (ns)": "12963712908", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "https://zenodo.org/record/3733896/files/model.pytorch", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-reference-cpu-pytorch-v2.1.0-default_config": { + "bert-99": { + "Offline": { + "performance": "0.771", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.771", + "performance_valid": true + } + }, + "deps": [ + "detect,os", + "detect,cpu", + "get,python3", + "get,mlcommons,inference,src", + "get,sut,description", + "get,mlperf,inference,results,dir", + "install,pip-package,for-cmind-python,_package.tabulate", + "get,mlperf,inference,utils" + ] +} \ No newline at end of file diff --git a/cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml b/cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml new file mode 100644 index 0000000000..50086d0862 --- /dev/null +++ b/cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml @@ -0,0 +1,38 @@ +alias: benchmark-run-mlperf-inference-v4.0 +uid: b4ee9b6c820e493a + +automation_alias: cfg +automation_uid: 88dce9c160324c5d + +tags: +- benchmark +- run +- mlperf +- inference +- v4.0 + +name: "MLPerf inference - v4.0" + +supported_compute: +- ee8c568e0ac44f2b +- fe379ecd1e054a00 + +bench_uid: 39877bb63fb54725 + +view_dimensions: +- - input.device + - "MLPerf device" +- - input.implementation + - "MLPerf implementation" +- - input.backend + - "MLPerf backend" +- - input.model + - "MLPerf model" +- - input.scenario + - "MLPerf scenario" +- - input.host_os + - "Host OS" +- - 
output.state.cm-mlperf-inference-results-last.performance
+  - "Got performance"
+- - output.state.cm-mlperf-inference-results-last.accuracy
+  - "Got accuracy"
diff --git a/script/README.md b/script/README.md
new file mode 100644
index 0000000000..a9e5e41450
--- /dev/null
+++ b/script/README.md
@@ -0,0 +1,13 @@
+### About
+
+This is the source code of portable and reusable automation recipes
+from MLCommons projects with a [human-friendly CM interface](https://github.com/mlcommons/ck) -
+you can find a human-readable catalog of these automation recipes [here](../../docs/list_of_scripts.md).
+
+### License
+
+[Apache 2.0](../../LICENSE.md)
+
+### Copyright
+
+2022-2024 [MLCommons](https://mlcommons.org)
diff --git a/script/activate-python-venv/README-extra.md b/script/activate-python-venv/README-extra.md
new file mode 100644
index 0000000000..2b61d193cd
--- /dev/null
+++ b/script/activate-python-venv/README-extra.md
@@ -0,0 +1,7 @@
+# About
+
+Activate a Python virtual environment installed via CM:
+
+```bash
+cm run script "activate python-venv" (--version={python version}) (--name={user-friendly name of the virtual environment})
+```
diff --git a/script/activate-python-venv/README.md b/script/activate-python-venv/README.md
new file mode 100644
index 0000000000..9a804da0fd
--- /dev/null
+++ b/script/activate-python-venv/README.md
@@ -0,0 +1,123 @@
+Automatically generated README for this automation recipe: **activate-python-venv**
+
+Category: **Python automation**
+
+License: **Apache 2.0**
+
+Developers: [Grigori Fursin](https://cKnowledge.org/gfursin)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=activate-python-venv,fcbbb84946f34c55) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/activate-python-venv)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *activate,python-venv*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "activate python-venv" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=activate,python-venv`
+
+`cm run script --tags=activate,python-venv `
+
+*or*
+
+`cmr "activate python-venv"`
+
+`cmr "activate python-venv " `
+
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'activate,python-venv',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="activate,python-venv"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=activate,python-venv) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "activate python-venv" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
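+
+As an illustration (a minimal sketch, not part of the generated docs; the value `demo` is hypothetical), the same override can be passed through the `env` dictionary of the CM Python API; this script's `customize.py` reads `CM_NAME` to pick the virtual environment:
+
+```python
+import cmind
+
+# Pass environment overrides via the 'env' dictionary,
+# mirroring --env.KEY=VALUE on the command line.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'activate,python-venv',
+                  'env': {'CM_NAME': 'demo'},
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```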
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/activate-python-venv/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/activate-python-venv/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/activate-python-venv/_cm.json)*** + * install,python-venv + * CM names: `--adr.['python-venv']...` + - CM script: [install-python-venv](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-python-venv) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/activate-python-venv/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/activate-python-venv/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/activate-python-venv/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/activate-python-venv/_cm.json) + +___ +### Script output +`cmr "activate python-venv " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/activate-python-venv/_cm.json b/script/activate-python-venv/_cm.json new file mode 100644 index 0000000000..90997ca293 --- /dev/null +++ b/script/activate-python-venv/_cm.json @@ -0,0 +1,25 @@ +{ + "alias": "activate-python-venv", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Python automation", + "developers": "[Grigori Fursin](https://cKnowledge.org/gfursin)", + "name": "Activate virtual Python environment", + "prehook_deps": [ + { + "names": [ + "python-venv" + ], + "reuse_version": true, + "tags": "install,python-venv" + } + ], + "tags": [ + "activate", + "python", + "activate-python-venv", + "python-venv" + ], + "tags_help":"activate python-venv", + "uid": "fcbbb84946f34c55" +} diff --git a/script/activate-python-venv/customize.py b/script/activate-python-venv/customize.py new file mode 100644 index 0000000000..938a016a05 --- /dev/null +++ b/script/activate-python-venv/customize.py @@ -0,0 +1,29 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + name = env.get('CM_NAME','') + if name != '': + name = name.strip().lower() + + r = automation.update_deps({'deps':meta['prehook_deps'], + 'update_deps':{ + 'python-venv':{ + 'name':name + } + } + }) + if r['return']>0: return r + + return {'return':0} diff --git a/script/activate-python-venv/run.bat b/script/activate-python-venv/run.bat new file mode 100644 index 0000000000..5ca2ac0edd --- /dev/null +++ b/script/activate-python-venv/run.bat @@ -0,0 +1,7 @@ +echo. +echo call "%CM_VIRTUAL_ENV_SCRIPTS_PATH%\activate.bat && cmd" +echo. +echo Enter exit to exit virtual env. +echo. 
+
+call %CM_VIRTUAL_ENV_SCRIPTS_PATH%\activate.bat && cmd
diff --git a/script/activate-python-venv/run.sh b/script/activate-python-venv/run.sh
new file mode 100644
index 0000000000..6569b07e55
--- /dev/null
+++ b/script/activate-python-venv/run.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+echo ""
+echo " bash --init-file ${CM_VIRTUAL_ENV_SCRIPTS_PATH}/activate"
+echo ""
+echo " Enter exit to exit virtual env."
+echo ""
+
+bash --init-file ${CM_VIRTUAL_ENV_SCRIPTS_PATH}/activate
diff --git a/script/add-custom-nvidia-system/README-extra.md b/script/add-custom-nvidia-system/README-extra.md
new file mode 100644
index 0000000000..baa487880e
--- /dev/null
+++ b/script/add-custom-nvidia-system/README-extra.md
@@ -0,0 +1,2 @@
+# About
+This CM script detects system details using an Nvidia script.
diff --git a/script/add-custom-nvidia-system/README.md b/script/add-custom-nvidia-system/README.md
new file mode 100644
index 0000000000..51b160909b
--- /dev/null
+++ b/script/add-custom-nvidia-system/README.md
@@ -0,0 +1,177 @@
+Automatically generated README for this automation recipe: **add-custom-nvidia-system**
+
+Category: **MLPerf benchmark support**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=add-custom-nvidia-system,b2e6c46c6e8745a3) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/add-custom-nvidia-system)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see in above meta description): *add,custom,system,nvidia*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "add custom system nvidia" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=add,custom,system,nvidia`
+
+`cm run script --tags=add,custom,system,nvidia[,variations] `
+
+*or*
+
+`cmr "add custom system nvidia"`
+
+`cmr "add custom system nvidia [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'add,custom,system,nvidia',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="add,custom,system,nvidia"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=add,custom,system,nvidia) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "add custom system nvidia[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**code**" +
+ Click here to expand this section. + + * `_ctuning` + - Workflow: + * `_custom` + - Workflow: + * `_mlcommons` + - Workflow: + * `_nvidia-only` + - Workflow: + +
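+
+For example (a sketch, assuming the usual CM convention of appending variation tags), the `_nvidia-only` variation from this group can be selected from Python by adding its tag; variations within one group are mutually exclusive:
+
+```python
+import cmind
+
+# Select the `_nvidia-only` code variation by appending its tag.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'add,custom,system,nvidia,_nvidia-only',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```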
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +#### Versions +* `r2.1` +* `r3.0` +* `r3.1` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/add-custom-nvidia-system/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,cuda,_cudnn + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,tensorrt + - CM script: [get-tensorrt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tensorrt) + * get,cmake + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * get,generic-python-lib,_requests + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic,sys-util,_glog-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_gflags-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_libre2-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_libnuma-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_libboost-all-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_rapidjson-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,nvidia,mlperf,inference,common-code + * CM names: `--adr.['nvidia-inference-common-code']...` + - CM script: [get-mlperf-inference-nvidia-common-code](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-nvidia-common-code) + * get,generic-python-lib,_pycuda + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/add-custom-nvidia-system/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/add-custom-nvidia-system/_cm.yaml) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/add-custom-nvidia-system/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/add-custom-nvidia-system/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/add-custom-nvidia-system/customize.py)*** + 1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/add-custom-nvidia-system/_cm.yaml) + +___ +### Script output +`cmr "add custom system nvidia [,variations]" -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/add-custom-nvidia-system/_cm.yaml b/script/add-custom-nvidia-system/_cm.yaml new file mode 100644 index 0000000000..6705c3cdc8 --- /dev/null +++ b/script/add-custom-nvidia-system/_cm.yaml @@ -0,0 +1,113 @@ +# Identification of this CM script +alias: add-custom-nvidia-system +uid: b2e6c46c6e8745a3 +cache: true +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "MLPerf benchmark support" + + +# User-friendly tags to find this CM script +tags: + - add + - custom + - system + - nvidia + + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect python3 + - tags: get,python3 + names: + - python + - python3 + + # Detect CUDA + - tags: get,cuda,_cudnn + + # Detect Tensorrt + - tags: get,tensorrt + + # Detect CMake + - tags: get,cmake + + # Detect requests + - tags: get,generic-python-lib,_requests + + # Detect Google Logger + - tags: get,generic,sys-util,_glog-dev + + # Detect GFlags + - tags: get,generic,sys-util,_gflags-dev + + # Detect libre2-dev + - tags: get,generic,sys-util,_libre2-dev + + # Detect libnuma-dev + - tags: get,generic,sys-util,_libnuma-dev + + # Detect libboost-all-dev + - tags: get,generic,sys-util,_libboost-all-dev + + # Detect rapidjson-dev + - tags: get,generic,sys-util,_rapidjson-dev + + # Download Nvidia Submission Code + - tags: get,nvidia,mlperf,inference,common-code + names: + - nvidia-inference-common-code + + # Detect pycuda + - tags: get,generic-python-lib,_pycuda + +variations: + nvidia-only: + group: code + add_deps_recursive: + nvidia-inference-common-code: + tags: _nvidia-only + custom: + group: code + add_deps_recursive: + nvidia-inference-common-code: + tags: _custom + mlcommons: + group: code + add_deps_recursive: + nvidia-inference-common-code: + tags: _mlcommons + ctuning: + group: code + add_deps_recursive: + nvidia-inference-common-code: + tags: _ctuning + + +versions: + r2.1: + add_deps_recursive: + nvidia-inference-common-code: + version: r2.1 + + r3.0: + add_deps_recursive: + nvidia-inference-common-code: + version: r3.0 + + r3.1: + add_deps_recursive: + nvidia-inference-common-code: + version: r3.1 diff --git a/script/add-custom-nvidia-system/customize.py b/script/add-custom-nvidia-system/customize.py new file mode 100644 index 0000000000..e9573338b1 --- /dev/null +++ b/script/add-custom-nvidia-system/customize.py @@ -0,0 +1,22 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] + + return {'return':0} diff --git a/script/add-custom-nvidia-system/run.sh b/script/add-custom-nvidia-system/run.sh new file mode 100644 index 0000000000..b89617f7f2 --- /dev/null +++ b/script/add-custom-nvidia-system/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash +CUR=$PWD +cd ${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH} 
+${CM_PYTHON_BIN_WITH_PATH} scripts/custom_systems/add_custom_system.py
+test $? -eq 0 || exit $?
diff --git a/script/app-image-classification-onnx-py/README-extra.md b/script/app-image-classification-onnx-py/README-extra.md
new file mode 100644
index 0000000000..e379e2544e
--- /dev/null
+++ b/script/app-image-classification-onnx-py/README-extra.md
@@ -0,0 +1,17 @@
+# About
+
+See [this tutorial](https://github.com/mlcommons/ck/blob/master/docs/tutorials/modular-image-classification.md).
+
+# Collaborative testing
+
+## Windows 11
+
+* CUDA 11.8; cuDNN 8.7.0; ONNX GPU 1.16.1
+
+## Windows 10
+
+* CUDA 11.6; cuDNN 8.6.0.96; ONNX GPU 1.13.1
+
+## Ubuntu 22.04
+
+* CUDA 11.3; ONNX 1.12.0
diff --git a/script/app-image-classification-onnx-py/README.md b/script/app-image-classification-onnx-py/README.md
new file mode 100644
index 0000000000..e74a1be740
--- /dev/null
+++ b/script/app-image-classification-onnx-py/README.md
@@ -0,0 +1,211 @@
+Automatically generated README for this automation recipe: **app-image-classification-onnx-py**
+
+Category: **Modular AI/ML application pipeline**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-image-classification-onnx-py,3d5e908e472b417e) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-onnx-py)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see in above meta description): *modular,python,app,image-classification,onnx*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "modular python app image-classification onnx" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=modular,python,app,image-classification,onnx`
+
+`cm run script --tags=modular,python,app,image-classification,onnx[,variations] [--input_flags]`
+
+*or*
+
+`cmr "modular python app image-classification onnx"`
+
+`cmr "modular python app image-classification onnx [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+
+#### Input Flags
+
+* --**input**=Path to JPEG image to classify
+* --**output**=Output directory (optional)
+* --**j**=Print JSON output
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "input":...})
+```
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'modular,python,app,image-classification,onnx',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="modular,python,app,image-classification,onnx"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=modular,python,app,image-classification,onnx) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "modular python app image-classification onnx[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * Group "**target**" +
+ Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *USE_CPU*: `True` + - Workflow: + * `_cuda` + - Environment variables: + - *USE_CUDA*: `True` + - Workflow: + +
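+
+For instance (a hedged sketch, not from the generated docs), requesting the `_cuda` variation from Python appends its tag to the script tags; this sets `USE_CUDA=True` and switches the pipeline to the CUDA, cuDNN and `onnxruntime_gpu` dependencies:
+
+```python
+import cmind
+
+# Run the image-classification app on the CUDA target.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'modular,python,app,image-classification,onnx,_cuda',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```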
+ + +#### Default variations + +`_cpu` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--input=value` → `CM_IMAGE=value`
+* `--output=value` → `CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "input":...})
+```
+
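+
+A more complete sketch of the same call (the sample image `img/computer_mouse.jpg` ships with this script; the output directory `.` is arbitrary):
+
+```python
+import cmind
+
+# "input" is mapped to CM_IMAGE and "output" to
+# CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT (see input_mapping in _cm.yaml).
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'modular,python,app,image-classification,onnx',
+                  'input': 'img/computer_mouse.jpg',
+                  'output': '.',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```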
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_BATCH_COUNT: `1` +* CM_BATCH_SIZE: `1` + +
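+
+A sketch of the `@input.json` route mentioned above (the file name and the batch size are illustrative): keep the whole run input, including default-environment overrides, in a JSON file and pass the parsed dictionary to the Python API:
+
+```python
+import json
+import cmind
+
+# input.json could contain, for example:
+# {"action": "run", "automation": "script",
+#  "tags": "modular,python,app,image-classification,onnx",
+#  "env": {"CM_BATCH_SIZE": "2"}, "out": "con"}
+with open('input.json') as f:
+    inp = json.load(f)
+
+r = cmind.access(inp)
+
+if r['return'] > 0:
+    print(r['error'])
+```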
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,cuda + * `if (USE_CUDA == True)` + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,cudnn + * `if (USE_CUDA == True)` + * CM names: `--adr.['cudnn']...` + - CM script: [get-cudnn](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cudnn) + * get,dataset,imagenet,image-classification,original + - CM script: [get-dataset-imagenet-val](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-val) + * get,dataset-aux,imagenet-aux,image-classification + - CM script: [get-dataset-imagenet-aux](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-aux) + * get,ml-model,resnet50,_onnx,image-classification + * CM names: `--adr.['ml-model']...` + - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50) + * get,generic-python-lib,_package.Pillow + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.opencv-python + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnxruntime + * `if (USE_CUDA != True)` + * CM names: `--adr.['onnxruntime']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnxruntime_gpu + * `if (USE_CUDA == True)` + * CM names: `--adr.['onnxruntime']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-onnx-py/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-onnx-py/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-onnx-py/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-onnx-py/customize.py)*** + 1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml) + +___ +### Script output +`cmr "modular python app image-classification onnx [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_APP_IMAGE_CLASSIFICATION_ONNX_PY*` +#### New environment keys auto-detected from customize diff --git a/script/app-image-classification-onnx-py/_cm.yaml b/script/app-image-classification-onnx-py/_cm.yaml new file mode 100644 index 0000000000..a2cd1994be --- /dev/null +++ b/script/app-image-classification-onnx-py/_cm.yaml @@ -0,0 +1,116 @@ +alias: app-image-classification-onnx-py +uid: 3d5e908e472b417e + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular AI/ML application pipeline" + +tags: +- app +- modular +- image-classification +- onnx +- python + +tags_help: "modular python app image-classification onnx" + +default_env: + CM_BATCH_COUNT: '1' + CM_BATCH_SIZE: '1' + + +deps: +- tags: detect,os +- tags: get,sys-utils-cm +- names: + - python + - python3 + tags: get,python3 + +- tags: get,cuda + names: + - cuda + enable_if_env: + USE_CUDA: + - yes +- tags: get,cudnn + names: + - cudnn + enable_if_env: + USE_CUDA: + - yes + +- tags: get,dataset,imagenet,image-classification,original +- tags: get,dataset-aux,imagenet-aux,image-classification +- tags: get,ml-model,resnet50,_onnx,image-classification + names: + - ml-model + +- tags: get,generic-python-lib,_package.Pillow +- tags: get,generic-python-lib,_package.numpy +- tags: get,generic-python-lib,_package.opencv-python + + +- tags: get,generic-python-lib,_onnxruntime + names: + - onnxruntime + skip_if_env: + USE_CUDA: + - yes +- tags: get,generic-python-lib,_onnxruntime_gpu + names: + - onnxruntime + enable_if_env: + USE_CUDA: + - yes + +variations: + cuda: + docker: + all_gpus: 'yes' + group: target + env: + USE_CUDA: yes + + cpu: + group: target + default: yes + env: + USE_CPU: yes + +input_mapping: + input: CM_IMAGE + output: CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT + + +new_env_keys: + - CM_APP_IMAGE_CLASSIFICATION_ONNX_PY* + + +new_state_keys: + - cm_app_image_classification_onnx_py + + +input_description: + input: + desc: "Path to JPEG image to classify" + output: + desc: "Output directory (optional)" + j: + desc: "Print JSON output" + boolean: true + +docker: + skip_run_cmd: 'no' + input_paths: + - input + - env.CM_IMAGE + - output + skip_input_for_fake_run: + - input + - env.CM_IMAGE + - output + - j + pre_run_cmds: + - echo \"CM pre run commands\" diff --git a/script/app-image-classification-onnx-py/customize.py b/script/app-image-classification-onnx-py/customize.py new file mode 100644 index 0000000000..43098c71f7 --- /dev/null +++ b/script/app-image-classification-onnx-py/customize.py @@ -0,0 +1,64 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + print ('') + print ('Running preprocess function in customize.py ...') + + return {'return':0} + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + + +# print ('') +# print ('Running postprocess function in customize.py ...') + + # Saving predictions to JSON file to current directory + # Should work with "cm docker script" ? 
+
+    data = state.get('cm_app_image_classification_onnx_py',{})
+
+    fjson = 'cm-image-classification-onnx-py.json'
+    fyaml = 'cm-image-classification-onnx-py.yaml'
+
+    output=env.get('CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT','')
+    if output!='':
+        if not os.path.exists(output):
+            os.makedirs(output)
+
+        fjson=os.path.join(output, fjson)
+        fyaml=os.path.join(output, fyaml)
+
+    try:
+        import json
+        with open(fjson, 'w', encoding='utf-8') as f:
+            json.dump(data, f, ensure_ascii=False, indent=4)
+    except Exception as e:
+        print ('CM warning: {}'.format(e))
+
+
+    try:
+        import yaml
+        with open(fyaml, 'w', encoding='utf-8') as f:
+            yaml.dump(data, f)
+    except Exception as e:
+        print ('CM warning: {}'.format(e))
+
+    top_classification = data.get('top_classification','')
+
+    if top_classification!='':
+        print ('')
+        x = 'Top classification: {}'.format(top_classification)
+        print ('='*len(x))
+        print (x)
+
+    return {'return':0}
diff --git a/script/app-image-classification-onnx-py/img/computer_mouse.jpg b/script/app-image-classification-onnx-py/img/computer_mouse.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e7f8abb6fe93d18af393ea036b24b907cc48e786
GIT binary patch
[41154 bytes of binary JPEG data omitted]
zs(-5)llbf1()t;-<$I%$W`X(%h1F&(GK5yoe$s@cq>9CEm8gLodLPk4{O0;d@~1aB zkV_sh$B=y|*CZcmcpKrE24QNje8hD&@}$K>7!?6CGI7DkM?u;S2di(>imq@h`6%bdr?wyQU!!(O--i54ch zhfQ^=FoOy0x>;VX!X(4wOBF*QCMKQw`T5cLRo~DjZNGt1j%4)wL>t3di7rl+ETe%` z*WbX4ZJuVvgYoa%gy;ubd)X#-ju}?)jfuLF>yfTC$AwFD;zO?3>v$_P6r^SPr;RCQIUGwnme zb=SDl(gqun@#(hwN*Vn8dr}H~FB+CVdXF4G_;B#>S?JQL){mo80=@PDt_m$jx$GMj z8%CZkueM3A&%Bz;=PY@ZChP92^PvDaOJpCpxA*nkhwBlJQ%@RXab{f(d1?x~1NL^O zb0V1!x+`bctB1Z4r0f~_$q$n+O@A?to!sy#jFNg0Td;BE#+mwONnD-Q3;_#-8?d6!<@tS>>A--I3KtN9^T`ATb;=?6; zLfIM@(!{oK8Lu;ZaoC|s?%G4mq)gk`(3IMnN|I8nc-@Fv=?R`^k-tQH9CP>6+i$tV zU3g%}*CB5kwc}zvc1O%J#$JfyLgBqBrLbUX(P&X_fnBwA%!0abf#`4G*9qSdE-UxF zSl?5}BrL=JugKbwfSR+) zJVsX<>Q5AA(D)?>A1IgH@g&P$Y-r+TezvyBs_sUjJ<*4kxEd!0ywUlETl%3cuQ7>DK;gO%>IQ z<|voUmym12@q|?e3G6QeBVVhd=nNm9r)Ix_hhHZ`x=g)s^htN55-sAq61z9})YS&) zx789zS0gjA#PyH7!00+VW>ubTFSkPeDID>3ip;u(uE- z-rX`MbwMY$TPgba@J;w@!tBS>_BxgizFDmPDDWc)-;l4(^UD1&hG=wo7`mwT;T?aB zP2zS)>V3yYpHj7DE~F`UdcAyzL59-;Uo7`)IEtB0Wk+5OXqM+FG{p=S*lIs~gx6PL zy;ZOt48Ldo8z|h8^(l(-sByj=^6mL$p4yGt!&KuWt<{I5gJVRym#g+6$0r7}bYAEG z22^K+N)GNDgzN92JWe*c$BH4uddI$LFy9iwM|xB# z7ybG&_1JhXOlC{-H(*+zn!aXXEBmx{x9f8M@=xYcijw0GoOXG&&WclVYj2wyW<%1o<9!7kwDzZ7F&5a~ zYtpf=ckkM{z^(S317@Wz%s%_ccgFMfN<AoB zyJ_o-A>k9OWcwnfULm{K3lUw$1)s#bWcB6j4~xDn9PwNDi2u|O`V5e}uhrbLSF|8~ ztIzTr@38jbQnV}HwQANoVp~%iZ(-y!m#kosIv1=Srn1V+`{aj_n|HyxH!rkyk>f&Q znOZgSsA`j^;TI3ze<-Rx?ZX$$WW}@?!%IKuG_bb0rexQe%YTC4@2;RRCJ~?>@4@uo zHNz+;qwP1whmQO>KbpB0;F6GU7vR;mo)-Kx8h!J(|1I~LlcdyJ8EPqZ$IFW*Mv zFuUlj(Mgq;RZGn-cP3MNTtAGzcam5%{25e;XeuAFrAjyQ`8}h!b{DYfP@BoUpJStD z$$7o~edHcPzxhI&n+18~RylIj;!^zvsw?o$w8K$OUXe9MzSFX8lEBg2J?U^;%?Ify z_V;?^3I3m?*O6^6XCyyOeLc&(t7RX&w4c(k_KaWC>-3URt>4t~>JyijMIILToowz# zt}|ct8fc&e+h3@A+I!-yU3TPZkS6eh!-bqaqc)pk=A{f}m}l?_Y(w zuA8h||M1z5+6P;NK$~#lS0ADHVM=hg(sBdNtXZ`>J26?ca%~san04k-;&6v9wDI(l z&=dDLf4z))h&p#3nY{UJ{g#+$+_=LNjilY9NgH1k+^@cJMqVlNwG-}9+U2lTS#mRd zT{t4_En1ATKiAK9?ySIxvW8<1iy}hPf(zO7^lKWHRW6wCUO#nseL82)5~(`aY-T@V zc+5|DV8_@%0(DHxWkaud!$0YWBeaQ0$8gAbsxEBt4RoP*1l?UA-3+kNxk|a@=BRSsaA}xh6S*SCgl9&xIJKx!7p4$mdgxm5 zD-Ks2%zo_H$ZtS#qW;b)^+V&%-#}~WQMPkl@;xLC73eL+qMP{I?*(ZY7nnc9%Srnb zJvzMQ7v>nU@AKDrfUSCFkU8^%>?rdUt@BD2Uc8~5FaeVC1#zN=GC6~FLw;j}D6Pcm z?%1!KtslxC%KE0tX^Iv=IX$Qx8#=>CUz>zM8l3Gm<6SN5L~IO#u(%Ds_ytuvs9#|N zhg}?!*b3cR36Mak6B_t3JQd_Xz_ z{&CY`nduPW@TDcn9A|$U3rQRkT{~j2;uTioqF?&tn>e~Tt zYT%?z-f`t4*;~j$_XdtLcg50^MsT+%F{)F>EWy_VR<7awAkS)UF1my$XuN(~e?=<) zFyTS@{0LU4RrDjipy>&-k-6P)MeIh1MB-noed6n}KFqSDXl__O` z%7vinOP87i%vE1msI-;mSDx_~p;<>ZR1W>6H*7yDfv-DP5OS9HV|VPMS2~RFoY#?l zPZ;B>O@3kS&3^fqsSI;hkh9HQ*E(-*OyW*vk69o+EqJ)xFlym0gns7A0IwPUX*tnq_`M_xzea(34H>d#e|o$jm;Qc0{f z4}^}qMuz9&uw=TzG-6toY}Lu?ftWtB!dklKSj>bvHs$>mK{j$%^88w0Ne`G~@{ae2 zOs88cCK#yG73QYstn?$(Ya+dq$~5o3b>2{5dc$c2Qe2W+*$GqY>5f47@);}1Kb2bU zSAGL+I*W0fY>V}PJ;#r;@h==b{YV-QMgrlKqBSK^?>e?n{MTHuKhsSpeuCZAu9rE| zW(%Caw}B2=b= z7Mas!Tj=s$1@9R~jqKBT1ig>x@hn>JMtyU&Y{?D`37>2#e4LhR!%T3wb}WZ1QWKA{yCX*IgjKpSZ@^E2Jsn zCAe0&&ac=j7^(lk2qYnOpH@5l7*PG1=yaXA*yG3zs&q`dxYOZB==6~y=HjW;YgCkP z`V(T0TW(y-Q;J}Z>G?PhmQf{nZ`B}%>E3vO{^Vr zPGc!$$oh~JZm|GqdQvhSLlJW;M6ZRv!}Qka-a)_o_G4-Zs}+e}!yn`+LgMz3EAoDk zS#DEh8&&lYFx#n`+j^4|{4~5qE?&D|tA2y|Muc=d;^79w>6cQ#=Qzc{vdMlqowBrN z%q5?5apKA3pOnX+bA47=d&k2{R9q$K|?g5Q^QDbD*N%P3Xamv!|$!!%}; zs_x7BMDHrpT<@w58f=$v%8R>D_d;Jr`T)i z_qDgXxwrg2I3pVMd0ug1^6ZVbk@YjdOR?sEl=U-8d)^fHrnfxacB;OOqO9PGn9fRAHV1%`~S$>=%!p~IC zX{~UW=eUXbB^6}HK*#x9MTA?-xyBAhVuy)$oXeLe|_2l|^PNV(17$ zO#8CZ0~Vh&F-z)oglAclAMK_5e*9m>*#BR_pE5H57J3nI1_T1E05c+(SU6c&n3-94 z**RD_`FRBd_<2z%w1|`#T1Y||g~G^VB&3hZ$jS(c;goP#B`K^7_U~FXBO@aV6AKRu 
z3lA2JLSz4*kH2fx4B-0||DRg*-^Jwrs8&NE0E__+$_ZAKO~G3ApJFvwXh$$GK$#f- z6sx(8z?2!_de#VT_dkVd9$s;kHbx13n}JzA)iWWIaW6g?PaY%AIa_tcE+JVNJQ zf~-*gSJ4_|1=7*~6s`ZGY7OB!0v5vmXVv;^9;GX0$og({o_qp_;>E<*ZV;(?dne%3 zPo(SOak5lEKmHD{)!X?D3bV6NZCzt|4zM`fl(IH&b*WxVZw}AE@*Ls2oMHmA%y`X5 zS%|})Dz6LotcV+TF(#G49Hl5*t_l7ajuh;cONyP99DSZ31+W<$N&(osT|$TEA?&In z-g%p#;sh&pt!ffM?~-U6<1Xx}inmPd7}a}t*Yr<|qaP($ig`NZ4kWNwm~>M_0~xPS3u~I??iXI^cK0t|Xo|W# z=_y?QV_IsV6umWnT$$zQW`(xYSpQzIm>5zkhwvWXYpaFOtHtE&KzqXzB>y zm+`w{{tzl`Id9(1Y#Ac|{j6D781>lmI^mTIePKw~%cRnrdkK&Rq>xEEdmv|^vN&Hlq zm`jtOY)oPkOI@8%Uioon_3s#|^M>VN8m=^S2Y%VkyN8CR1|v(kp=Go zqvplM8dAZlYGx}lDXyRqSJ>PHpYn+RIk@^at_Cp+%5)g$2RRO0vlUj*=ETv@1U2I> z$V?7p|4sR0`NRg$xx|f0cMsUyY3N;4W`-x81|x}BqcIx`iH)mUj*{8N>h~C$u1p#> z5JUa38Qw{Y>f>?@kP7?EViK$v>84ukc$&zIN;0cpKj9Oi8{d@OTmacYwa;`>*&s9K z$I3dYeO48YEknw{@o-DPfnJ?iQR{ul3Zl;Ql?#Jh#@`-ydIBKYTFF&Sn~N&^+q8{u zjB8=RyNG1?U&-Iu$|i3^%I=nTn#sl=Na#mr)1q?vbSpfmw051W0$25wR5nNxTT{DE z3Ni9Y=rJ$c_|U0ro*92#cEkhNc?kyq$9>rlm@WD<27X`|fuk=QHZDUP@u1SV+0$o<-h6^( zFhjr5Z|GK_A~HHNnGR``64CiA0cz-s=jH;s9rTGJA(LE2_2fqcYc#BsJ82=ii6#(L zDm%Dz4(7Cr2#@BH-(e&mGek~bfVrjJ7M>cd7&<8l$w`{Uvit1^*z^}kL1wz~loN0Y z(~#6`DIu|j5!Fs=4fB`uzc(!WHcB#ku6=Nk3M>+r$`{Hy85Ni2GkoZqi~5USm^OZn zJ0vZ@TV9-gE8SAz_gUf;9b$O1p!d%5v_pTWclp^uxiZEzM30E*3t!0e_chk-McBzz z_OP)9$7e76)re^1j|awdw(xBrx+Nh@J#L~x|1*YD;TBgnG1T|Xx^e7X7|=-~NbGuE zjFsCYb8ma!-SI`V`)o^RoQUPdUy!X>JO`=1ySy+rX|zwnb}gGaK($qWwFArKs?CtC_=Eh|^(Cp=4!-bJF2;2XUsKwFV`bpnNqOzMuNy)G$tpU-AWiYdG3-{=Ch+Z&3C+<z+pt>}ip%lE&*^GmFe0K3 zXLY!J@*Lcnfn<+Hu3Dr+hji)!RDk2<@Y$<%VxFHJGTg)$q9OXm+J%S2kryw(S7^U^G25zRDg**;uq}wj#rF^KX+Y zI~(AEnJk!*U-)E7(La(BTenIYNw32_VT`a7ajY0S=>-kMoka4`IyL{ra{tU2* zN!JP$B6^&9;=GC@R;@8Wz(wh7KcdGvhYv-Qe0Y1M-GPM-ok(iH@Vd`p>qJi25v0NZ zbiynZYmE(~zzoYfDRAz-2ZmG(y5mfR_IF-Xyg{WEtcBU2QmJgoCOWm%uOA}MRf@%N zBHH3iMX>Uab}6Xqn9hc#8gc`))N831*e)5Nf#H5IK z@{&FvoKZ!h;VAKDgLL>VsxHj9Rd}UBP5mx+H()J0nk`G)0F%s)TLZyF7UUJ{b{*Pl zyM6S)Y-|=HPQ53G#&NDQGaVB`&2SbnWZGD86cLd-Ld6o;C_y1gFgK;%j55IM^1PZx z8^Ic&D$Z}x-JD@p9gLmZy7bNi?iYu5!O7%B*2!ZD*rmJ3)qo6&L`gUDiNwWdcx%}q z(t)t57&9QeGA{5~Dyt=o1SXHmFtfp7qVAHTivFhO%`$MNYP^`*dUk|6)qZsz60LH8 z^P$f(eDZGKqx7S9Q)ew87rYyJ1_TYkn2J9OsgAey-y{Rz6xULQmI7zEzdsOl;6(<8 z^hFY>31V2@mE&0!U?#!|Gi(YVNZ0`tYCE1S1A3s-O8`1`%W|Z$IJ34><#LoVdQ_=o zGrSD$BT4o3ITHn-2M~)UvyAHPX60RmRDje@K)TS6lG?JmuJMik zNdXxEI+|b)+V>9srfyXOh9hv0{>;^s&K{mM?o3dNpX*;=I4{w^a*MweQf58Rq1(^G z+8(ya>~WE%vm+wrQ3-2>m377TU581vLb4C|r*KLY`$I=AFu`~9lv;J48a2vA;`k65 zuglT(5S?igQ_537HZ|ML!-_jaztV~aJMWs*lxwzo;(?+6It0(&YE~vC6f%34W-&ZV z<8`;L17;IIOCH3)D_N4Q{1IG$JU+YKrjLfRmXs!OqK~#d?agWDWu+HM5Q$6H_^Z)0 za=-sw0Wx5Z$h;zt11BFl6*a*Brj2rID^iad%}ryKsNh7T7+|0#88$-8IKZDSzx5N^ z&lhLzuHsP#&9~|(^JY|!qo)(8Y@S|;j|iB@jla)1t~dG8vMe-WK$J9u z>)-fbe7hj@0MM4w6FHA`A&Qp!%iUgap(-AOBa%nwn247+({bS}-xJTPSeSt6_Sd2e z{R7|=PP%EH47FEbEvA`epR;R)X!P6pVWs=)o&xdKVk6}~^7Ngu4Vwla~$*fBj8 zKqQMR9f2EeGi;l26_vs4{CEtbX>ttvnHf%ilqWTik`c95S**&()xdGItO+d}N)xoC?;!Y(|A=eW&m3nE| zY32%30ESCg1m~SMl4d&^v`z?BhtUwTv8V*5#^8d|L4T1nxCqGO9GA(kQfPNa9mrl{ z*&=Tnpt6a8LuN&OQ18%;Je%2&Qh?oj3^E zi?kdbFi}VXX0arH0H)GuhRr>5Amu_se-$iuVC_eq0cUR)8jh<`#LQz}`i3@S8Ix?xF-_ORClK7s&=PzgWGrYnRGOOvMoPdnp$yNJcF6<6 zJnshM<)_Xo#@>SW@ES&esSYNUi6#dG!TY?N@%X#o9|tC5epTNY;6QV}hX`o_%p9T#3%NxVp8bJkKjf1X*sOluaqIY5V);2fVViqJT2gA-6S zF{dAp59G=Uji6lcF^z^62(Pyf;hNc+62H^Lh85k_%vjjm=~8`<=S-E%&&9exzh|0~ z$3z|0P;sM&NZ4*YOI-@3CR_GU+2&zA6+7Euv~6`Hf4<(cli^oV6Dq?9(ZgR(9YCZ{ zQ0h`LE4F=F^)}D+Wf*a$8~!vwVhXVpDl92?X6{}{(gW<|)7l$zGE9)VJ41o@n zc21O;5RypjitRL{$%QcQT9%>P)#Si6llQURUZ( ze6bU|WfS+dRTQLraoAIMWgAVepwY>4$+C zw1q8Tst+nC!LOAW&Bv8%hDjlkl9z(HJ_8Ga*OL3CH*KvD?cQmI;6q+^VK#!clX}>8 
z%rFdo4|dG}e7c;pW#00_Q%YsAV=F$s>y!n2I6lo0N%$JVD6FLQ842czEn%oIcPc>l z8>O4DsT{ZNiTi z*Q3G$G;Zj1qA?R4G)tK`?kbS+VK(xyS9M2OSk~xdLf0yelKe)}a;QwrioBY^wks%~ zeR?xK8sSW1l13#6mUmK_~|p~($tnyzb9>-P8O#9 z!l2?E$KFCP96-!vQ@LeU_5k?+6=JAl(l0$Qgf6+awrX`b$2k&AxW93R`VGj7V7UQ! zN)8c`r4;5z7Z83<=@ESqQ+9D9-LC0=eU*h!&7gC@iCNO2a$VO)s9>Hf{#2VG>C-k@ z3Sx35mXOs{8p5C|qXKRZLYQm;3pafTo4z|Nj6*Mnrs$!P#eDxxnjthC&qNcgmZ^ie zWH^uq8Vb0J(HU~GgRQ3BJMcgm_$0lQsoyMQb8{)W!hsdOe|0jb4cs-7a-~Il%urGb zFYf6FWGFbwmZKD?RO?0SF)2H}ndBM55AV#YzSA08R|n@eaL*rab#H%QY>i$%onu@- z|ME-9r}R@59SNVppM7ipLMifJ9j|Q_)zIO6R&yz`9(*+>_Mv@XW8M7?599l12M;^w zKkxOr;(hkUJ*G61cV8)nsYQ1iQa@R5-2XnU-dB3vP%AUx?OxS)ZFuBKEY5RwK0U9d z=m4KUq+cJdz-E`jcOcOW7v+yYz>Kc7wak~V5Cd~$B!TO%tCE)`QGk1NrZXd$wgiM` zFEl|NVK=r>N-EHO{!c>704laa$PsA*e1qB>_{H_F(;c4y-)NHjtp$$oBz^xnC&2wo zJ|*^ZrFKzXetlbd-q|w=hJ|-o(yiG@vQVLN69(UcDyXVTr+!Kt?>xg1ll~-`yX#CI z*0kY@S8buc3wI{8SN zQ5mLl#kXeM$QpLl$&QtsIkilKuqZ*QlO&2=7cS0DD0s%I=M zinMnnisbzDb$y=hMA%-tcQ0J}ZQGNRm#v@6Z@$!fX-rafcK)Q8yZb8Zc2>kLCYM8M znEMJv+sQykxEeN27RV~e9(L0tFV;NW=QS@*s~A4F;{qgoqlxDcE~v2cPGZc_Jl^Xc~!+$<=Bfm zwOoU*TrS?a@zMCB;x6M>q3lEc_*b0~&3K$ly<&rQaUU(iGlMw`po_jSE`cNpo@eXS z*cQYfrP6M@_9JepfoX6+Z0wB5757#pq}DiPvwskFpw$&$2@~(9rrR&TU;hS($T8O; zf4br_d(2w)LRDxE=wf>e`Jt6N2ypd*N;=#@(I9!J>bA4UBiW~K)V#Q4P`Qn0x4XF6 z>4*C(Lp(Z9V2stDgqO6=8E9-fH8xhkwPyG>mjNsj^rJD!{ppF`+q48Z^?rl19+)G2 zW@AfM%AzBm452D~MYJS2CGHF(JOtb6Cu<}qKYLw#qI)@K5SCq6{b#jl9hZS_mBlNG zlT1ueG&OvJ{v;E7RQf0FAg;rVt&w@cy;mFBlLqEw0{3FN7w+9o1dCP4`a$)E>C*5t z3&HQ~wFePfSlLw3%p2F#zhw-^$0wh*+&w~-_|s0qE7;jF!P=KO)$AkolhN8Kh8wRo z^Er%begbc`5;w` z({byf7thVR4d05{JS^!cJ$>zpz_sg*kDtHDZ+mHp-RtVC)a;U?ay%ya`8|?5E;0~` zJL@7P;{7SY|HtzYj?hoSihQ-hzN1kgHffK`j`j6FxVxH;)rmXgK~8?cO3Mf!oCcL!HY^g3jGPS8jOGZ2XZtd&m>a)}1*g2aW>odxw2#4qkT}+NrL$jIJl1yi>s4 z9@vdLkLNYp@!PSbFa?EL7wRcTfkkOH@eh)5>`fcx_TWm@`;k%^H??3TbjO2$Y@W;OR5_#-;33u_c*Gt_li9`OS+0>Xux?cZGZH4ljL*|vGNNL! 
zrPZ9bV+SfruAMwmSxk)BCsN-_G8@J}_+tdG&5o5+b;CwV{uqSWQ9&XHd$4xo@W)!7 zgqMton%{(dcqZpF-!Ko$4b7*15mv)@-8p-6<=kf+YB$PlLG*0ZNm0L8vFg{oL+0sp z+edky544|d{t%Dq6}|sV)BOIc%cc^av>OY3_wLG`yB0OFTR=YKkmr5^>seO$gk^|H z^gqT)>no6ITWN;REwkG#lL_7N#aIqtH;m#h3F#GnUOwAW77dnQJSgtOYtkq~LD+Ht zCX~7{Gq?Pbj&LozNcvT)gp}wnaD);Gkl4>d+5(NoiVR(=Jsl%(5LJ@0lVN3;EdBt> zd4ea*S`JaMOJTnL8pHV{uK?=R+S+Q?O+(@-O^KW%cnB*vs_9@#k?bdv#hoyl-{x!r zJDPx+dYT=}1}^d$L#oL5^H8h0PQNZEqJF0=xDzPh$2?(Kk#}n*_2`TH)ZXtqYWIgU zO=I@`K2_aM(zz%8A;xWV_0z|gGq0+^H`Ue7DuMq)xBYNp`)e(BYind8)K59p;farn zY0zfO)x%2@&$&h)7QQw9Om>SmTl~2FOKg7VWxC|}PrwfCdecrT}dIQaauaoLd+R~ygZX4p z;ib@|3hg_?udq)a^SzkXy`21}zW=SRTvGUtz0KJ-GeMm-$=b4h zgO)kmTf5SJ5?Qa*I|%?B{+k-NzQADbmzNVwg%GpqUaF4;as8vpS)5JtCkjbl3Lj@| z(^3SgqGPd7k@FeglDT1kC@JHJ$hUPcC*7oRx03Fb2Y}lV<=pLDZ2I=z6jTw8aK{UfV`PbL6nRI$4WE+|^=on^GX|q(j(3jzzUtGm(on6h)=PXa$OXfKB zGBc=lToZI9Y4CZYcQo{%&&XYcK748TRG`v@26R7PuXuitk~}}c{{M_H{cu0b;?Du zr_k&f*^v5HbG|-txNdzG4ekShSu6UO8jALkP9FGEN$}no^&R=x@gK>)eV}t({fYwrSsKbdfE@p&HT?uiO0duBrIKBdCZnRp1j$=ckq{9 zOZE46STo%M9g&ymUbVYgm$=HO%EA8=d}|+6^ZAQ#HOH3>)3CVxA5LG{#oSI^p0}kJ zCoE2j-F$tQ{wmUcG2zEb!=-y^Pc@bnRBE(cs4^p?elI^0_FZ*Pb8y~viF7TwKY8^h zo8}{JS2-hp{agM%kMqR39~G6@>ifLWJ~nl|%8jGtH*k^Fr~aGdJGlPQlEUoRnkKft zl6v4}FrtqI{4;j_5E&tav`f}%6%i2Ku}6G|->Z1CEKh4SUp{%RH4gkgIn@$yw^EV) zF56G`pgKs)fQNe|pbdF$J^me@aP!{oG%4o)kaU$%QNB-m=>};KWRdPgdTCHtx*I74 zmTr{pkZurI>6C8i?vxS`=?3X8-{<##Kd{epIQs$4ea*}@GuO<0?fCX1!^o+5YkCeN zS#&fiP`EKJ?5>QFp-8_a=^`d+2*8Roea-$OA%K&j!K$Q$)r}`t@SPQw(eN27s3C~r zo8;I6+kuVbOMdJaNwO&EGJcR%gIq&m;VeA^8&7Pn+;>*8kOVD9Q-U;_@G*ZYKe~tc zy6t7^e<0_ge;{whM~%NnaX)V#`V>>0;)2&L{(&gJ*Zw+%bvBz|^CaAn!Ea5{bTy7)`8a`JpF-Y|Nr%}in44J;^N z>9L9pcz3g%kR+W9IXPLS zi3bs79n)oSi~C2bYQUA59soWo>YddAfW9QDL_m9Ch0lqWUnXf4oE*;(TPtaWuOXQz z!snXEvdQ3@cqszF@r$S@ebRqEE)iug5z0T)ye048=iiU`^$1C8C_0RoR{=b*!Xd51 zn@tD%n^@*!1ETg<%(qL=to~Dvo%!sIf3Gb6B-Hbpp5MXIxJ062lZ~V-U5?@;zB&nu z`#Lr+8K`idH~GO8o^!n3PHa88t_khghUIeA|!t;##|G@ISHs=)VC-|LxC6{aJq63dKn+pd1UqCU_Yxt=T3KCf%( z=V05?3q7x7JU^B=+FD&WX!9a9Y4X;y87|}(`jb*IMR+VPGOVXS4-h5VwD^&hNTYxo z(6EE6_bU_4S*aRGx`6V= z&VCHt{Fb7hVu1VI8!!EM)P74)a``N(3_qzkrVWuErx%EbO`1)HkgmXBC=rtti|(fe zGJ{t(v1YXCob!A=5?K){me9p{bqK4J}fnw`pO-gN@!##P`(e9pO$c5bAWMy zm0fALz-PiW6%_o>@4+UY@4dh3ww+Eg#c`~e_4E{1Fz?J~04HrxD`C+Kgi5|d-Pz4i z57oLLMA`;CTPjT$=%;^~q7j5>m-X*;9O_7rs$Sm5ls8DJ9yMnZFyFaD2(7{G@?NsmYs5UB^S zy3XK|C>lYZ{1YRz@1XZBOw-UnLD;f-;yZz`>fq0D{HF?6^RHx`&Z-!ijJZ!gvGu_ zuV{2fHjArVHX710)HQ5cnGF$bklfO~vkLf;*=^PvUmmXs$d|dzAI&xgB8V08OYxZX zW8G)UZkUR?qOrpEoKmSt+8LPBz{cP-MzfUuLc^$0JZ^(is1n)Uo++=Y{b;iL0*}SI z9ooIABkpx{MEKX|jmWx3h4^%I+Uzy4QJl%T+)6gZgQtJ#QSmJ4rZNqfvhcb^^)6L? 
zclfiAylxJx{&W?5{V0*>c$47)?{HX>z{sz-!H45%KfnK@xbW13SJLpBENfdAb|A~@ z7{C*KjioQFCS~fcKEH1BCGQ%ABu$MQ$4Gy|Xpo8gK+?GTgRgWKE)jT$Xc^z}wVewL zN{g~U=ZSI&T2+e)ZCxPfBpFV!v?q5ZD$A7|@GG)>0!0s4vJ5bHb(uSUh{sj^aAe9K zUmuOSA52n+x{Cajj%uOaX6QvS8#n5ZY!1-aHKCg#0^&W3Ri3(a%4;?B$VQelryx9E zdvUO|*V2sIOi;7@<=a_=+dAun0(WHN$}pJitSVxAoZq77G|_pjepUGj?Ek?qOjLs6 zHq2)?szi{6Sa}{Ha$k3zQPVz$4SjxMvnUcoY7C zBAq3CHgbK)qC0U7{(%H+i)#;jZBtJDJbjCb1|x>=K4pZZ#>QrZ6BHyy>%Q|#eQ86e z9y`Qqqs{TMT9^?PJz~~P0i|mvA?*+1a&7`oMf{K*VC?#X#xLe)KI6SBNH3;}g2j!v zcBx98^xB9?+Ec|y((+$t>Cam0>7zkB1dQL zhTNMkAxiv~1&}V(V$()*s3MtSnc~{vBJbfk(k9YHji7+l+5?FExod`Y{z^y&(hBDe z5p^L|bPaL@0)6cuX2Z}mf=;?Rt}ua^R^B%OL$kKr#JQzuxdGn{1qn- zj>|MuKfaDUSj`h}UY}4b>~hNR0b^Hi94MDh7#WI!F>gIxvdD)HHDl%`8uRv_Q}#NF zPn!KPfB5n(Jt{GZQ%MWIRCsgy&C1O(n9EH;SXV<0C6~nw(3j4u5(Jxk-sKH_K8c(u z+-KRnblmd%Ykms-?&YPnVVapu!2dn*sL6!XDvCyFQ^N|6Mx`a{d;(8M*}(R@5TE*R zPQAMoCy5J?jv1i5qb>;g`7@^;=vVW(Tj9NMu_A)$pkuHC6uJUg5A(8vX~%h6H2=#+ z1?RiA4Ed|f-w%%jUq)0lT(a^VG(Pe}>;=Sr>O%E8h6`|;@{5DQ>o#iwkLo@5wUflOnQKmGM{8wBw*e_W% zw5#vnIJw=YHB;-;v5}7*9lQFYN53@_AB1~-#l{SToO$C2$e9`Hv;JO+8*Ws(bD8z| z{tj;{%GA60{rnG<`?n3gA)>kFq0f{A5YUs-+Z0G$RMo_dZ+v7+t$O5iRQQ_VR+E_bw-b_y_6k1NSFOb(5rUFpv z1$4u}Sjp6tQ0nc2fI-^!A(UgXfo{&T0ErxyS&K#`d#YS)N2k)}_Az)Jio=L<)Q@*> zO8QBL26C9TIgJwBfzq#w+UpjQp*;1HDzx$dWtm)a+Iql%_K@Vh(=C*ekW{k`Ta_Xa zagsDF1gQ^3Zt!`C(|A3q!>3GVnnSl2E3oLeFZ4Y_K46UAX0aYSdLhXy+< z**N64e44-KZaz%I(l!Z54=y}M--fqyp+g7{C3K(S1`llv@rw#~OE)L1nSK{o=(yb2 zuQSeN!%lU>Rv|$fw?ih}b9zz#K;2RM$M=t|S>%C_e3#FY=@*!O_xkIX1)X%ypAUc6 zT+wKDY1`OvByhTI`(RH8A%1PZYjbtDZ7@kJ!+x??OESJim*)HvfTUJ75WA1RNP3 z@JYzra_Rv?`k%?T{`cTSa|WoJ{&?Vs-2aT=QM2n zAIQeXd%c+{4GTkaqk;E%VfV^*i}1XI+1N*yw5;m;@2mpE)~e1Q#EGjt*s1z=W%MSV zGE_y}gOzFKz&AVI*^62gGLwcqJ#${X9jB`0qgti;$HOt};5^3bGZ&32t0P!T5n~Zv zO3A!aQPK2k_K_gEO=Tk+r~Ai<5xjZxHmyHle%-8a^oms3kEz9f^-_WW3LV~Vys;b0 z&XaGJZ~X1ZS;tiRy_uJ5^or?uTQ_0UL;h^}QygCLT?3=;qC0$gyBO~ki`70`7((kq zJAuqvmf^K1-(A@EM@?fRh(p!A&!lf@s@}NA(wDQMIEcDW+)FD0a^2DmW%Zo{c2OfW zSPXwsbi#__FJ{V|Dx+N=2=C0Cp=U~V>w4kNjM5{7C)g>v<;btB_YwB{R9K^}P^sEm zR=WBUSpb%#Xu1O8z}2`%WkZSxjVB{J75rgR4HGZ$jtIR+b4l?IjrqQSOOFT`=t4F1 z3pO+L{ReV$^o@@EOC?)873Y#r+4a$}`Fj&PGSNBXfdUF8MalO*$XnEplysNKq#P9e zRLkRJk(mks$sY=Akkze9sY>2OlD=$7Wyycd7|bXw7o=X&np)$SgB~Zd;Yj3!h>{;r zwmDPw!y|dFsufd>dbBU>Vm(Ea4(diO8D#eo98ab=BXo+1E5wZK1|I~j9*tnd4(2Q{HLX6VZsn)=8 zwCrUTqE+5PB7M6;?#zcNju=f*L*UnWN7>pbQ?0)c`dE@?qMYpX2#;FoAORdY*^y)vxF0gSk=>T6Age;%IDf&)o@otjgQ;cXKz#^|x(k#PiCq z&n#;_TB2M>XJK*Q>jFff4tN{1t7Q7@mzT^|M)UP_s+uVtl^`xZA*S! 
zIKSUy`j6EelN|65eJP9JA4QgG`s2rLZmy=}@o#vzF-@dv^-;~dNurUvIm3RAw?Bkp z(Y|S~%At@ObWHWTIWHvVlDeCy7J&7ZinXd#1I zsIY=Jg?A<}@OLWrMSniT^i4?_qs)tkh!0K-!s#u=X3kv=+Ui5XLmk%=*gd~FwKIorCMPd8)`;6sJnO2(MUzsZqIVC zX0~&h1fLKd^TQvRw(Rr4Uzei(1TIE?-h)r8((Ad{UQ2UdAjkLgM~M8Cx8Xp&P#hgc z6{f6lc>H9@W^nGQ$`zNU2!knG@9h^-46r3C=1FMw?p3_vr^q?=;dA4gme=2!G6z(G z+)`Gtd6mix!t^meI=4SxRvoBW&1zHhx5%5A>jaP}ydunF;Za)x25E3$vR^O+;&8so zI>lxa=TgnDZ(k>vjI~rn)_WemPHd&k$c9&7It%k$?A!D!Knkf}c6omEdau(7$=-7I zx+j!M9!9cW6CI9E~vW3(-4U>9gqUo%)9T+l=tYpp~DpTU=&e z(cx)aD@z!hdm2ZZEwGVV`Q0{X+CS1{?17HJc1=ZQrxHH!_O$^ z{#!vlCQMa@jXUzZr1Hx;PtAJB*PskDSszO)TfFGl|J;raARzQ1KO6`LP2y}V>cu8wUAaE?U5In}s8W@28tJ<` zI==JB$Vi+;6qhoyj&e5H2i6HxQWwhMC4{!N&{q5w?a&^aSF)ci7+mioqMvi4TBcj-eh^ zFo5TzWT9_(;><8%ubK;ps;Ud0$1-Xf&cI}F7N1bqZvJ!ne^?fI93n{tIxk5T7+8Ea zLiOqcazw&HW%fqZd>E__SoGxy6LR<(P`SSVqPN-qfCd_mOdMbK z7oDdK{Jh==hdfn?Qvs9Tm*Vq;X~fra`Rqk0eh~N2%hzCr*|<~(F=6p_>y=)Ffemj| zRM|*Q!*s9JulMvi|3IZ?XlMv$W+DqoO6d_d zxJ&E2(GwR!W5LmrWQrS}yuUHxxJ7;6hD_4I@gzk(x7JH)_B@HYd)3iAGQO#TwazWS zPlDAoaA-uYyZ3)QI&(hI?7$0mB%bL^KoXuK+dJ!StM}?V?Qtf<*^e{;VN?(cW|az3 zvLgm@%eh>k289GOR)3{H%XE22fBX`3jnhkv+b+2+*8TRYzGUhP#vj{_pmQX@0P7E7 zJwGv451ShW`aM1L1o&sV1^7vD61>_pqCZj|q4Tg=Vip;i&drvtNdCc*-P8V- zAWODm49L;(ju_qvl}WydCLFngQFqPV2O>96kgxat!^w|1jj0}~5MMG(7WBHL{>1y1 z1VuKN0zmn@92{9vqJZdRV45s6I>r=92Po>i$RUdj1f;+;kXOBx^fH5En|!mm{B7oq z@UPd<;TI0%CMS>JB?UH7+01j>xAke4-c@g-H(@aqP?u*;=W6AB{;dtV)U$nv`@R0r zZ95BusWK|Mp8w-cj7a6-!)GmR4^E(kJ9lZzvP&DQ0UbJIaE+%fa7kE#u{scLIvXtc zh=&z*w9WF>Z)~%QB^s$_pW|V9^!9lzAI8l1uP?KoJ1x2B_4Yn@g5Orf3OzY^j$it` z87WwD(b%}6ul)y7?Y+vsMqr@ikKE$X;DkoTrY36)>MqBK;(HG_e}>LP3q6w&J85GL zo=&jW+7=fL?)YDMj?kCxJUH+V-WeD8*F1PqZ($T|`^2TWe64DBYH|8z>B!?x<~bug zap)QyBs8`z7JDHy#2(TpH1=04mWOtoH>~($TP*K^`lhn^dg<~xlXE&oOzD2}0NfM7 zM7n81BT%I|GCLw_q*Na2aV`c1)=q7}kG;p0vVy<(`Oz8IvVuFdah?ejpGq#W2*^_h zM|<{*7ir$d0}4AHZm1mYKG&dNHPSU|ldu?2*uN5#CVe4`4n>y4qR$joWEPx4qE%b! 
zqUj=&uKj$BvHCqtdL!D>&VIa>$4IF(Fje`I$LJe9H?wkNqX1!~0N!kYVZis20$*TU zgP-(8DU!ObxecdzUIfSH2b~ww1Cn!AM~NY#GX-fb(n3u&7bZ*LQCT7I;XA)VTY0}{ zXa5w5b>s5jndfOA3oeTaal=t}_Tb|Xn>SDiUPP9ELB%WfuXQF?P}juyYRmmeS)z0IRg zTa38TADmp^w~FEGm-AU+8EPRw{W3UsN=;WEx7BHhT?O+*RuMT5PbQyk3SeRC>Oi;t zMZcgL9%ba09xN7ffL?#32dU{T(MzHSzXoJqJp)sE)b4Zz8Me4*o|a92__H}Ts2&>g zFPzT3W&VMf4~P2=gu*n9KSCr@u9n+~I-E^tqnkaSMC%ci~!qXBOd&CUQShhKP@z(P~OS_!E^ zXJJt=upRM?*TCzgx+^04U0nJtJZ%1~De|iM7Z`)au&0wNHuE!3Au;;M1HRe33I@;2rgjb9U(xoRStQx6WIp9h}^?%^UKBe<7sA z9z8)MB9ADN8h~RRPfQDq<%j$nfcq-P64RdTHuw0y9ZKIEV;`6kHPZ1kXpbY-0^Z%p z9@xcb5--jSoDa6*90x~7d&eIMj7DBxw~flUsdR&-*69t@VV26I23F-c+@o~y6x3T} zPzNk-6qIFqq$f%I@}bGS54h*Gl-^M}7S+@%e*(^-yqOC*9bL9Lixc{dk_I@0FHuf* zWZB$K?Wr6F_TW=rudvHXsRToU^t;G9W48(9JCx;pdsNRJu_dlYkLf;ah^v$3x437s;4{bI@|82>=l*-=l0x4AC( z`Zo?_wvUPtQ3mcEh)*uhtCw6w68xJhwpAO(-nr|ss_Zic*=NLtP|jzG3k{V3&)6BEq2R&U z<#ma-Lm8sJk~95tgCBe6;3?ldyqddc>M|yBDQ-Twoff5Ke^u}wChp#Gy|Ars)RTC- z_6K_^t4u$s3tSRek}sznFzjHBbc7LDEf=}d%{kC>AA|yJpNKKt8A4eO^-h{aT|vHP zN9h4cXvHe+bA%?NW4!fN6XroC3+d^4xK~MUlzn#?{9!TPH_8tCJbKtuFo()zF|8&| zjo&ilOM4_?O9UZFUID`wy?}gW38gm-@7%r+C~GwO3|_Zxpw_RC2%$*3|lYcj1ydq=A`23aX%zXjpe*jJ1W6 zaBeS##dz~gVfI(reP`OD{RNu$Gr8=W=#Q8s1FS6l91M-HruF)^nxjc@=uBfPO%Geb zi@a}mgjh)GN?K@w%HuL96#bYX3$kqVZZQrSHn?7n(tl1o*u1nu@wP)T^Rg9V$s0^uLsjJKzt&Syv%73$T21tf;`Vx#X zZ~#tu12{)Zj8|`?0l>+zBVu`khqfW#>6+!KEk7lUSirCl=AW|$<3wLLWs3T$?D{HC zQfY^JWK8-EhW062_h|YOZrk$ybTwvF@Q0J%e(Ws6zPgW@RPU(BmfE70_;ZWLqN6d{ ze0wa_Ee?r|vb}K{6%Q{2L~b%{?LqytZfvN5udCbQU2O%(+h0*F?=mj4@FH5i(WuxV zcby7g*_8(kFQE))-cddT_;#ID1ESOvx8)o;PBVN-Z*zRRXeEdX>CGcy=JG(8T-is8 zyvyYf&d(^k^pxCetT-|_!+H%+D zavI6?U^UB6%Uc9KWG})P-a86pbN>oT4afRg)HLhuQ~L~Cyfkb2eUt6+4i)+7Tvh_uN$w9efDO*^U?57&I>YAL#QKy|_ebMZ-pwrm}yj zyK%|QdkA^Y*`x5UHTT?#g8Tj$A{gbB@Yp6W@X1vSyeY43asm`_lfL{0D#xB2tD$vt zNRRjemD}VkZ-gr4M_&u60#O^QmVm}rl~~6xOE!P`RA}64-@18xR!z5R2ca=F?S8i% zy65NKRLYl^X}|vJ$GE5S%V|W_@0!1DoTD8VeZC!ek1zbg=fbk{PEWYrGh+dZ_(eNO zvP+L`*Ysp$S?T!`a};U1KcoR8x7?$4SbNdTjj78u~bW%Rw&d1+x8X32BK%T^;l+vQLs%VWwgYF5J;pg23L zNQ69yX={g4y%8B3zkDe!vSYH|LAB|gbEw{XwEnzr=GSy8_MB0k{pP9G731260EPT+ z>#C@ojQrqP)}}*Wuol|n$f4;ULA`Y@X>|2ETLVe;*Yjwz!Hzg%SYv_ck|^DSwefr> z+uh79I8J=-fn__}^>lT`vP?57JM&p@-PfldcFdP>bZd3@!9!HkK7X-WBky%?&g&c^ zHaa$JnRjfMh0F(Pta4g}R>SVB>gpx6BU*_>Fnc_qPQfe%EJo%2;kJsz!;YtbGpl4z z1PqUpQ+YP;yl{2ROe77#vy1|f@K?g>mP_Q{TlsJ&DQK(QbIdE0I zQ3G&y=?h|AKyuqmZmGMhqJa}I)>q~eJf^OxgREQo^wA5{{2vNRw;ns+oIck7NE>ib z{@it@yqv7NHVYKS{Y&pluY{B$9}7eQjoY}@ZB(!qfkmISA1Bn`G2-}As1h7KV}M`+ zlt=gdi!%{ZGxH(2WoSZed~?~Uo1*N6#ow>)R643qD%!0(yPh^M_L<6DLnfti2V6Vw z?u7RC&R*L70};LX^mMQ~b#Vzz+>khBT3S8I$o^^JqYg_Fo6w-&sAo9*OJea@_Xyb< zTe8KI_yC&VZM_t3g-$vFh!rokZmA1dptkf_s$#a6XM{&U2Q*6RS8cJ^%Qz=TE-D58 zHu%K*w}y$Wx-Pjs^IKUzMZZ!pyr)0^+muQse2Nggtz>#BY|G58!4*v-;9o*yxHI=bJ+JR;Soz#uM#nln{V=EAoFKK(%%`P; zc3%SZzVnmXSp+l7{(&rKiD%pJkMjBlVY{dK@GrOa;+oFWNmZ`@#qT;W+4;0jsGF&`U|9*x?FaFX=CH>f>8AX?m7b#_~ZuY^< zlnkAT>;(lCb*y6v8lnJ7y@gE#hvIuv7j*3kxFk#~ZPP->5tb84LW-4=!~nmuoZvCn zWvvTYoVQySARJU@{uGDtW&N_4ac4&PGR4kw=99+-%CxHktmlGN`n^^|A*axt8KfcU zymDlPKo^3msM7Iw%%dHmDwco6181$CxsX^F{jK`aXH}mcHJ@hS1hpgz!i=Ojb>@ z?;7VFT0(L(K|6d)qvDG-MH%XFonfHPjw3oZMM)TPE^Sq=!mAX9R6i+|{$sVgfp*{u z8+_40ZL3q^r7A0FjXV0gD($AU)N_C7+a}$kHvX}KZxXEsy3acPqBbYdgzvXopl`-- zgLo-^BVEQVOkx)fT;Q`-onNMA_#gZK10@|A80PQ(1J&Z86|WyNYjYep)95_Wll}u; z!>4}uPlUqirnO1jBPOU`5x$5oXOYW&4czZVnu11x8X1&shpLvB6OzTF)uTl?4JqB) zw`~`;uAjdZeb<-n=D8#)W5ZgIoPXzReJ|lUFrRp#IJ$Fgcz)MDq2SK`4pHS?MCaVu zm#KxpSsDE&G(r7{j2#E?&5~w>K}t%vw8Mxk)?pdL_UXYgpe`6|9}9zWqsD4}OoGbc zyTVytF*gsGx+V#Ye`A>7HFeDtQaa=fS=plAn&5@Y|L5$A&Ydzvly5{mt5LJ?HGBt| 
zlahl%nxM+DFxG0Qa+Y{m_iy#Ax(sLMbs_76Nlv!NM=ADkoAwf zPNy?Lg`}{=dC9~D{$(Q(v}BAQ?|Oz%x+qqi!~%fED-JCIv)}<#12tXB)RxYdg@=|0 zT9-eHf=$3KggVG!gjgA)?dW5tMfyr@a=US9#MaZJJzkd`xp%;qb3=_i~N7YfZhyygUhvUXQ}+_SMSApbz2b!!G; z+9N9WC~K@pfzi?x^9tg-54Ms-asdgMF9LKIn#He49m3El~T#_(}Jm9QvhSxTkNEjwG zy}vQs2+1E~KzWnD?)(>lGlj+9rGJiJZ{lYaDDviW)wHn*2*@t z*ph$KLA34fzMfiH4h$=%?E(49W6fFHhq;WW`OMBwzT^tngz{)W+DXq&e}x%M^ug3j z54R!#m{T&}rO)>;^;JMY{q}k|G~Olknv5&Q3b~gvq?Zo4t8aCT1BsTe%xoB0H3!*y zE6$8NKu-)t%Z-Jt-1V>+u@aqLz@VyQYiVjxV~fhaK=SM011O0_T@C9E?Q14vLv_by zLdh#}&Y1@j{0kO$AJKoHJN?HSvB`aW>*DK20>VO*Y!&sp{JUOlk)ql!Gtn)L=z@Fb zggA3ACUapc{_whsTZuF;)~aWyzw4C`{iD?*?5J1U#zR-0*-u%2@X~+wsS5i*E`?^! zj=@zG<=rb{A~@eb5Rq?Rz}F-s&XFK3p1#Gb!Wgikx zWxL8bk1cyWFvr*MQbMbPmgI;&zy*2$3gt}WBOI}dR1mXnK zUuw%PVmRfSafEpnv2QHzN5FF9T{G_xOcQl*tZL{m!W=kfRIo23>KPz^<4O+Xr+-TIl5liAJ_;OB2i9MvI z2hQ0JA!3n(m{s$YfSy2or({}W=z<66G8hNwtVjVSDqVhgX69(2DVrPQ)o6&tpFvt6 zzt)5!efud|N`J{wAFQ1A00wUn_5<=GjFp>(Nf*D+g=qn$cZSFh>1K&b3&Vkif|@U| zLp=zj19kgMWt1>#*~~Z}yE4+F>h4yvt$cqWoLdOM@CusC=C0tLDWsD~ z9QN>fBN-Y_*(<23sW=YSC4b5kuPWro)ABA6i4>+05B@>l9?!?%`c?{$Cy=K&UYN-c zEd?}c%9+foXE*<%d|r_;o}JsSQQMH)i>QY)V@MH2K#|_e4tK$@I}^oJo?AXu2`|79 zHF9x632@d0r}}`=Y5~W(rU9pHIY0tTH&$tr>3>hVO~H6nz<3V@oHA**1rMB)gq0Bh zrvRW*1g(n(Fd}L%>xO6&GZEa(?K(}IPqnD0{}bZnPkI5Vnf75y?xapR;Q#qag8EP6 zM4L+e)m3q6DdfMyRG+B&u8iuseO(6g14>7F{xSzVGXFZ94(S_$P}+Q;eb zB2z-I1tke1X1g?XOJl0Z{$zfjnnJUXV!AiIw^_e0cr~58N7(?ebUS z5ppGdShPPQky+2QUPj5RETZftk#h&5q!CGTYI|=~VJ|fP%dc$rL;mG^hDs7obq_H( zTB%0O9`gRzW)%td?^~(ipF?DS5GP)W~ z7lTqlYXxLa(d;-Ass;Z46FmvFP(Hm5@%5$B)}r}@OT1JPR6bH^TY$uTLh}Rg9>6fc z<3XK1qJ+d89KEnJ!RrAU1}r@hNSBeAt+tt1d85Aqe!)i06&_Pc8*4D57E;Kp9%F+<*4?zM#SycG~UrS!^&0T4;in_vCJHvhbK`zr{~y;(GOrw~epyuu}i|G2*#6 zZ|i-_`@vXs`KQFsOh9vTDmE~wjiY#E4os&8K?FnC5(~=0+=fU`_<&7(XeWgKX&ux z0Y$Il@pvvlNk}L?dRqe;L_GV!SoivtCerG@>91~zZ!uo( znA@hyv_HJmWaQdkV)o9VK(31VkHQTt#eL^dQ@U>_+x%Js$@Xh()9g&Exzwc!8&%3m+~42l3dS>|yV1E+^gYXwlM7v2XgXxItB zXrZXO>^RpHfa%8Evn8~U# zv(^H?>*9q|)(j&oyNKYFZ9pKf9Fhb=L#&Ia=Mam{&?Vv_oeL$@fm6(LDd_Kk=kjM_ ze5&xQ#`^DsI#0d?-&|EhG-Ee*%Svt*{n?JN_%{y54{Q`PQX2m@&9(@;%+JLlh?BSW zk>eI`1(SaK1N9#co=J>kOE=zRQExl13CRs~dR#Ccc&D0|YVhI~OAF}820%)MU!F>T z>FT?|BGIQbzX6IYZ$_O2Es#QM#JYx}UE+VEe#rrV!d(s0*V7|13c6;;8A7{44O>Ct z2+*S<_B|_Z7NIlH!4RTV}+wz70oCn*Zqn!w2c%={z!HNy^T^77_RwB z>Xo_imYX$oe%Ln}S=A+{bx#MS4vmC>D#IxQSQA;*Yb$$s^jmxv3Vdy-N z-6*I)e$`3jW(`jG+fb*CW4bD1KC}4Y+5G*PSnkuL4%~v$u}fAOAcL`;2t<|}0t{Ex zGnKc!whFoxMIED>Ps5I9T9ebK86El@+2l`I7oDdowl}_>j6G-CSORC1{b)#ZR?` zF1B@v@ZX?q#Y(!>3b@*(qPWnTy5JU|*b&7WqQpzPpk^U&VbF3ExMsEwvr;by)WV^P z&D0&mP;T~Ad<{#xT~;K3C|D#xhpz$uP%sJw@=PELe53q0Asme&;DiW;(ufy)$uDJt zMO5$s!C&8Qzf4s9_Jq_m>cR;j3S1i%!FW&d4d}}eUcd)!pR$ttKZmVUHSYnc2!Gxi z(MtWU16O=DVPWf;$H`hekBdj%5sNXQLTJ^p)bEbs=}Wi#LNto+iWpHN9+*y|9DHk~ zGBh6V%FdL*Q9QGFMYFvKwNY-#Mt!<+QR?>}B*vQa5h4 zd+!`STFG^mcD98BaXb4z-h)Q50)<;$MVL~bd|xX=h^FqiMgepP&&V^d`z^c7A`V`dd671f}qug0=&oXrr`(T!AhqW-drj>fdEJ@$UmvU{Eeez z07zf9u;lPq&39Q}YCs#&0r;eM?6MBg%Ol_%sV)<{lyX>5(@iLuhLDXC%LF{8jOXA$ zeXsdOARHxc_GM~YC)?ixsZEbG1Q)ULo|WRwxxW8MTCUJUvBAha8O}<2WO`l9X zDQU=&ei>OX#O$`%>t#YWv*`H~Rt17pgJ+czk4W^uCBU|kzk6g5wGc|=g#w@_*xqx^ z?uj#43i6fQQ^E_C?HO4e;|wrL9Qhl1jFg09;oG%h;zU10vq-ickM@vIC!rynhdeaW z(poG?r$Fq_57=rCZbVjtzz0ZD8w0P*pmr zSroU9ZDXgw41j;l2K*H=m;&iW#xLSXuEJ!IM)SxUD z2%gZ47|y>o7CfD8h7n!eS~Ydsh-y5>HPzg)_H)HN%2lE-Xk5#sqg*|so)K*?{R6pf z?7pWdYheBd3g>#BEWLUqrU1V4{56kA<+}2d6=LefycJ7}av5DvRIAbeh z?S3ur`>d!vV7PVtDEOi%(K~_0@oTKT_+jb9xansN!LREFHAf8ljGqtGe~mf585!R} zDNAS@guD^$a=#!P+S!89g1^SXB;HU5I7qpm0FZHzOb*zhltW`R#h0?fmt>X0*CBh7 
zTp&}WO`?xy1i}}z*+E{Ex{#Km5XL$*oiN}s0Gde{&}_Tp+i{vrM@O7K&&KsQN@@SUoH)5ycT1aMTW_CeOT@X{ThnNS%))8_~U)K$7w0OU{Uk=&B zi(JnI{f?w#2coBz<+plI^3j0_I(< z8RxR&N9uO-B!>J6==Cc;p)aytu39s&fk(sjabx*@ zL6Y4b~C81@JkVlM3-ex?Mj zgvpnHUR(chz`&@e8Mw zK7p4w9;Fc9%<#iM5cX}JsB|3G7~G)M`cTo@OYzsNUR;UOqx5X#@K&j@>bSM-2+@(Z z168C!1k?FD&*u+!v21#fulZG(TaIr=$5gprwGFsHp@c*Dl~T#ygv<6^FNnubqAOV@ zU`%_ll7h5Qz4x``v{ZKZQZ9FaLxIgmLtJ_&3DPb&rpcvVfy48X8u8S@tN^x13S5(c zmttCuEoa1&JQTstga(Yl_RYF}u-_RaD+K?W^bBP+U>UdLTcdyj^Eyb>RH6X69#9@(Nqf5Mgv7BSjG) zqd4$e?Rlnp5Ts#_j&xml<4eHaYfKf6MR9n>s;Ixq{_#pZG7`OjHlHMMp}ahhFr-ly zbve4vRQap%abGtr|0a?*X=rCWWw4n5#sR8riD9>KchV)Ok!)lI)D?}ZM0704ME0DQ zDtUvfxXe+GcY_@_1);+MEm9g5V7PPATIWq6doFfqFN3M*%dOJ>ipf-us-Bn2 zEt(r1n4`L(e+E6@KI9(?#`FpeI2Tpmf+wbf9WexIl+Ew6BYnt=` zf7e{ZvWL&FH*DwkS&yn}hzuxvsY3nf03pE8-|twwg!1c|`nC3Ev1Q|Jtv5%xPH9sd z_>+h|RFyugP(?KU_>-#5MS@q6|DH=n*& zh-$^|(Q(TQ5_gwj>S~t@!cmZ$3w=Cwz&1kkh1-SN`-)=>+GcZfAb<*B&{6@8ZwrGi zlvRz97K$`Fi^i%xp@CZDjOcPr@6p8FT|T>V*U&c=Q-0gC;Zd^R-}sCXJl0&9jfcL? zX7um?j@m)|){RbzHjGX3)J;1kry2d`y01K{7@C%!@tnQKC2F$BnV&(C zu#9$Wg9r^~jYcxUpoOm%HK^vi^eEgLqd!cXqF2@G$FAwk^a!TP1JUk7lps#i5>9gE zxO8GcW2-})ffQ5?ff6}Xd#RY16d7H5>O;ocW~Q)N9#3|hF2dGoKF%iGbW}$tUcsMe z)i?{BxI_T`qU}`6dkz5xyUg}|k!fi*uZO^D;yYS%TP%N+bLpyL!bVLYuD+OAxM;r7 z>B3YMprqk%w2J}qslAqm@$8;FOn9@{=~;3yv)GH}U-^@@#@&W3B@N$jgkaTqHe!`y zpU0_AdXi&|U%vPF#<**erXiGgo3%8yXL@@gaoY511W}B7b^Cu6U1?m>+4mL@H?%Sn zL#DKF8AHO+gse1ep#{yd{-#4B);C%pa7;}HDKRxoDVNfeN}H_omnA`o8Y(VRkWyM^ zP>NP)N(|wW=GOeb^9En=0zc1v&OOh0&bjvjW9vO@;?KtII~@fc{f1T9yM}wndlx<5c8mNB76FdqY1x!%l8GTbT2c>)-UNTfpsm?OXSq zMXlJ>fBAXrp$;-NfaUPJW6lB|`>|gC*zbA~T6mc|?0(I-ZYJGLd|rO(vFqc!71H5} zUj*M}hf;M~2TWn!A(?JSXO1Z>i>>E%cR0uS?WSEx9rd_=Z*!K5x?_Ks*FirL_H72( zp7dMkCi(3(e`amMQ#C$&=I60bD{$*Rf0^j0Sf>fV&q2N=J@g3;+1xJJJsQ6mtDN8R zpe?AXquE$v_;c;%QpVUB6j^b1LVL;Kd2{>N`m;`yoSwZOCx(ABdrGt`6e5Yx=xM zY4?$XMdoABT^ZW&`#p=#rlQ~%DZglEg+AOo{OTeZj9^|MA}otm@Mg2Ss9xJhtzp#D zU5o48`5kr0AwotW)eCJ1OBdzP#1x1dYsS648g4jN3spCy*1+AvylhK6-{pBAZEBy5 znEjUBg`i9GQbBvK7cP#7^n?ST&5KA>rmCwg{lOYxmpk{Ba^jrDxPho#A7q?;_2cmv zII~D``)sb5k(~D{wu$TB_bL8e#HVP?gyTG+W^tAFUHas3!|rvjn%5=Vu z>C(*#&TfxFdt+u}+W{`+vT?`SclF1UCtZJDxLml|?|G6#?i)!P&t-ON(gl9kovL5I{(UasUlXq}>dN61?70)IC&Ruv{XM%!3=FUbkO$p&+yu6hl-M+6BCyecV)b0r1{gI_{5agn~-fXbuuA0DE4_`VxT#Bfgki1;~{9luX%k!6Kg`uTmq4@0FgB3>> zc$dpN1Sg_D<{ZmuKmFuh!y8avrxYxNmgy!?($K8lvcC#B+ORUwtk*U z3C+Tk_OhGvj|`8l6kHs$kxcy|(S(B6LWTAiAN!dTK6^Ae*lU>6hZd)~uStns^0k_l zI{)0E^T#EuW2uNy5k9ag_R#%HpDUW4Z#e*7DG;%vyZ&5e-6Gq2HufHCMG8aAZHqOz z2Mz-Jey|P>KFJt_9^TrTTO8}(u5v2hM&n*oMI0!Sak7OXsC?i^K>H-rOQBT`T3fUla@@@GD&=&N zDTg=sp#Ka;WuaPr^W$h=7Vjt%|L3oB7g_w6rXlgp`@=7IlS{cKcc_EvqSwDB@@XT~ za%yG+2A6u(v5S2=%A|X9V1MacYIVEUvfhwRGwJelp{-{T0~TrLt8U=&0cI+;#WP)K zpT{rp!U7neNp?_Y>1p5Kdc|_|8QH_<@7}@n+~`+#P@LPHqFqzYlbb>_>l4K<)Avud zd`(C}wd|6Z|2pUNaAbF=$vZHE+aczE*;Xvw_xV-JhWq1VOcOSp?;l1fBae6xaphuKOUyE z`Q|modzP&V-O1Js4Fv{a?|qBZ9P_xb_Q=0Q06yqx=-7QW(;q`-vVbc^z9n|Ky?r^_gx=9f4ek8BotzjVINjAJ=6Y#gSTPQj@2s% zLuuqnTfLbF&1B2wIL*$=UmMfZ)U8p+SNqm(1s6+e8iTbo|L`r~PB4)HC*<@rLQ9y4 zN6htNmIM+VcNZ32Zd2Mu-)c&RL$yqYfbu%LN>ON0f) zR4sJg68sZbO9mfg*v3p}+5n&-EqhR+-;*vwc9KSE1b7UGb!@d4E~7}M*^Pe=VY>zi zedN)%qRiwzEuIC*&dc8JR3uoJ?Om5TjnyOVQT(JB=6PGgu}5gx5@kt#Ds_r(q@(2| zcFkczh@z2~H4UO>nV8$!4|&RyUKI)tFf(IR)>E(nI9N46&mfYp29!6{XclfHEsFpm zy$EL92Px7P> z2EjFXFbV7H&8F>w*V0>6j%_$)PwFl&Nt*EcYL{yi$q0R%G%Bi;Tc`oLkGsfGT^s4!bIq?UK=vMFIk-art9j2jfwSk3wy$ElPDDy_q zY}z=Zd_}43!;hk|Jcs(4RbuLvQb2f@Q44mx>>{^3W|$u;0%_=)_g`MbJFkxPSByb^ zcg=5V0fENQnN})_0wsWIET;PSpR#FTON|UD;V^OB*`eO)FoCR-RWNZwC?PgJZQuw| zhcv>4&4;NLdPEYT9l^<9=t!sX(Mkv^YmasrKVK9bi)~%BU^B9({8(LN1#&FaH@_sm 
zA<>lWBHA>!bWay48RMJr)an&YiW1h^Qq?feYtfaqnVp)Q?d0k}W3Q@KZVQ_CQr+#!g-gHNYZWSjt+g4y3rq;@W zCCA~-`^{dcK`IuE74Xiwg=*0XLDX)C(^ME;h^lq~?F5JYR2UbO(WPyGq;Anf;qQx{ z3GJs{GnGe?of_^Ibv2wG>I}NZP3@;L1HYT6PEKZ|x@}&kS zJ1*+4=vYZNL_lZq-yM!{u&+BqeU@yK+K{i`k^n8}R8e5NGLaYq}4iI8bk%$$kKc}ENtFltwMu{OvzDQ(cB#O5#;#3GTqWf z<8J8Y^4);}9hdGj*px`GUd8Fr--+$F2#mioM0W!Vp9PmClE%ppw`i}mon>1xxMWT_ z80?(V1;{yXm8DNa2o38yoA%aGcIG?*@!fUy%0?Xn#NVN`Qw0JmqHa-pnuGs%zRd)-~Q4zwQwVB4LcqQ;DAgfNhWnk}H>}5xnLbUHv?dKfZ03jM^J_MKAN=@cFdl3o&L|u)fFYLAh z*u!9+dJvB}j$}Y0YgY1p6v(!L;@L`LncT-$n>?7$>QsALf%3>2POSGVf`d`)O)wMN zN(_Y!Q;x+X?%I?g#7#Q0we(hT4D%*ay|OnA zXF%EN(NGmrIo}%C01d7+8_UBcl3LY*J_{A4?^SHA8G986B_d{S4@dqqW8$V^1n^o@ zArIz$p>NMFmU$h#>0_Z!2ZF}p#evFgmmb|Ba6SNrg7Tp3LE4^aSUln=`(B_BGm|go z?&?b0MBVkM1P`}uE?Hqv-~0%mo+3O@B#!BWf-a|MDT;trJq)sB(UJvgk}Rk|&e?L# zw0Dx+CP3f2Od0>JYO9DSWEQ7xNbuD<^l4}-zMPJ(+@yJ?+smhkBNi#n1r8*V4KPte z%Cn|&tPcW^nh071Kg<*$kF|uuIdq}A8UcRPtO{4ghzUys3Os})v;%C4jBshe>=HhY z?Iu)v@}dl++4LefO9|-6uwD5bu?jW1CKmfDj2zI;>SCLOQ7!v!*luUj1`ZRiXU;Bh zlWQ!tf{{CU~IN%w06vg$8M znm55=jdMt5>k#uM7y&pvvQ=f-l+MIuP6UD|L;C?fk1}IdWnm0r*HMUjH$KU$PSgq{v(MCamFPOK^{=(o5D|>i8+9bv%j$rk1(?CSSDDoEs`F+mE6u*-!X!Zj zq2MyXjKp}dj95T`ic>6_KyDURVc$~RMy2ossTXv#n;0A?}G%}5?;?bj=kWHue9!boY0Rp*qA8ZBHp?P@^# z&?v`24r&5NlMpfNfC_exc?{b>ZJ>+aB*V74+=LEni7NLgY8Uydm~gJV-#a;NmY350 z)U8Em=UT9^WgrahuEG^eEsoy%KTQZt6k%yVPWygG(84ykk*9rVyocbg-@0Cl zU<*mJ`O=aC8DCpW$(Ke##o%(?s9l27-ZjQGi;zY5s&!MZ>U{3BU21p#u$K6UA1&~B- zv7-muyk${z|CIsX-LKuMWsI1;MG5Uo+ql_@Qx<2Myy$I!bP}S**D>GepbemipkYBl zmM+QwCcv-Q&IBc`mI1AIY}GuI1Y-M&c=Qoy<2d<;*<8Vh*}#!x;c0O+2|95k&Yd=4 zr<{OjIQc>rNp>o~0Rc=v*e=1iJh=N6(rm#$=?HEJS2lF~oes`6ZE6BlNJy$6-iZJM zn!`-{?>ekaXG5l~qIPO_-rO~;w+#WF08+#D@P9JLNk5UuE_*S7^fQQ!g8lv@qd}$2 z;IUIOp0+%!O+wH>HzP^^DZ*?C4#0gLz|171!6i8~yWV7$UpU~(FQQ?-!E!AZkVVh0~ zgN@*dPr4RX2}Fx4>yM>a^?%#p@-@a|!|~g4VE|$JNO8TVhN<6vDobNg*7>T(V&HHj zF4C?7&c3T{*v84=QC<$R*Ji1gNuW<{@?O(IeJl9M0MiqUyo58C2;XPJxiE`rY8NfOJ5v_!>Un+BfUSqIl-xU%d$a`Uz}Dz@&TIy$Nw%3VG!a7^ZuFvjm`nX; z7RG^sLRz?@;GZLq<~XWsqpC(%(WHY_-qCtPUsmzHNMX1R z7BkmMjn*T_4CL?7emzoo<`KF%&{s>Ov;LPh%jX#!#-h>2698cf+)!JYO3fceV8@2?zyH4P>dwjYgdJg*BQOhE~x zIh3%e0y*ALc=Y(?zRPQ5z0=Y{b?IEesP*!jcSnK~6|r2$H>!Is#`lUt_wBds04BPT zW4b>k68}cLV$<#4ZlI5b-g@!%=hV`I`xd%8{I@wuK{PDwYI6{g`(i`=+K2qtz0X~A zImkwnvw$YH+g{7zVktx0$E~>O3B;gC_Bs2^IWG?=4Sw*BrS4lEu*QN zS*(MxU7EAlC%vpDbmV6r#Suv5IcG^v5tNNC7)MVL1z;PZrW_Q6FRoD!6OdCbH|AjW z4Zc$_W-m0(br4sevYPT$p?zX@e~pU(tu6z--%ODNA#5M5qiP#0*ekXUBLI=vKzpu7 zX_tNTt+ma!(2a^DtJ-Db$8>0j+KB=?S9No*eUnDp5F;XXnGW?4nMEjnxxJpOX)UY5 zSK8~}V3IH~Y?E-git=hCq`^d>Z=F`u9P7f(@z{U1;OCYq&h5o=)4<|%$g5i3Z`;() z_RsRof9f8#IncKRG$YK|reM!!X*b&EsN>wyu}6X*ODv|m?>Bc9UusH!0ywr|Yw2?a pa^S(Ya07d;T!oUBXtdHJHMdN_ePy^)-FR_TZvWDzTTcI~{~thZMP&d0 literal 0 HcmV?d00001 diff --git a/script/app-image-classification-onnx-py/requirements.txt b/script/app-image-classification-onnx-py/requirements.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/script/app-image-classification-onnx-py/run.bat b/script/app-image-classification-onnx-py/run.bat new file mode 100644 index 0000000000..ee7db98674 --- /dev/null +++ b/script/app-image-classification-onnx-py/run.bat @@ -0,0 +1,29 @@ +rem echo %CM_PYTHON_BIN% +rem echo %CM_DATASET_PATH% +rem echo %CM_DATASET_AUX_PATH% +rem echo %CM_ML_MODEL_FILE_WITH_PATH% + +rem connect CM intelligent components with CK env +set CK_ENV_ONNX_MODEL_ONNX_FILEPATH=%CM_ML_MODEL_FILE_WITH_PATH% +set CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME=input_tensor:0 +set CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME=softmax_tensor:0 +set CK_ENV_DATASET_IMAGENET_VAL=%CM_DATASET_PATH% +set 
CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT=%CM_DATASET_AUX_PATH%\synset_words.txt
+set ML_MODEL_DATA_LAYOUT=NCHW
+set CK_BATCH_SIZE=%CM_BATCH_SIZE%
+set CK_BATCH_COUNT=%CM_BATCH_COUNT%
+
+IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD%
+
+IF DEFINED CM_INPUT SET CM_IMAGE=%CM_INPUT%
+
+echo.
+%CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+echo.
+%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\onnx_classify.py
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+rem Just a demo to pass environment variables from native scripts back to CM workflows
+echo CM_APP_IMAGE_CLASSIFICATION_ONNX_PY=success > tmp-run-env.out
diff --git a/script/app-image-classification-onnx-py/run.sh b/script/app-image-classification-onnx-py/run.sh
new file mode 100644
index 0000000000..62b07e1f10
--- /dev/null
+++ b/script/app-image-classification-onnx-py/run.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+if [[ ${CM_RUN_DOCKER_CONTAINER} == "yes" ]]; then
+  exit 0
+fi
+
+#echo ${CM_PYTHON_BIN}
+#echo ${CM_DATASET_PATH}
+#echo ${CM_DATASET_AUX_PATH}
+#echo ${CM_ML_MODEL_FILE_WITH_PATH}
+CM_PYTHON_BIN=${CM_PYTHON_BIN_WITH_PATH:-python3}
+CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+
+# connect CM intelligent components with CK env
+export CK_ENV_ONNX_MODEL_ONNX_FILEPATH=${CM_ML_MODEL_FILE_WITH_PATH}
+export CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME="input_tensor:0"
+export CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME="softmax_tensor:0"
+export CK_ENV_DATASET_IMAGENET_VAL=${CM_DATASET_PATH}
+export CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT=${CM_DATASET_AUX_PATH}/synset_words.txt
+export ML_MODEL_DATA_LAYOUT="NCHW"
+export CK_BATCH_SIZE=${CM_BATCH_SIZE}
+export CK_BATCH_COUNT=${CM_BATCH_COUNT}
+
+if [[ "${CM_INPUT}" != "" ]]; then export CM_IMAGE=${CM_INPUT}; fi
+
+PIP_EXTRA=`${CM_PYTHON_BIN} -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"`
+
+echo ""
+${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${PIP_EXTRA}
+test $? -eq 0 || exit 1
+
+echo ""
+${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/onnx_classify.py
+test $? 
-eq 0 || exit 1
+
+# Just a demo to pass environment variables from native scripts back to CM workflows
+echo "CM_APP_IMAGE_CLASSIFICATION_ONNX_PY=success" > tmp-run-env.out
diff --git a/script/app-image-classification-onnx-py/src/onnx_classify.py b/script/app-image-classification-onnx-py/src/onnx_classify.py
new file mode 100644
index 0000000000..00baaab149
--- /dev/null
+++ b/script/app-image-classification-onnx-py/src/onnx_classify.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python3
+
+# Extended by Grigori Fursin to support MLCommons CM workflow automation language
+
+import os
+import onnxruntime as rt
+import numpy as np
+import time
+import json
+
+from PIL import Image
+
+model_path = os.environ['CK_ENV_ONNX_MODEL_ONNX_FILEPATH']
+input_layer_name = os.environ['CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME']
+output_layer_name = os.environ['CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME']
+normalize_data_bool = os.getenv('CK_ENV_ONNX_MODEL_NORMALIZE_DATA', '0') in ('YES', 'yes', 'ON', 'on', '1')
+subtract_mean_bool = os.getenv('CK_ENV_ONNX_MODEL_SUBTRACT_MEAN', '0') in ('YES', 'yes', 'ON', 'on', '1')
+given_channel_means = os.getenv('ML_MODEL_GIVEN_CHANNEL_MEANS','')
+if given_channel_means:
+    given_channel_means = np.array(given_channel_means.split(' '), dtype=np.float32)
+
+imagenet_path = os.environ['CK_ENV_DATASET_IMAGENET_VAL']
+labels_path = os.environ['CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT']
+data_layout = os.environ['ML_MODEL_DATA_LAYOUT']
+batch_size = int( os.environ['CK_BATCH_SIZE'] )
+batch_count = int( os.environ['CK_BATCH_COUNT'] )
+CPU_THREADS = int(os.getenv('CK_HOST_CPU_NUMBER_OF_PROCESSORS',0))
+
+
+def load_labels(labels_filepath):
+    my_labels = []
+    input_file = open(labels_filepath, 'r')
+    for l in input_file:
+        my_labels.append(l.strip())
+    return my_labels
+
+
+def load_and_resize_image(image_filepath, height, width):
+    pillow_img = Image.open(image_filepath).resize((width, height)) # sic! 
The order of dimensions in resize is (W,H) + + # Grigori fixed below + #input_data = np.float32(pillow_img) + input_data=np.asarray(pillow_img) + input_data=np.asarray(input_data, np.float32) + + # Normalize + if normalize_data_bool: + input_data = input_data/127.5 - 1.0 + + # Subtract mean value + if subtract_mean_bool: + if len(given_channel_means): + input_data -= given_channel_means + else: + input_data -= np.mean(input_data) + +# print(np.array(pillow_img).shape) + nhwc_data = np.expand_dims(input_data, axis=0) + + if data_layout == 'NHWC': + # print(nhwc_data.shape) + return nhwc_data + else: + nchw_data = nhwc_data.transpose(0,3,1,2) + # print(nchw_data.shape) + return nchw_data + + +def load_a_batch(batch_filenames): + unconcatenated_batch_data = [] + for image_filename in batch_filenames: + image_filepath = image_filename + nchw_data = load_and_resize_image( image_filepath, height, width ) + unconcatenated_batch_data.append( nchw_data ) + batch_data = np.concatenate(unconcatenated_batch_data, axis=0) + + return batch_data + + + +#print("Device: " + rt.get_device()) + +sess_options = rt.SessionOptions() + +if CPU_THREADS > 0: + sess_options.enable_sequential_execution = False + sess_options.session_thread_pool_size = CPU_THREADS + +if len(rt.get_all_providers()) > 1 and os.environ.get("USE_CUDA", "yes").lower() not in [ "0", "false", "off", "no" ]: + #Currently considering only CUDAExecutionProvider + sess = rt.InferenceSession(model_path, sess_options, providers=['CUDAExecutionProvider']) +else: + sess = rt.InferenceSession(model_path, sess_options, providers=["CPUExecutionProvider"]) + +input_layer_names = [ x.name for x in sess.get_inputs() ] # FIXME: check that input_layer_name belongs to this list +input_layer_name = input_layer_name or input_layer_names[0] + +output_layer_names = [ x.name for x in sess.get_outputs() ] # FIXME: check that output_layer_name belongs to this list +output_layer_name = output_layer_name or output_layer_names[0] + +model_input_shape = sess.get_inputs()[0].shape +model_classes = sess.get_outputs()[1].shape[1] +labels = load_labels(labels_path) +bg_class_offset = model_classes-len(labels) # 1 means the labels represent classes 1..1000 and the background class 0 has to be skipped + +if data_layout == 'NHWC': + (samples, height, width, channels) = model_input_shape +else: + (samples, channels, height, width) = model_input_shape + +print("") +print("Data layout: {}".format(data_layout) ) +print("Input layers: {}".format([ str(x) for x in sess.get_inputs()])) +print("Output layers: {}".format([ str(x) for x in sess.get_outputs()])) +print("Input layer name: " + input_layer_name) +print("Expected input shape: {}".format(model_input_shape)) +print("Output layer name: " + output_layer_name) +print("Data normalization: {}".format(normalize_data_bool)) +print("Subtract mean: {}".format(subtract_mean_bool)) +print('Per-channel means to subtract: {}'.format(given_channel_means)) +print("Background/unlabelled classes to skip: {}".format(bg_class_offset)) +print("") + +starting_index = 1 + +start_time = time.time() + +for batch_idx in range(batch_count): + print ('') + print ("Batch {}/{}:".format(batch_idx+1, batch_count)) + + batch_filenames = [ imagenet_path + '/' + "ILSVRC2012_val_00000{:03d}.JPEG".format(starting_index + batch_idx*batch_size + i) for i in range(batch_size) ] + + # Grigori: trick to test models: + if os.environ.get('CM_IMAGE','')!='': + batch_filenames=[os.environ['CM_IMAGE']] + + batch_data = load_a_batch( batch_filenames ) + 
#print(batch_data.shape)
+
+    batch_predictions = sess.run([output_layer_name], {input_layer_name: batch_data})[0]
+
+    cm_status = {'classifications':[]}
+
+    print ('')
+    top_classification = ''
+    for in_batch_idx in range(batch_size):
+        softmax_vector = batch_predictions[in_batch_idx][bg_class_offset:] # skipping the background class on the left (if present)
+        top5_indices = list(reversed(softmax_vector.argsort()))[:5]
+
+        print(' * ' + batch_filenames[in_batch_idx] + ' :')
+
+        for class_idx in top5_indices:
+            if top_classification == '':
+                top_classification = labels[class_idx]
+
+            print("\t{}\t{}\t{}".format(class_idx, softmax_vector[class_idx], labels[class_idx]))
+
+            cm_status['classifications'].append({'class_idx':int(class_idx),
+                                                 'softmax': float(softmax_vector[class_idx]),
+                                                 'label':labels[class_idx]})
+
+    print ('')
+    print ('Top classification: {}'.format(top_classification))
+    cm_status['top_classification'] = top_classification
+
+avg_time = (time.time() - start_time) / batch_count
+cm_status['avg_time'] = avg_time
+
+# Record cm_status to embed it into CM workflows
+with open('tmp-run-state.json', 'w') as cm_file:
+    cm_file.write(json.dumps({'cm_app_image_classification_onnx_py':cm_status}, sort_keys=True, indent=2))
diff --git a/script/app-image-classification-onnx-py/tests/README.md b/script/app-image-classification-onnx-py/tests/README.md
new file mode 100644
index 0000000000..899509cb7f
--- /dev/null
+++ b/script/app-image-classification-onnx-py/tests/README.md
@@ -0,0 +1,14 @@
+```bash
+docker system prune -a -f
+
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --verify=no --env.CM_DOWNLOAD_CHECKSUM=45ae5c940233892c2f860efdf0b66e7e
+
+cm docker script "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --env.CM_IMAGE=computer_mouse.jpg
+cm docker script "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg
+
+cmrd "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg -j --docker_it
+
+cmrd "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg --output=.
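+
+# A hypothetical native (non-Docker) run of the same app, assuming the image
+# downloaded by the "download file" command above is in the current directory:
+cmr "python app image-classification onnx" --input=computer_mouse.jpg -j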
+ + +``` diff --git a/script/app-image-classification-tf-onnx-cpp/README-extra.md b/script/app-image-classification-tf-onnx-cpp/README-extra.md new file mode 100644 index 0000000000..5e59c8fede --- /dev/null +++ b/script/app-image-classification-tf-onnx-cpp/README-extra.md @@ -0,0 +1,3 @@ +# Image Classification App in C++ for ResNet50 model + +* In development stage, not complete diff --git a/script/app-image-classification-tf-onnx-cpp/README.md b/script/app-image-classification-tf-onnx-cpp/README.md new file mode 100644 index 0000000000..11f9495bd4 --- /dev/null +++ b/script/app-image-classification-tf-onnx-cpp/README.md @@ -0,0 +1,135 @@ +Automatically generated README for this automation recipe: **app-image-classification-tf-onnx-cpp** + +Category: **Modular AI/ML application pipeline** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-image-classification-tf-onnx-cpp,879ed32e47074033) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-tf-onnx-cpp)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *app,image-classification,cpp,tensorflow,onnx* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "app image-classification cpp tensorflow onnx" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=app,image-classification,cpp,tensorflow,onnx` + +`cm run script --tags=app,image-classification,cpp,tensorflow,onnx ` + +*or* + +`cmr "app image-classification cpp tensorflow onnx"` + +`cmr "app image-classification cpp tensorflow onnx " ` + + +#### Run this script from Python + +
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'app,image-classification,cpp,tensorflow,onnx',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
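+
+A minimal sketch of the same call with an explicit `env` dictionary; this is
+an assumption based on the `--env.KEY=VALUE` and `env` conventions described
+under "Default environment" below, not a verbatim example from this recipe:
+
+```python
+import cmind
+
+# Hypothetical override of a documented default environment key
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'app,image-classification,cpp,tensorflow,onnx',
+                  'env': {'CM_BATCH_SIZE': '2'},
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```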
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="app,image-classification,cpp,tensorflow,onnx"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=app,image-classification,cpp,tensorflow,onnx) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "app image-classification cpp tensorflow onnx" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_BATCH_COUNT: `1` +* CM_BATCH_SIZE: `1` + +
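+
+For example, a sketch overriding both documented defaults from the command line:
+
+```bash
+cmr "app image-classification cpp tensorflow onnx" --env.CM_BATCH_SIZE=2 --env.CM_BATCH_COUNT=4
+```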
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-tf-onnx-cpp/_cm.json)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * get,sys-utils-cm
+       - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm)
+     * get,gcc
+       - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc)
+     * get,dataset,image-classification,original
+       - CM script: [get-dataset-imagenet-val](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-val)
+     * get,dataset-aux,image-classification
+       - CM script: [get-dataset-imagenet-aux](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-aux)
+     * get,ml-model,raw,image-classification,resnet50,_onnx,_opset-11
+       - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50)
+     * tensorflow,from-src
+       - CM script: [install-tensorflow-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-tensorflow-from-src)
+  1. Run "preprocess" function from customize.py
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-tf-onnx-cpp/_cm.json)
+  1. ***Run native script if it exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-tf-onnx-cpp/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-tf-onnx-cpp/_cm.json)
+  1. Run "postprocess" function from customize.py
+  1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-tf-onnx-cpp/_cm.json) + +___ +### Script output +`cmr "app image-classification cpp tensorflow onnx " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/app-image-classification-tf-onnx-cpp/_cm.json b/script/app-image-classification-tf-onnx-cpp/_cm.json new file mode 100644 index 0000000000..0baccd0cb1 --- /dev/null +++ b/script/app-image-classification-tf-onnx-cpp/_cm.json @@ -0,0 +1,46 @@ +{ + "alias": "app-image-classification-tf-onnx-cpp", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Modular AI/ML application pipeline", + "default_env": { + "CM_BATCH_COUNT": "1", + "CM_BATCH_SIZE": "1" + }, + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "get,sys-utils-cm" + }, + { + "tags": "get,gcc" + }, + { + "tags": "get,dataset,image-classification,original" + }, + { + "tags": "get,dataset-aux,image-classification" + }, + { + "tags": "get,ml-model,raw,image-classification,resnet50,_onnx,_opset-11" + }, + { + "tags": "tensorflow,from-src", + "version": "v2.0.0" + } + ], + "tags": [ + "app", + "image-classification", + "tf", + "tensorflow", + "tf-onnx", + "tensorflow-onnx", + "onnx", + "cpp" + ], + "tags_help":"app image-classification cpp tensorflow onnx", + "uid": "879ed32e47074033" +} diff --git a/script/app-image-classification-tf-onnx-cpp/include/benchmark.h b/script/app-image-classification-tf-onnx-cpp/include/benchmark.h new file mode 100644 index 0000000000..42b0418fce --- /dev/null +++ b/script/app-image-classification-tf-onnx-cpp/include/benchmark.h @@ -0,0 +1,511 @@ +/* + * Copyright (c) 2018 cTuning foundation. + * See CK COPYRIGHT.txt for copyright details. + * + * See CK LICENSE for licensing details. + * See CK COPYRIGHT for copyright details. + */ + +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +//#include + +#define DEBUG(msg) std::cout << "DEBUG: " << msg << std::endl; + +namespace CK { + +enum _TIMERS { + X_TIMER_SETUP, + X_TIMER_TEST, + + X_TIMER_COUNT +}; + +enum _VARS { + X_VAR_TIME_SETUP, + X_VAR_TIME_TEST, + X_VAR_TIME_IMG_LOAD_TOTAL, + X_VAR_TIME_IMG_LOAD_AVG, + X_VAR_TIME_CLASSIFY_TOTAL, + X_VAR_TIME_CLASSIFY_AVG, + + X_VAR_COUNT +}; + +enum MODEL_TYPE { + LITE, + TF_FROZEN +}; + +/// Store named value into xopenme variable. +inline void store_value_f(int index, const char* name, float value) { + char* json_name = new char[strlen(name) + 6]; + sprintf(json_name, "\"%s\":%%f", name); + //xopenme_add_var_f(index, json_name, value); + delete[] json_name; +} + +/// Load mandatory string value from the environment. +inline std::string getenv_s(const std::string& name) { + const char *value = getenv(name.c_str()); + if (!value) + throw "Required environment variable " + name + " is not set"; + return std::string(value); +} + +/// Load mandatory integer value from the environment. +inline int getenv_i(const std::string& name) { + const char *value = getenv(name.c_str()); + if (!value) + throw "Required environment variable " + name + " is not set"; + return atoi(value); +} + +/// Load mandatory float value from the environment. 
+inline float getenv_f(const std::string& name) { + const char *value = getenv(name.c_str()); + if (!value) + throw "Required environment variable " + name + " is not set"; + return atof(value); +} + +/// Dummy `sprintf` like formatting function using std::string. +/// It uses buffer of fixed length so can't be used in any cases, +/// generally use it for short messages with numeric arguments. +template +inline std::string format(const char* str, Args ...args) { + char buf[1024]; + sprintf(buf, str, args...); + return std::string(buf); +} + +//---------------------------------------------------------------------- + +class Accumulator { +public: + void reset() { _total = 0, _count = 0; } + void add(float value) { _total += value, _count++; } + float total() const { return _total; } + float avg() const { return _total / static_cast(_count); } +private: + float _total = 0; + int _count = 0; +}; + +//---------------------------------------------------------------------- + +class BenchmarkSettings { +public: + const std::string images_dir = getenv_s("CK_ENV_DATASET_IMAGENET_PREPROCESSED_DIR"); + const std::string images_file = getenv_s("CK_ENV_DATASET_IMAGENET_PREPROCESSED_SUBSET_FOF"); + const bool skip_internal_preprocessing = getenv("CK_ENV_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE") + && ( getenv_s("CK_ENV_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE") == "float32" ); + + const std::string result_dir = getenv_s("CK_RESULTS_DIR"); + const std::string input_layer_name = getenv_s("CK_ENV_TENSORFLOW_MODEL_INPUT_LAYER_NAME"); + const std::string output_layer_name = getenv_s("CK_ENV_TENSORFLOW_MODEL_OUTPUT_LAYER_NAME"); + const int batch_count = getenv_i("CK_BATCH_COUNT"); + const int batch_size = getenv_i("CK_BATCH_SIZE"); + const int image_size = getenv_i("CK_ENV_DATASET_IMAGENET_PREPROCESSED_INPUT_SQUARE_SIDE"); + const int num_channels = 3; + const int num_classes = 1000; + const bool normalize_img = getenv_s("CK_ENV_TENSORFLOW_MODEL_NORMALIZE_DATA") == "YES"; + const bool subtract_mean = getenv_s("CK_ENV_TENSORFLOW_MODEL_SUBTRACT_MEAN") == "YES"; + const char *given_channel_means_str = getenv("CM_ML_MODEL_GIVEN_CHANNEL_MEANS"); + + const bool full_report = getenv_i("CK_SILENT_MODE") == 0; + + BenchmarkSettings(enum MODEL_TYPE mode = MODEL_TYPE::LITE) { + + if(given_channel_means_str) { + std::stringstream ss(given_channel_means_str); + for(int i=0;i<3;i++){ + ss >> given_channel_means[i]; + } + } + + switch (mode) + { + case MODEL_TYPE::LITE: + _graph_file = getenv_s("CK_ENV_TENSORFLOW_MODEL_TFLITE_FILEPATH"); + break; + + case MODEL_TYPE::TF_FROZEN: + _graph_file = getenv_s("CK_ENV_TENSORFLOW_MODEL_TF_FROZEN_FILEPATH"); + break; + + default: + std::cout << "Unsupported MODEL_TYPE" << std::endl; + exit(1); + break; + }; + _number_of_threads = std::thread::hardware_concurrency(); + _number_of_threads = _number_of_threads < 1 ? 1 : _number_of_threads; + _number_of_threads = !getenv("CK_HOST_CPU_NUMBER_OF_PROCESSORS") + ? 
_number_of_threads + : getenv_i("CK_HOST_CPU_NUMBER_OF_PROCESSORS"); + + // Print settings + std::cout << "Graph file: " << _graph_file << std::endl; + std::cout << "Image dir: " << images_dir << std::endl; + std::cout << "Image list: " << images_file << std::endl; + std::cout << "Image size: " << image_size << std::endl; + std::cout << "Image channels: " << num_channels << std::endl; + std::cout << "Prediction classes: " << num_classes << std::endl; + std::cout << "Result dir: " << result_dir << std::endl; + std::cout << "Batch count: " << batch_count << std::endl; + std::cout << "Batch size: " << batch_size << std::endl; + std::cout << "Normalize: " << normalize_img << std::endl; + std::cout << "Subtract mean: " << subtract_mean << std::endl; + if(subtract_mean && given_channel_means_str) + std::cout << "Per-channel means to subtract: " << given_channel_means[0] + << ", " << given_channel_means[1] + << ", " << given_channel_means[2] << std::endl; + + // Create results dir if none + auto dir = opendir(result_dir.c_str()); + if (dir) + closedir(dir); + else + system(("mkdir " + result_dir).c_str()); + + // Load list of images to be processed + std::ifstream file(images_file); + if (!file) + throw "Unable to open image list file " + images_file; + for (std::string s; !getline(file, s).fail();) + _image_list.emplace_back(s); + std::cout << "Image count in file: " << _image_list.size() << std::endl; + } + + const std::vector& image_list() const { return _image_list; } + + std::vector _image_list; + + int number_of_threads() { return _number_of_threads; } + + std::string graph_file() { return _graph_file; } + + float given_channel_means[3]; +private: + int _number_of_threads; + std::string _graph_file; +}; + +//---------------------------------------------------------------------- + +class BenchmarkSession { +public: + BenchmarkSession(const BenchmarkSettings* settings): _settings(settings) { + } + + virtual ~BenchmarkSession() {} + + float total_load_images_time() const { return _loading_time.total(); } + float total_prediction_time() const { return _total_prediction_time; } + float avg_load_images_time() const { return _loading_time.avg(); } + float avg_prediction_time() const { return _prediction_time.avg(); } + + bool get_next_batch() { + if (_batch_index+1 == _settings->batch_count) + return false; + _batch_index++; + int batch_number = _batch_index+1; + if (_settings->full_report || batch_number%10 == 0) + std::cout << "\nBatch " << batch_number << " of " << _settings->batch_count << std::endl; + int begin = _batch_index * _settings->batch_size; + int end = (_batch_index + 1) * _settings->batch_size; + int images_count = _settings->image_list().size(); + if (begin >= images_count || end > images_count) + throw format("Not enough images to populate batch %d", _batch_index); + _batch_files.clear(); + for (int i = begin; i < end; i++) + _batch_files.emplace_back(_settings->image_list()[i]); + return true; + } + + /// Begin measuring of new benchmark stage. + /// Only one stage can be measured at a time. 
+ void measure_begin() { + _start_time = std::chrono::high_resolution_clock::now(); + } + + /// Finish measuring of batch loading stage + float measure_end_load_images() { + float duration = measure_end(); + if (_settings->full_report) + std::cout << "Batch loaded in " << duration << " s" << std::endl; + _loading_time.add(duration); + return duration; + } + + /// Finish measuring of batch prediction stage + float measure_end_prediction() { + float duration = measure_end(); + _total_prediction_time += duration; + if (_settings->full_report) + std::cout << "Batch classified in " << duration << " s" << std::endl; + // Skip first batch in order to account warming-up the system + if (_batch_index > 0 || _settings->batch_count == 1) + _prediction_time.add(duration); + return duration; + } + + int batch_index() const { return _batch_index; } + const std::vector& batch_files() const { return _batch_files; } + +private: + int _batch_index = -1; + Accumulator _loading_time; + Accumulator _prediction_time; + const BenchmarkSettings* _settings; + float _total_prediction_time = 0; + std::vector _batch_files; + std::chrono::time_point _start_time; + + float measure_end() const { + auto finish_time = std::chrono::high_resolution_clock::now(); + std::chrono::duration elapsed = finish_time - _start_time; + return static_cast(elapsed.count()); + } +}; + +//---------------------------------------------------------------------- + +inline void init_benchmark() { + //xopenme_init(X_TIMER_COUNT, X_VAR_COUNT); +} + +inline void finish_benchmark(const BenchmarkSession& s) { + // Store metrics + /* store_value_f(X_VAR_TIME_SETUP, "setup_time_s", xopenme_get_timer(X_TIMER_SETUP)); + store_value_f(X_VAR_TIME_TEST, "test_time_s", xopenme_get_timer(X_TIMER_TEST)); + store_value_f(X_VAR_TIME_IMG_LOAD_TOTAL, "images_load_time_total_s", s.total_load_images_time()); + store_value_f(X_VAR_TIME_IMG_LOAD_AVG, "images_load_time_avg_s", s.avg_load_images_time()); + store_value_f(X_VAR_TIME_CLASSIFY_TOTAL, "prediction_time_total_s", s.total_prediction_time()); + store_value_f(X_VAR_TIME_CLASSIFY_AVG, "prediction_time_avg_s", s.avg_prediction_time()); + + // Finish xopenmp + xopenme_dump_state(); + xopenme_finish();*/ +} + +template +void measure_setup(L &&lambda_function) { + //xopenme_clock_start(X_TIMER_SETUP); + lambda_function(); + //xopenme_clock_end(X_TIMER_SETUP); +} + +template +void measure_prediction(L &&lambda_function) { + //xopenme_clock_start(X_TIMER_TEST); + lambda_function(); + //xopenme_clock_end(X_TIMER_TEST); +} + +//---------------------------------------------------------------------- + +template +class StaticBuffer { +public: + StaticBuffer(int size, const std::string& dir): _size(size), _dir(dir) { + _buffer = new TData[size]; + } + + virtual ~StaticBuffer() { + delete[] _buffer; + } + + TData* data() const { return _buffer; } + int size() const { return _size; } + +protected: + const int _size; + const std::string _dir; + TData* _buffer; +}; + +//---------------------------------------------------------------------- + +class ImageData : public StaticBuffer { +public: + ImageData(const BenchmarkSettings* s): StaticBuffer( + s->image_size * s->image_size * s->num_channels * (s->skip_internal_preprocessing ? 
sizeof(float) : sizeof(uint8_t)), + s->images_dir) {} + + void load(const std::string& filename) { + auto path = _dir + '/' + filename; + std::ifstream file(path, std::ios::in | std::ios::binary); + if (!file) throw "Failed to open image data " + path; + file.read(reinterpret_cast(_buffer), _size); + } +}; + +//---------------------------------------------------------------------- + +class ResultData : public StaticBuffer { +public: + ResultData(const BenchmarkSettings* s): StaticBuffer( + s->num_classes, s->result_dir) {} + + void save(const std::string& filename) { + auto path = _dir + '/' + filename + ".txt"; + std::ofstream file(path); + if (!file) throw "Unable to create result file " + path; + for (int i = 0; i < _size; i++) + file << _buffer[i] << std::endl; + } +}; + +//---------------------------------------------------------------------- + +class IBenchmark { +public: + bool has_background_class = false; + + virtual ~IBenchmark() {} + virtual void load_images(const std::vector& batch_images) = 0; + virtual void save_results(const std::vector& batch_images) = 0; +}; + + +template +class Benchmark : public IBenchmark { +public: + Benchmark(const BenchmarkSettings* settings, TData *in_ptr, TData *out_ptr) { + _in_ptr = in_ptr; + _out_ptr = out_ptr; + _in_data.reset(new ImageData(settings)); + _out_data.reset(new ResultData(settings)); + _in_converter.reset(new TInConverter(settings)); + _out_converter.reset(new TOutConverter(settings)); + } + + void load_images(const std::vector& batch_images) override { + int image_offset = 0; + for (auto image_file : batch_images) { + _in_data->load(image_file); + _in_converter->convert(_in_data.get(), _in_ptr + image_offset); + image_offset += _in_data->size(); + } + } + + void save_results(const std::vector& batch_images) override { + int image_offset = 0; + int probe_offset = has_background_class ? 
1 : 0; + for (auto image_file : batch_images) { + _out_converter->convert(_out_ptr + image_offset + probe_offset, _out_data.get()); + _out_data->save(image_file); + image_offset += _out_data->size() + probe_offset; + } + } + +private: + TData* _in_ptr; + TData* _out_ptr; + std::unique_ptr _in_data; + std::unique_ptr _out_data; + std::unique_ptr _in_converter; + std::unique_ptr _out_converter; +}; + +//---------------------------------------------------------------------- + +class IinputConverter { +public: + virtual ~IinputConverter() {} + virtual void convert(const ImageData* source, void* target) = 0; +}; + +//---------------------------------------------------------------------- + +class InCopy : public IinputConverter { +public: + InCopy(const BenchmarkSettings* s) {} + + void convert(const ImageData* source, void* target) { + uint8_t *uint8_target = static_cast(target); + std::copy(source->data(), source->data() + source->size(), uint8_target); + } +}; + +//---------------------------------------------------------------------- + +class InNormalize : public IinputConverter { +public: + InNormalize(const BenchmarkSettings* s): + _normalize_img(s->normalize_img), + _subtract_mean(s->subtract_mean), + _given_channel_means(s->given_channel_means), + _num_channels(s->num_channels) { + } + + void convert(const ImageData* source, void* target) { + // Copy image data to target + float *float_target = static_cast(target); + float sum = 0; + for (int i = 0; i < source->size(); i++) { + float px = source->data()[i]; + if (_normalize_img) + px = (px / 255.0 - 0.5) * 2.0; + sum += px; + float_target[i] = px; + } + // Subtract mean value if required + if (_subtract_mean) { + if(_given_channel_means) { + for (int i = 0; i < source->size(); i++) + float_target[i] -= _given_channel_means[i % _num_channels]; // assuming NHWC order! + } else { + float mean = sum / static_cast(source->size()); + for (int i = 0; i < source->size(); i++) + float_target[i] -= mean; + } + } + } + +private: + const bool _normalize_img; + const bool _subtract_mean; + const float *_given_channel_means; + const int _num_channels; +}; + +//---------------------------------------------------------------------- + +class OutCopy { +public: + OutCopy(const BenchmarkSettings* s) {} + + void convert(const float* source, ResultData* target) const { + std::copy(source, source + target->size(), target->data()); + } +}; + +//---------------------------------------------------------------------- + +class OutDequantize { +public: + OutDequantize(const BenchmarkSettings* s) {} + + void convert(const uint8_t* source, ResultData* target) const { + for (int i = 0; i < target->size(); i++) + target->data()[i] = source[i] / 255.0; + } +}; + +} // namespace CK diff --git a/script/app-image-classification-tf-onnx-cpp/run.sh b/script/app-image-classification-tf-onnx-cpp/run.sh new file mode 100644 index 0000000000..b4a46853bc --- /dev/null +++ b/script/app-image-classification-tf-onnx-cpp/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} +${CM_CXX_COMPILER_WITH_PATH} -O3 ${CM_TMP_CURRENT_SCRIPT_PATH}/src/classification.cpp -o classification.exe -ltensorflow + +test $? 
-eq 0 || exit 1 diff --git a/script/app-image-classification-tf-onnx-cpp/src/classification.cpp b/script/app-image-classification-tf-onnx-cpp/src/classification.cpp new file mode 100644 index 0000000000..a9ee5ee50e --- /dev/null +++ b/script/app-image-classification-tf-onnx-cpp/src/classification.cpp @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2018 cTuning foundation. + * See CK COPYRIGHT.txt for copyright details. + * + * See CK LICENSE for licensing details. + * See CK COPYRIGHT for copyright details. + */ + +// TODO: this header should be moved to a common location (where?) +#include "../include/benchmark.h" + +#include "tensorflow/core/public/session.h" +#include "tensorflow/cc/framework/scope.h" + +using namespace std; +using namespace CK; +using namespace tensorflow; + +int main(int argc, char* argv[]) { + try { + init_benchmark(); + + BenchmarkSettings settings(MODEL_TYPE::TF_FROZEN); + BenchmarkSession session(&settings); + ImageData input_data(&settings); + ResultData result_data(&settings); + unique_ptr input_converter; + OutCopy result_converter(&settings); + unique_ptr tf_session; + GraphDef graph_def; + + if (settings.skip_internal_preprocessing) + input_converter.reset(new InCopy(&settings)); + else + input_converter.reset(new InNormalize(&settings)); + + // TODO: this option is for TF mobilenets, but generally should be evaluated + // from weights package somehow (supported number or classes in meta?) + // TODO: this problem is related to the absence of a knowledge about + // required image size for particular image recognition network package. + // TODO: We have to provide common set of parameters for all image-recognition packages. + const bool has_background_class = true; + + cout << "\nLoading graph..." << endl; + measure_setup([&]{ + Status status = ReadBinaryProto(Env::Default(), settings.graph_file(), &graph_def); + if (!status.ok()) + throw "Failed to load graph: " + status.ToString(); + + tf_session.reset(NewSession(SessionOptions())); + + status = tf_session->Create(graph_def); + if (!status.ok()) + throw "Failed to create new session: " + status.ToString(); + }); + + cout << "\nProcessing batches..." << endl; + measure_prediction([&]{ + Tensor input(DT_FLOAT, TensorShape({settings.batch_size, + settings.image_size, + settings.image_size, + settings.num_channels})); + float* input_ptr = input.flat().data(); + vector outputs; + + while (session.get_next_batch()) { + // Load batch + session.measure_begin(); + int image_offset = 0; + for (auto image_file : session.batch_files()) { + input_data.load(image_file); + input_converter->convert(&input_data, input_ptr + image_offset); + image_offset += input_data.size(); + } + session.measure_end_load_images(); + + // Classify current batch + session.measure_begin(); + Status status = tf_session->Run( + {{settings.input_layer_name, input}}, {settings.output_layer_name}, {}, &outputs); + if (!status.ok()) + throw "Running model failed: " + status.ToString(); + session.measure_end_prediction(); + + // Process output tensor + auto output_flat = outputs[0].flat(); + if (output_flat.size() != settings.batch_size * (settings.num_classes + 1)) + throw format("Output tensor has size of %d, but expected size is %d", + output_flat.size(), settings.batch_size * (settings.num_classes + 1)); + image_offset = 0; + int probe_offset = has_background_class ? 
1 : 0; + for (auto image_file : session.batch_files()) { + result_converter.convert(output_flat.data() + image_offset + probe_offset, &result_data); + result_data.save(image_file); + image_offset += result_data.size() + probe_offset; + } + } + }); + + finish_benchmark(session); + } + catch (const string& error_message) { + cerr << "ERROR: " << error_message << endl; + return -1; + } + return 0; +} diff --git a/script/app-image-classification-torch-py/README-extra.md b/script/app-image-classification-torch-py/README-extra.md new file mode 100644 index 0000000000..6628885061 --- /dev/null +++ b/script/app-image-classification-torch-py/README-extra.md @@ -0,0 +1,16 @@ +# CPU + +## 20240129; Windows 11 + +```bash +cmr "get generic-python-lib _package.torch" --version=2.1.1 +cmr "get generic-python-lib _package.torchvision" --version=0.16.2 +``` + +# CUDA + +```bash +cm run script "install python-venv" --name=test +cm run script "python app image-classification pytorch _cuda" --adr.python.name=test +cm run script "python app image-classification pytorch _cuda" --adr.python.name=test --input=src/computer_mouse.jpg +``` diff --git a/script/app-image-classification-torch-py/README.md b/script/app-image-classification-torch-py/README.md new file mode 100644 index 0000000000..107a6a860c --- /dev/null +++ b/script/app-image-classification-torch-py/README.md @@ -0,0 +1,168 @@ +Automatically generated README for this automation recipe: **app-image-classification-torch-py** + +Category: **Modular AI/ML application pipeline** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-image-classification-torch-py,e3986ae887b84ca8) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-torch-py)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *app,image-classification,python,torch* +* Output cached? 
*False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "app image-classification python torch" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=app,image-classification,python,torch`
+
+`cm run script --tags=app,image-classification,python,torch[,variations] `
+
+*or*
+
+`cmr "app image-classification python torch"`
+
+`cmr "app image-classification python torch [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'app,image-classification,python,torch',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="app,image-classification,python,torch"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=app,image-classification,python,torch) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "app image-classification python torch[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_cuda` + - Environment variables: + - *USE_CUDA*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + +
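+
+For example, the `_cuda` variation is selected inside the tags; this command is
+taken from this script's README-extra notes:
+
+```bash
+cm run script "python app image-classification pytorch _cuda" --adr.python.name=test
+```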
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_BATCH_COUNT: `1` +* CM_BATCH_SIZE: `1` + +
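+
+For example, a sketch using the same `--env.KEY=VALUE` convention to process
+two batches instead of one:
+
+```bash
+cmr "app image-classification python torch" --env.CM_BATCH_COUNT=2
+```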
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-torch-py/_cm.json)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * get,sys-utils-cm
+       - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm)
+     * get,python3
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,dataset,imagenet,image-classification,preprocessed
+       - CM script: [get-preprocessed-dataset-imagenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-imagenet)
+     * get,dataset-aux,imagenet-aux,image-classification
+       - CM script: [get-dataset-imagenet-aux](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-aux)
+     * get,imagenet-helper
+       - CM script: [get-dataset-imagenet-helper](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-helper)
+     * get,ml-model,image-classification,resnet50,_pytorch,_fp32
+       - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50)
+     * get,generic-python-lib,_torch
+       * `if (USE_CUDA != yes)`
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_torch_cuda
+       * `if (USE_CUDA == yes)`
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_torchvision
+       * `if (USE_CUDA != yes)`
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_torchvision_cuda
+       * `if (USE_CUDA == yes)`
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+  1. Run "preprocess" function from customize.py
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-torch-py/_cm.json)
+  1. ***Run native script if it exists***
+     * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-torch-py/run.bat)
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-torch-py/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-torch-py/_cm.json)
+  1. Run "postprocess" function from customize.py
+  1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-torch-py/_cm.json) + +___ +### Script output +`cmr "app image-classification python torch [,variations]" -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/app-image-classification-torch-py/_cm.json b/script/app-image-classification-torch-py/_cm.json new file mode 100644 index 0000000000..a6a78a6798 --- /dev/null +++ b/script/app-image-classification-torch-py/_cm.json @@ -0,0 +1,89 @@ +{ + "alias": "app-image-classification-torch-py", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Modular AI/ML application pipeline", + "default_env": { + "CM_BATCH_COUNT": "1", + "CM_BATCH_SIZE": "1" + }, + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "get,sys-utils-cm" + }, + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "tags": "get,dataset,imagenet,image-classification,preprocessed" + }, + { + "tags": "get,dataset-aux,imagenet-aux,image-classification" + }, + { + "tags": "get,imagenet-helper" + }, + { + "tags": "get,ml-model,image-classification,resnet50,_pytorch,_fp32" + }, + { + "tags": "get,generic-python-lib,_torch", + "skip_if_env": { + "USE_CUDA": [ + "yes" + ] + } + }, + { + "tags": "get,generic-python-lib,_torch_cuda", + "enable_if_env": { + "USE_CUDA": [ + "yes" + ] + } + }, + { + "tags": "get,generic-python-lib,_torchvision", + "skip_if_env": { + "USE_CUDA": [ + "yes" + ] + } + }, + { + "tags": "get,generic-python-lib,_torchvision_cuda", + "enable_if_env": { + "USE_CUDA": [ + "yes" + ] + } + } + ], + "tags": [ + "app", + "image-classification", + "torch", + "python" + ], + "tags_help":"app image-classification python torch", + "variations": { + "cuda": { + "env": { + "USE_CUDA": "yes" + }, + "deps": [ + { + "tags": "get,cuda" + } + ] + } + }, + "uid": "e3986ae887b84ca8" +} diff --git a/script/app-image-classification-torch-py/img/computer_mouse.jpg b/script/app-image-classification-torch-py/img/computer_mouse.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7f8abb6fe93d18af393ea036b24b907cc48e786 GIT binary patch literal 41154 zcmce-XIxWVvp2kxkWfM|0YZy3LkT^U2pC$ZB3%dqL0V`cO(~Zcs&oNCy3#vH6%|mr zbOX{vR8&MjL_x%^&-S{n`<(l{&-wCvc{jhE_0Q~;*|TTO%$l`k^LzRC8o+5rG9>{J z2m~+z|A5~w#H)-Ve7ym{)KnH=0sw#o;DA8?M%Tb^oFHZf+w0)T4S@j=u>EtQA@IN3 zBoGVzgEK*l{ihEU#LD1r*}=9BJXJu<3%0kwlLz|We0(o6{LN>P-p zIsf7x0baT$oS|G_mN&iMy7ffy+JM}B1`B}mzSYZqGfFYN-LW?}!v;Df^d!P+1l zTJ^8~AQu4k9~k}*MuT+3f6y^iP(To*_5Zmqc>H;e2m}Cdfh`Mo{%<4lztI#xIXuDs zh2Z(8bpU1g)BZXB@cB2r3cSG|x{|7z=KuWs)APR%17#Hp3p0IXO@|Yhe_;b9W#bbd zD@IVp|JDx_9O*wY0~iRv`fm(=3jXpBUjVSt z(ge|e-|bH~(0?HR+!NFpSY8utY{qP0X_alNe_dh( zGr`2v9v1I+{t90AoR_vqm6H_913`^k&lRYbf)vYWY7o{of<+}F&T~uHGtR4b%=GG6 zD=CSl?pRM+y`#Fxq{3Vx1v`nD>R$YD`XYu6QOEd9sO0Q}+ZODIHBlZe~}g)_AAruSinUbMs_m^JnF1RBZH2 zkbGIT!yXp(PF8nJZAxAdyv+LTs>)AtwP$%9F;fvxKgl)NH!O72gjpqMg86EU^<2+> zybMGdoj`NqO~58~uqf zg5>5NTkUXGf}kCns@j*!GOETYpqwsTrQ9qNC^-q^DQgD7`~mr8@o2{NZRH)wyLtS_67W0(z~q4oR58kIDq1 z%6oCXaGF&j+e5w~maZW!rj_n`ldZ(ts|?6?DOTL=2?p6%L#(8U zIB$v#1~Hdl0E!2||1#rC=K>A9l}`?FYS{N0+chOwaP)cuufeZDcfj#e8Q|(og~{{C zdtEj`IYFmd720YXr8yG?$k2*+LSAp6FO4LJK>gNQhe8L5#mS7W!QGq+edWxzJCW>r z`7Ub$Ce@C_+v%FI?y${w$4oo>s#XGdA4$(07Wxw3VUqPFxn$v(WOK7@tRQ!lK9{T^ zgiD-$YsKLihABn=$XKpiFOZij8%0tRonup-C)!_Qy*a8fF6%Iq%kWfNPvuKy_8i@k zR$6Y--k;qYNRJ1Fj}5^r8?+CorNWpHqTrCacFK| z4e(l%j$+82-q|yalEvl4 zC@C5b&m_ujL=-;2GIh)VHyG!4MJxa;SWblU*b0q9jAHX=kxz 
zA<>lWBHA>!bWay48RMJr)an&YiW1h^Qq?feYtfaqnVp)Q?d0k}W3Q@KZVQ_CQr+#!g-gHNYZWSjt+g4y3rq;@W zCCA~-`^{dcK`IuE74Xiwg=*0XLDX)C(^ME;h^lq~?F5JYR2UbO(WPyGq;Anf;qQx{ z3GJs{GnGe?of_^Ibv2wG>I}NZP3@;L1HYT6PEKZ|x@}&kS zJ1*+4=vYZNL_lZq-yM!{u&+BqeU@yK+K{i`k^n8}R8e5NGLaYq}4iI8bk%$$kKc}ENtFltwMu{OvzDQ(cB#O5#;#3GTqWf z<8J8Y^4);}9hdGj*px`GUd8Fr--+$F2#mioM0W!Vp9PmClE%ppw`i}mon>1xxMWT_ z80?(V1;{yXm8DNa2o38yoA%aGcIG?*@!fUy%0?Xn#NVN`Qw0JmqHa-pnuGs%zRd)-~Q4zwQwVB4LcqQ;DAgfNhWnk}H>}5xnLbUHv?dKfZ03jM^J_MKAN=@cFdl3o&L|u)fFYLAh z*u!9+dJvB}j$}Y0YgY1p6v(!L;@L`LncT-$n>?7$>QsALf%3>2POSGVf`d`)O)wMN zN(_Y!Q;x+X?%I?g#7#Q0we(hT4D%*ay|OnA zXF%EN(NGmrIo}%C01d7+8_UBcl3LY*J_{A4?^SHA8G986B_d{S4@dqqW8$V^1n^o@ zArIz$p>NMFmU$h#>0_Z!2ZF}p#evFgmmb|Ba6SNrg7Tp3LE4^aSUln=`(B_BGm|go z?&?b0MBVkM1P`}uE?Hqv-~0%mo+3O@B#!BWf-a|MDT;trJq)sB(UJvgk}Rk|&e?L# zw0Dx+CP3f2Od0>JYO9DSWEQ7xNbuD<^l4}-zMPJ(+@yJ?+smhkBNi#n1r8*V4KPte z%Cn|&tPcW^nh071Kg<*$kF|uuIdq}A8UcRPtO{4ghzUys3Os})v;%C4jBshe>=HhY z?Iu)v@}dl++4LefO9|-6uwD5bu?jW1CKmfDj2zI;>SCLOQ7!v!*luUj1`ZRiXU;Bh zlWQ!tf{{CU~IN%w06vg$8M znm55=jdMt5>k#uM7y&pvvQ=f-l+MIuP6UD|L;C?fk1}IdWnm0r*HMUjH$KU$PSgq{v(MCamFPOK^{=(o5D|>i8+9bv%j$rk1(?CSSDDoEs`F+mE6u*-!X!Zj zq2MyXjKp}dj95T`ic>6_KyDURVc$~RMy2ossTXv#n;0A?}G%}5?;?bj=kWHue9!boY0Rp*qA8ZBHp?P@^# z&?v`24r&5NlMpfNfC_exc?{b>ZJ>+aB*V74+=LEni7NLgY8Uydm~gJV-#a;NmY350 z)U8Em=UT9^WgrahuEG^eEsoy%KTQZt6k%yVPWygG(84ykk*9rVyocbg-@0Cl zU<*mJ`O=aC8DCpW$(Ke##o%(?s9l27-ZjQGi;zY5s&!MZ>U{3BU21p#u$K6UA1&~B- zv7-muyk${z|CIsX-LKuMWsI1;MG5Uo+ql_@Qx<2Myy$I!bP}S**D>GepbemipkYBl zmM+QwCcv-Q&IBc`mI1AIY}GuI1Y-M&c=Qoy<2d<;*<8Vh*}#!x;c0O+2|95k&Yd=4 zr<{OjIQc>rNp>o~0Rc=v*e=1iJh=N6(rm#$=?HEJS2lF~oes`6ZE6BlNJy$6-iZJM zn!`-{?>ekaXG5l~qIPO_-rO~;w+#WF08+#D@P9JLNk5UuE_*S7^fQQ!g8lv@qd}$2 z;IUIOp0+%!O+wH>HzP^^DZ*?C4#0gLz|171!6i8~yWV7$UpU~(FQQ?-!E!AZkVVh0~ zgN@*dPr4RX2}Fx4>yM>a^?%#p@-@a|!|~g4VE|$JNO8TVhN<6vDobNg*7>T(V&HHj zF4C?7&c3T{*v84=QC<$R*Ji1gNuW<{@?O(IeJl9M0MiqUyo58C2;XPJxiE`rY8NfOJ5v_!>Un+BfUSqIl-xU%d$a`Uz}Dz@&TIy$Nw%3VG!a7^ZuFvjm`nX; z7RG^sLRz?@;GZLq<~XWsqpC(%(WHY_-qCtPUsmzHNMX1R z7BkmMjn*T_4CL?7emzoo<`KF%&{s>Ov;LPh%jX#!#-h>2698cf+)!JYO3fceV8@2?zyH4P>dwjYgdJg*BQOhE~x zIh3%e0y*ALc=Y(?zRPQ5z0=Y{b?IEesP*!jcSnK~6|r2$H>!Is#`lUt_wBds04BPT zW4b>k68}cLV$<#4ZlI5b-g@!%=hV`I`xd%8{I@wuK{PDwYI6{g`(i`=+K2qtz0X~A zImkwnvw$YH+g{7zVktx0$E~>O3B;gC_Bs2^IWG?=4Sw*BrS4lEu*QN zS*(MxU7EAlC%vpDbmV6r#Suv5IcG^v5tNNC7)MVL1z;PZrW_Q6FRoD!6OdCbH|AjW z4Zc$_W-m0(br4sevYPT$p?zX@e~pU(tu6z--%ODNA#5M5qiP#0*ekXUBLI=vKzpu7 zX_tNTt+ma!(2a^DtJ-Db$8>0j+KB=?S9No*eUnDp5F;XXnGW?4nMEjnxxJpOX)UY5 zSK8~}V3IH~Y?E-git=hCq`^d>Z=F`u9P7f(@z{U1;OCYq&h5o=)4<|%$g5i3Z`;() z_RsRof9f8#IncKRG$YK|reM!!X*b&EsN>wyu}6X*ODv|m?>Bc9UusH!0ywr|Yw2?a pa^S(Ya07d;T!oUBXtdHJHMdN_ePy^)-FR_TZvWDzTTcI~{~thZMP&d0 literal 0 HcmV?d00001 diff --git a/script/app-image-classification-torch-py/requirements.txt b/script/app-image-classification-torch-py/requirements.txt new file mode 100644 index 0000000000..d1c427e4aa --- /dev/null +++ b/script/app-image-classification-torch-py/requirements.txt @@ -0,0 +1,4 @@ +Pillow +requests +numpy + diff --git a/script/app-image-classification-torch-py/run.bat b/script/app-image-classification-torch-py/run.bat new file mode 100644 index 0000000000..1415d4265b --- /dev/null +++ b/script/app-image-classification-torch-py/run.bat @@ -0,0 +1,20 @@ +rem connect CM portable scripts with CK env + +set CM_ML_TORCH_MODEL_NAME=resnet50 +set CM_ML_MODEL_INPUT_DATA_TYPE=float32 +set CM_ML_MODEL_IMAGE_HEIGHT=224 +set CM_ML_MODEL_IMAGE_WIDTH=224 + +rem set CM_DATASET_IMAGENET_PREPROCESSED_DIR=%CM_DATASET_PREPROCESSED_PATH% + +set 
CM_DATASET_IMAGENET_PREPROCESSED_DIR=%CM_DATASET_PREPROCESSED_FULL_PATH% +set CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT=%CM_DATASET_AUX_PATH%\synset_words.txt +set CM_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE=float32 +set CM_RESULTS_DIR=%CM_TMP_CURRENT_SCRIPT_PATH%\results +set ML_MODEL_DATA_LAYOUT=NCHW + +%CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\pytorch_classify_preprocessed.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/app-image-classification-torch-py/run.sh b/script/app-image-classification-torch-py/run.sh new file mode 100644 index 0000000000..b50b79eb40 --- /dev/null +++ b/script/app-image-classification-torch-py/run.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} + +# connect CM intelligent components with CK env +export CM_ML_TORCH_MODEL_NAME=resnet50 +export CM_ML_MODEL_INPUT_DATA_TYPE=float32 +export CM_ML_MODEL_IMAGE_HEIGHT=224 +export CM_ML_MODEL_IMAGE_WIDTH=224 +export CM_DATASET_IMAGENET_PREPROCESSED_DIR=${CM_DATASET_PREPROCESSED_FULL_PATH} +export CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT=${CM_DATASET_AUX_PATH}/synset_words.txt +export CM_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE=float32 +export CM_RESULTS_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}/results +export ML_MODEL_DATA_LAYOUT=NCHW + +${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt +test $? -eq 0 || exit 1 + +${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/pytorch_classify_preprocessed.py +test $? -eq 0 || exit 1 diff --git a/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py b/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py new file mode 100644 index 0000000000..f3ee0b587d --- /dev/null +++ b/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py @@ -0,0 +1,205 @@ +#!/usr/bin/env python3 + +import json +import time +import os +import shutil +import numpy as np + + +import torch +import torchvision.models as models + +import imagenet_helper +from imagenet_helper import (load_preprocessed_batch, image_list, class_labels, BATCH_SIZE) + +## Writing the results out: +# +RESULTS_DIR = os.getenv('CM_RESULTS_DIR') +FULL_REPORT = os.getenv('CM_SILENT_MODE', '0') in ('NO', 'no', 'OFF', 'off', '0') + +## Processing by batches: +# +BATCH_COUNT = int(os.getenv('CM_BATCH_COUNT', 1)) + +## Enabling GPU if available and not disabled: +# +USE_CUDA = (os.getenv('USE_CUDA', '').strip()=='yes') + + +labels_path = os.environ['CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT'] + +def load_labels(labels_filepath): + my_labels = [] + input_file = open(labels_filepath, 'r') + for l in input_file: + my_labels.append(l.strip()) + return my_labels + + +labels = load_labels(labels_path) + + +data_layout = os.environ['ML_MODEL_DATA_LAYOUT'] + + + +def main(): + global BATCH_SIZE + global BATCH_COUNT + + setup_time_begin = time.time() + + bg_class_offset=0 + + # Cleanup results directory + if os.path.isdir(RESULTS_DIR): + shutil.rmtree(RESULTS_DIR) + os.mkdir(RESULTS_DIR) + + # Load the [cached] Torch model + path_to_model_pth = os.environ['CM_ML_MODEL_FILE_WITH_PATH'] + + model=models.resnet50(pretrained=False) + model.load_state_dict(torch.load(path_to_model_pth)) + + model.eval() + + # move the model to GPU for speed if available + if USE_CUDA: + model.to('cuda') + + setup_time = time.time() - setup_time_begin + + # Run batched mode + test_time_begin = 
time.time()
+    image_index = 0
+    total_load_time = 0
+    total_classification_time = 0
+    first_classification_time = 0
+    images_loaded = 0
+
+    image_path = os.environ.get('CM_INPUT','')
+    if image_path !='':
+
+        normalize_data_bool=True
+        subtract_mean_bool=False
+        given_channel_means = []  # assumption: no per-channel means provided; defined here so enabling subtract_mean_bool cannot raise a NameError
+
+        from PIL import Image
+
+        def load_and_resize_image(image_filepath, height, width):
+            pillow_img = Image.open(image_filepath).resize((width, height)) # sic! The order of dimensions in resize is (W,H)
+
+            input_data = np.float32(pillow_img)
+
+            # Normalize
+            if normalize_data_bool:
+                input_data = input_data/127.5 - 1.0
+
+            # Subtract mean value
+            if subtract_mean_bool:
+                if len(given_channel_means):
+                    input_data -= given_channel_means
+                else:
+                    input_data -= np.mean(input_data)
+
+            # print(np.array(pillow_img).shape)
+            nhwc_data = np.expand_dims(input_data, axis=0)
+
+            if data_layout == 'NHWC':
+                # print(nhwc_data.shape)
+                return nhwc_data
+            else:
+                nchw_data = nhwc_data.transpose(0,3,1,2)
+                # print(nchw_data.shape)
+                return nchw_data
+
+        BATCH_COUNT=1
+
+
+    for batch_index in range(BATCH_COUNT):
+        batch_number = batch_index+1
+        if FULL_REPORT or (batch_number % 10 == 0):
+            print("\nBatch {} of {}".format(batch_number, BATCH_COUNT))
+
+        begin_time = time.time()
+
+        if image_path=='':
+            batch_data, image_index = load_preprocessed_batch(image_list, image_index)
+        else:
+            batch_data = load_and_resize_image(image_path, 224, 224)
+            image_index = 1
+
+        torch_batch = torch.from_numpy( batch_data )
+
+        load_time = time.time() - begin_time
+        total_load_time += load_time
+        images_loaded += BATCH_SIZE
+        if FULL_REPORT:
+            print("Batch loaded in %fs" % (load_time))
+
+        # Classify one batch
+        begin_time = time.time()
+
+        # move the input to GPU for speed if available
+        if USE_CUDA:
+            torch_batch = torch_batch.to('cuda')
+
+        with torch.no_grad():
+            batch_results = model( torch_batch )
+
+        classification_time = time.time() - begin_time
+        if FULL_REPORT:
+            print("Batch classified in %fs" % (classification_time))
+
+        total_classification_time += classification_time
+        # Remember first batch prediction time
+        if batch_index == 0:
+            first_classification_time = classification_time
+
+        # Process results
+        for index_in_batch in range(BATCH_SIZE):
+            softmax_vector = batch_results[index_in_batch][bg_class_offset:] # skipping the background class on the left (if present)
+            global_index = batch_index * BATCH_SIZE + index_in_batch
+
+            res_file = os.path.join(RESULTS_DIR, image_list[global_index])
+
+            with open(res_file + '.txt', 'w') as f:
+                for prob in softmax_vector:
+                    f.write('{}\n'.format(prob))
+
+            top5_indices = list(reversed(softmax_vector.argsort()))[:5]
+            for class_idx in top5_indices:
+                print("\t{}\t{}\t{}".format(class_idx, softmax_vector[class_idx], labels[class_idx]))
+            print("")
+
+
+    test_time = time.time() - test_time_begin
+
+    if BATCH_COUNT > 1:
+        avg_classification_time = (total_classification_time - first_classification_time) / (images_loaded - BATCH_SIZE)
+    else:
+        avg_classification_time = total_classification_time / images_loaded
+
+    avg_load_time = total_load_time / images_loaded
+
+    # Store benchmarking results:
+    output_dict = {
+        'setup_time_s': setup_time,
+        'test_time_s': test_time,
+        'images_load_time_total_s': total_load_time,
+        'images_load_time_avg_s': avg_load_time,
+        'prediction_time_total_s': total_classification_time,
+        'prediction_time_avg_s': avg_classification_time,
+
+        'avg_time_ms': avg_classification_time * 1000,
+        'avg_fps': 1.0 / avg_classification_time,
+        'batch_time_ms': avg_classification_time * 1000 * 
BATCH_SIZE,
+        'batch_size': BATCH_SIZE,
+    }
+    with open('tmp-ck-timer.json', 'w') as out_file:
+        json.dump(output_dict, out_file, indent=4, sort_keys=True)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/script/app-image-classification-tvm-onnx-py/README-extra.md b/script/app-image-classification-tvm-onnx-py/README-extra.md
new file mode 100644
index 0000000000..c24e073a99
--- /dev/null
+++ b/script/app-image-classification-tvm-onnx-py/README-extra.md
@@ -0,0 +1,16 @@
+Example:
+
+```bash
+cm run script "get llvm" --version=14.0.0
+cm run script "get tvm _llvm" --version=0.10.0
+cm run script "python app image-classification tvm-onnx"
+```
+
+Example 2:
+
+```bash
+cm run script "install python-venv" --name=test --version=3.10.7
+cm run script "get generic-python-lib _apache-tvm"
+cm run script "python app image-classification tvm-onnx _tvm-pip-install"
+cm run script "python app image-classification tvm-onnx _tvm-pip-install" --input=`cm find script --tags=python,app,image-classification,tvm-onnx`/img/computer_mouse.jpg
+```
\ No newline at end of file
diff --git a/script/app-image-classification-tvm-onnx-py/README.md b/script/app-image-classification-tvm-onnx-py/README.md
new file mode 100644
index 0000000000..bd1d4c56aa
--- /dev/null
+++ b/script/app-image-classification-tvm-onnx-py/README.md
@@ -0,0 +1,160 @@
+Automatically generated README for this automation recipe: **app-image-classification-tvm-onnx-py**
+
+Category: **Modular AI/ML application pipeline**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-image-classification-tvm-onnx-py,63080407db4d4ac4) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-tvm-onnx-py)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *app,image-classification,python,tvm-onnx*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "app image-classification python tvm-onnx" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=app,image-classification,python,tvm-onnx`
+
+`cm run script --tags=app,image-classification,python,tvm-onnx[,variations] `
+
+*or*
+
+`cmr "app image-classification python tvm-onnx"`
+
+`cmr "app image-classification python tvm-onnx [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'app,image-classification,python,tvm-onnx',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
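+As a hedged complement to the example above, CLI flags such as `--input` (used in the README-extra examples) are assumed to map to input keys of the same name, so classifying a single image from Python might look like this (the image path is illustrative):
+
+```python
+import cmind
+
+# Sketch only: assumes the --input CLI flag maps to the 'input' key;
+# the image path below is an example, not a required location.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'app,image-classification,python,tvm-onnx,_tvm-pip-install',
+                  'input': 'img/computer_mouse.jpg',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```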
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="app,image-classification,python,tvm-onnx"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=app,image-classification,python,tvm-onnx) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "app image-classification python tvm-onnx[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_cuda` + - Environment variables: + - *USE_CUDA*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * `_llvm` + - Workflow: + +
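+
+For example, a variation is selected by appending it to the tag list. A minimal hedged sketch of running the `_cuda` variation from Python (equivalent to `cm run script --tags=app,image-classification,python,tvm-onnx,_cuda` on the command line):
+
+```python
+import cmind
+
+# Sketch: selecting the `_cuda` variation sets USE_CUDA=yes and pulls in
+# the get-cuda dependency, as listed in the variation table above.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'app,image-classification,python,tvm-onnx,_cuda',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```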
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_BATCH_COUNT: `1` +* CM_BATCH_SIZE: `1` + +
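+
+As an illustration (not the actual CM implementation), the defaults behave like a dictionary that user-supplied values override:
+
+```python
+# Hedged sketch: values from default_env apply only when the user
+# has not set the key, e.g. via --env.CM_BATCH_SIZE=2 on the CLI.
+default_env = {'CM_BATCH_COUNT': '1', 'CM_BATCH_SIZE': '1'}
+user_env = {'CM_BATCH_SIZE': '2'}
+
+env = {**default_env, **user_env}
+print(env)   # {'CM_BATCH_COUNT': '1', 'CM_BATCH_SIZE': '2'}
+```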
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-tvm-onnx-py/_cm.json)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * detect,cpu
+       - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu)
+     * get,python3
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,dataset,image-classification,original
+       - CM script: [get-dataset-imagenet-val](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-val)
+     * get,dataset-aux,image-classification
+       - CM script: [get-dataset-imagenet-aux](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-aux)
+     * get,raw,ml-model,image-classification,resnet50,_onnx
+       - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50)
+     * get,generic-python-lib,_onnxruntime
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,tvm
+       * CM names: `--adr.['tvm']...`
+       - CM script: [get-tvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tvm)
+  1. Run "preprocess" function from customize.py
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-tvm-onnx-py/_cm.json)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-tvm-onnx-py/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-tvm-onnx-py/_cm.json)
+  1. Run "postprocess" function from customize.py
+  1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-classification-tvm-onnx-py/_cm.json) + +___ +### Script output +`cmr "app image-classification python tvm-onnx [,variations]" -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/app-image-classification-tvm-onnx-py/_cm.json b/script/app-image-classification-tvm-onnx-py/_cm.json new file mode 100644 index 0000000000..1ae2e5c320 --- /dev/null +++ b/script/app-image-classification-tvm-onnx-py/_cm.json @@ -0,0 +1,73 @@ +{ + "alias": "app-image-classification-tvm-onnx-py", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Modular AI/ML application pipeline", + "default_env": { + "CM_BATCH_COUNT": "1", + "CM_BATCH_SIZE": "1" + }, + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "tags": "get,dataset,image-classification,original" + }, + { + "tags": "get,dataset-aux,image-classification" + }, + { + "tags": "get,raw,ml-model,image-classification,resnet50,_onnx" + }, + { + "tags": "get,generic-python-lib,_onnxruntime" + }, + { + "names": [ "tvm" ], + "tags": "get,tvm" + } + ], + "tags": [ + "app", + "image-classification", + "tvm-onnx", + "python" + ], + "tags_help":"app image-classification python tvm-onnx", + "uid": "63080407db4d4ac4", + "variations": { + "llvm": { + "add_deps_recursive": { + "tvm": { + "tags": "_llvm" + } + } + }, + "cuda": { + "add_deps_recursive": { + "tvm": { + "tags": "_cuda" + } + }, + "env": { + "USE_CUDA": "yes" + }, + "deps": [ + { + "tags": "get,cuda" + } + ] + } + } +} diff --git a/script/app-image-classification-tvm-onnx-py/img/computer_mouse.jpg b/script/app-image-classification-tvm-onnx-py/img/computer_mouse.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7f8abb6fe93d18af393ea036b24b907cc48e786 GIT binary patch literal 41154 zcmce-XIxWVvp2kxkWfM|0YZy3LkT^U2pC$ZB3%dqL0V`cO(~Zcs&oNCy3#vH6%|mr zbOX{vR8&MjL_x%^&-S{n`<(l{&-wCvc{jhE_0Q~;*|TTO%$l`k^LzRC8o+5rG9>{J z2m~+z|A5~w#H)-Ve7ym{)KnH=0sw#o;DA8?M%Tb^oFHZf+w0)T4S@j=u>EtQA@IN3 zBoGVzgEK*l{ihEU#LD1r*}=9BJXJu<3%0kwlLz|We0(o6{LN>P-p zIsf7x0baT$oS|G_mN&iMy7ffy+JM}B1`B}mzSYZqGfFYN-LW?}!v;Df^d!P+1l zTJ^8~AQu4k9~k}*MuT+3f6y^iP(To*_5Zmqc>H;e2m}Cdfh`Mo{%<4lztI#xIXuDs zh2Z(8bpU1g)BZXB@cB2r3cSG|x{|7z=KuWs)APR%17#Hp3p0IXO@|Yhe_;b9W#bbd zD@IVp|JDx_9O*wY0~iRv`fm(=3jXpBUjVSt z(ge|e-|bH~(0?HR+!NFpSY8utY{qP0X_alNe_dh( zGr`2v9v1I+{t90AoR_vqm6H_913`^k&lRYbf)vYWY7o{of<+}F&T~uHGtR4b%=GG6 zD=CSl?pRM+y`#Fxq{3Vx1v`nD>R$YD`XYu6QOEd9sO0Q}+ZODIHBlZe~}g)_AAruSinUbMs_m^JnF1RBZH2 zkbGIT!yXp(PF8nJZAxAdyv+LTs>)AtwP$%9F;fvxKgl)NH!O72gjpqMg86EU^<2+> zybMGdoj`NqO~58~uqf zg5>5NTkUXGf}kCns@j*!GOETYpqwsTrQ9qNC^-q^DQgD7`~mr8@o2{NZRH)wyLtS_67W0(z~q4oR58kIDq1 z%6oCXaGF&j+e5w~maZW!rj_n`ldZ(ts|?6?DOTL=2?p6%L#(8U zIB$v#1~Hdl0E!2||1#rC=K>A9l}`?FYS{N0+chOwaP)cuufeZDcfj#e8Q|(og~{{C zdtEj`IYFmd720YXr8yG?$k2*+LSAp6FO4LJK>gNQhe8L5#mS7W!QGq+edWxzJCW>r z`7Ub$Ce@C_+v%FI?y${w$4oo>s#XGdA4$(07Wxw3VUqPFxn$v(WOK7@tRQ!lK9{T^ zgiD-$YsKLihABn=$XKpiFOZij8%0tRonup-C)!_Qy*a8fF6%Iq%kWfNPvuKy_8i@k zR$6Y--k;qYNRJ1Fj}5^r8?+CorNWpHqTrCacFK| z4e(l%j$+82-q|yalEvl4 zC@C5b&m_ujL=-;2GIh)VHyG!4MJxa;SWblU*b0q9jAHX=kxz zdZRD*VU}S(jc&8QYkqRi+@AUs zONE0M=%U%Ept!OQ$NM-(g0Xbg zEhJjR1V-xi$(M_we?+Fg+=9b1Xu{(~r+6D1(Rl7U;e)I|@ds2BFV>p1_Y6T3p@q_( z=N1r*+;0}{{JJXE^YxPN9M7UJF$tbnwlRIjk9ie$@+$E=&O!JjAY!<<7&(A^Jn8C 
z4$X7Ve0iul?D3H$)0e-e{pZGJ#ZTZH*;#kG?MLA7c+y~8r=A?C@bw8oN91-4H0n3N z?y05{(2&968z?uG@i64C!kMS80z-6ODytT<9>Bx~8a#bZvY^Y3N^ zlZ&LWve9q>jj@t$eTF)8R98)Pj?d-_ za?eR|iT{aXMmUu}jkUSOBj&0}2Ay&kl4@^yE=O*mJaOor1ed+9O$}bPtrcp0$}h}U zhuk7l!DcZP0KC_>&D0!hiB3Cx6E}a(@0-0{kjqU<;5X*8nyrZnd#B6n7 zhVH(y7(-_)3NVq8kPK#eR@Hv7`Y`{Q26s!tqUTdbKigH2S54WT`Z6k9ed=>M4}!n{ zCCPl(=&8hRvC&g6hrT(Swe-N8oV6$VufaZH6w56|&K2ye{!TPRH+)8w@O3mec}mXB z-r?}nhnspIP}gg>Y&A3ky4iAV{B^P4Ca(QJj{S9VOH-F^h!XsGbI`-J2&Z?>G1@Tx zP;V>dXN|wrd&Un70o&EuB!%qL-Rvu!rG25lR`0*wc$le5R^#*%u+hn4J8?WL%c>M>V}!7mk07TwgZREP1P0O}rD8_uQUzT;=4TM>FB{6&7Dl z-lumK&)0ugKW@smRI=$mXBM_tibtN{m5rsd+nT4kOsxpL=sneTN-?xgqW<{GCEvcY zp{avQn%b|}Tn`r1ZTuf)2_?ueF%mciIm@5qvu*6uqjC)QyV)$#s&?VIl30QRdROK1EePwnKbIbw!kr^}+Q zUcJdNNXzTf9K**iUKy2ti`83{vGyLU7?EY>`o4Blp{7xemonj_hNnMck7oHyy+-LcUoV^LC#S87STr zs(-5)llbf1()t;-<$I%$W`X(%h1F&(GK5yoe$s@cq>9CEm8gLodLPk4{O0;d@~1aB zkV_sh$B=y|*CZcmcpKrE24QNje8hD&@}$K>7!?6CGI7DkM?u;S2di(>imq@h`6%bdr?wyQU!!(O--i54ch zhfQ^=FoOy0x>;VX!X(4wOBF*QCMKQw`T5cLRo~DjZNGt1j%4)wL>t3di7rl+ETe%` z*WbX4ZJuVvgYoa%gy;ubd)X#-ju}?)jfuLF>yfTC$AwFD;zO?3>v$_P6r^SPr;RCQIUGwnme zb=SDl(gqun@#(hwN*Vn8dr}H~FB+CVdXF4G_;B#>S?JQL){mo80=@PDt_m$jx$GMj z8%CZkueM3A&%Bz;=PY@ZChP92^PvDaOJpCpxA*nkhwBlJQ%@RXab{f(d1?x~1NL^O zb0V1!x+`bctB1Z4r0f~_$q$n+O@A?to!sy#jFNg0Td;BE#+mwONnD-Q3;_#-8?d6!<@tS>>A--I3KtN9^T`ATb;=?6; zLfIM@(!{oK8Lu;ZaoC|s?%G4mq)gk`(3IMnN|I8nc-@Fv=?R`^k-tQH9CP>6+i$tV zU3g%}*CB5kwc}zvc1O%J#$JfyLgBqBrLbUX(P&X_fnBwA%!0abf#`4G*9qSdE-UxF zSl?5}BrL=JugKbwfSR+) zJVsX<>Q5AA(D)?>A1IgH@g&P$Y-r+TezvyBs_sUjJ<*4kxEd!0ywUlETl%3cuQ7>DK;gO%>IQ z<|voUmym12@q|?e3G6QeBVVhd=nNm9r)Ix_hhHZ`x=g)s^htN55-sAq61z9})YS&) zx789zS0gjA#PyH7!00+VW>ubTFSkPeDID>3ip;u(uE- z-rX`MbwMY$TPgba@J;w@!tBS>_BxgizFDmPDDWc)-;l4(^UD1&hG=wo7`mwT;T?aB zP2zS)>V3yYpHj7DE~F`UdcAyzL59-;Uo7`)IEtB0Wk+5OXqM+FG{p=S*lIs~gx6PL zy;ZOt48Ldo8z|h8^(l(-sByj=^6mL$p4yGt!&KuWt<{I5gJVRym#g+6$0r7}bYAEG z22^K+N)GNDgzN92JWe*c$BH4uddI$LFy9iwM|xB# z7ybG&_1JhXOlC{-H(*+zn!aXXEBmx{x9f8M@=xYcijw0GoOXG&&WclVYj2wyW<%1o<9!7kwDzZ7F&5a~ zYtpf=ckkM{z^(S317@Wz%s%_ccgFMfN<AoB zyJ_o-A>k9OWcwnfULm{K3lUw$1)s#bWcB6j4~xDn9PwNDi2u|O`V5e}uhrbLSF|8~ ztIzTr@38jbQnV}HwQANoVp~%iZ(-y!m#kosIv1=Srn1V+`{aj_n|HyxH!rkyk>f&Q znOZgSsA`j^;TI3ze<-Rx?ZX$$WW}@?!%IKuG_bb0rexQe%YTC4@2;RRCJ~?>@4@uo zHNz+;qwP1whmQO>KbpB0;F6GU7vR;mo)-Kx8h!J(|1I~LlcdyJ8EPqZ$IFW*Mv zFuUlj(Mgq;RZGn-cP3MNTtAGzcam5%{25e;XeuAFrAjyQ`8}h!b{DYfP@BoUpJStD z$$7o~edHcPzxhI&n+18~RylIj;!^zvsw?o$w8K$OUXe9MzSFX8lEBg2J?U^;%?Ify z_V;?^3I3m?*O6^6XCyyOeLc&(t7RX&w4c(k_KaWC>-3URt>4t~>JyijMIILToowz# zt}|ct8fc&e+h3@A+I!-yU3TPZkS6eh!-bqaqc)pk=A{f}m}l?_Y(w zuA8h||M1z5+6P;NK$~#lS0ADHVM=hg(sBdNtXZ`>J26?ca%~san04k-;&6v9wDI(l z&=dDLf4z))h&p#3nY{UJ{g#+$+_=LNjilY9NgH1k+^@cJMqVlNwG-}9+U2lTS#mRd zT{t4_En1ATKiAK9?ySIxvW8<1iy}hPf(zO7^lKWHRW6wCUO#nseL82)5~(`aY-T@V zc+5|DV8_@%0(DHxWkaud!$0YWBeaQ0$8gAbsxEBt4RoP*1l?UA-3+kNxk|a@=BRSsaA}xh6S*SCgl9&xIJKx!7p4$mdgxm5 zD-Ks2%zo_H$ZtS#qW;b)^+V&%-#}~WQMPkl@;xLC73eL+qMP{I?*(ZY7nnc9%Srnb zJvzMQ7v>nU@AKDrfUSCFkU8^%>?rdUt@BD2Uc8~5FaeVC1#zN=GC6~FLw;j}D6Pcm z?%1!KtslxC%KE0tX^Iv=IX$Qx8#=>CUz>zM8l3Gm<6SN5L~IO#u(%Ds_ytuvs9#|N zhg}?!*b3cR36Mak6B_t3JQd_Xz_ z{&CY`nduPW@TDcn9A|$U3rQRkT{~j2;uTioqF?&tn>e~Tt zYT%?z-f`t4*;~j$_XdtLcg50^MsT+%F{)F>EWy_VR<7awAkS)UF1my$XuN(~e?=<) zFyTS@{0LU4RrDjipy>&-k-6P)MeIh1MB-noed6n}KFqSDXl__O` z%7vinOP87i%vE1msI-;mSDx_~p;<>ZR1W>6H*7yDfv-DP5OS9HV|VPMS2~RFoY#?l zPZ;B>O@3kS&3^fqsSI;hkh9HQ*E(-*OyW*vk69o+EqJ)xFlym0gns7A0IwPUX*tnq_`M_xzea(34H>d#e|o$jm;Qc0{f z4}^}qMuz9&uw=TzG-6toY}Lu?ftWtB!dklKSj>bvHs$>mK{j$%^88w0Ne`G~@{ae2 
literal 0
HcmV?d00001

diff --git a/script/app-image-classification-tvm-onnx-py/requirements.txt b/script/app-image-classification-tvm-onnx-py/requirements.txt
new file mode 100644
index 0000000000..ae4aff7eae
--- /dev/null
+++ b/script/app-image-classification-tvm-onnx-py/requirements.txt
@@ -0,0 +1,7 @@
+matplotlib
+opencv-python
+scipy
+onnx
+decorator
+attrs
+psutil
diff --git a/script/app-image-classification-tvm-onnx-py/run.sh b/script/app-image-classification-tvm-onnx-py/run.sh
new file mode 100644
index 0000000000..8eb0660771
--- /dev/null
+++ b/script/app-image-classification-tvm-onnx-py/run.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+
+#if [[ ${CM_HOST_PLATFORM_FLAVOR} == "arm64" ]]; then
+#  ${CM_PYTHON_BIN} -m pip install -i https://test.pypi.org/simple/ onnxruntime==1.9.0.dev174552
+#fi
+
+export 
USE_TVM=yes + + +wget -nc https://raw.githubusercontent.com/mlcommons/ck-mlops/main/program/ml-task-image-classification-tvm-onnx-cpu/synset.txt +test $? -eq 0 || exit 1 + +${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt +test $? -eq 0 || exit 1 + +if [[ "${CM_INPUT}" != "" ]]; then + export CM_IMAGE=${CM_INPUT} +else + export CM_IMAGE=${CM_DATASET_PATH}/ILSVRC2012_val_00000001.JPEG +fi + + +${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/classify.py --image ${CM_IMAGE} +test $? -eq 0 || exit 1 diff --git a/script/app-image-classification-tvm-onnx-py/src/classify.py b/script/app-image-classification-tvm-onnx-py/src/classify.py new file mode 100644 index 0000000000..0eb299f2df --- /dev/null +++ b/script/app-image-classification-tvm-onnx-py/src/classify.py @@ -0,0 +1,292 @@ +""" +Developers: + - grigori@octoml.ai +""" + +import time +import os +import argparse +import json + +from PIL import Image +import cv2 + +import numpy as np + +import onnxruntime as rt + + + +# Image conversion from MLPerf(tm) vision +def center_crop(img, out_height, out_width): + height, width, _ = img.shape + left = int((width - out_width) / 2) + right = int((width + out_width) / 2) + top = int((height - out_height) / 2) + bottom = int((height + out_height) / 2) + img = img[top:bottom, left:right] + return img + + +def resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR): + height, width, _ = img.shape + new_height = int(100. * out_height / scale) + new_width = int(100. * out_width / scale) + if height > width: + w = new_width + h = int(new_height * height / width) + else: + h = new_height + w = int(new_width * width / height) + img = cv2.resize(img, (w, h), interpolation=inter_pol) + return img + + +# returns list of pairs (prob, class_index) +def get_top5(all_probs): + probs_with_classes = [] + + for class_index in range(len(all_probs)): + prob = all_probs[class_index] + probs_with_classes.append((prob, class_index)) + + sorted_probs = sorted(probs_with_classes, key = lambda pair: pair[0], reverse=True) + return sorted_probs[0:5] + +def run_case(dtype, image, target): + # Check image + import os + import json + import sys + + STAT_REPEAT=os.environ.get('STAT_REPEAT','') + if STAT_REPEAT=='' or STAT_REPEAT==None: + STAT_REPEAT=10 + STAT_REPEAT=int(STAT_REPEAT) + + # FGG: set model files via CM env + CATEG_FILE = 'synset.txt' + synset = eval(open(os.path.join(CATEG_FILE)).read()) + + files=[] + val={} + + # FGG: set timers + import time + timers={} + + img_orig = cv2.imread(image) + + img = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB) + + output_height, output_width, _ = 224, 224, 3 + img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2.INTER_AREA) + img = center_crop(img, output_height, output_width) + img = np.asarray(img, dtype='float32') + + # normalize image + means = np.array([123.68, 116.78, 103.94], dtype=np.float32) + img -= means + + # transpose if needed + img = img.transpose([2, 0, 1]) + + import matplotlib.pyplot as plt + img1 = img.transpose([1, 2, 0]) + arr_ = np.squeeze(img1) # you can give axis attribute if you wanna squeeze in specific dimension + plt.imshow(arr_) +# plt.show() + plt.savefig('pre-processed-image.png') + # Load model + model_path=os.environ.get('CM_ML_MODEL_FILE_WITH_PATH','') + if model_path=='': + print ('Error: environment variable CM_ML_MODEL_FILE_WITH_PATH is not defined') + exit(1) + + opt = rt.SessionOptions() + + if len(rt.get_all_providers()) > 1 and os.environ.get("USE_CUDA", 
"yes").lower() not in [ "0", "false", "off", "no" ]: + #Currently considering only CUDAExecutionProvider + sess = rt.InferenceSession(model_path, opt, providers=['CUDAExecutionProvider']) + else: + sess = rt.InferenceSession(model_path, opt, providers=["CPUExecutionProvider"]) + + inputs = [meta.name for meta in sess.get_inputs()] + outputs = [meta.name for meta in sess.get_outputs()] + + print (inputs) + print (outputs) + + + + + if os.environ.get('USE_TVM','')=='yes': + import tvm + from tvm import relay + import onnx + + del sess + + # Load model via ONNX to be used with TVM + print ('') + print ('ONNX: load model ...') + print ('') + + onnx_model = onnx.load(model_path) + + # Init TVM + # TBD: add tvm platform selector + if os.environ.get('USE_CUDA','')=='yes': + # TVM package must be built with CUDA enabled + ctx = tvm.cuda(0) + else: + ctx = tvm.cpu(0) + tvm_ctx = ctx + + build_conf = {'relay.backend.use_auto_scheduler': False} + opt_lvl = int(os.environ.get('TVM_OPT_LEVEL', 3)) + host = os.environ.get('CM_HOST_PLATFORM_FLAVOR') + if host == 'x86_64' and 'AMD' in os.environ.get('CM_HOST_CPU_VENDOR_ID',''): + target = os.environ.get('TVM_TARGET', 'llvm -mcpu=znver2') + else: + target = os.environ.get('TVM_TARGET', 'llvm') + + target_host=None + params={} + + # New target API + tvm_target = tvm.target.Target(target, host=target_host) + + input_shape = (1, 3, 224, 224) + shape_dict = {inputs[0]: input_shape} + + print ('') + print ('TVM: import model ...') + print ('') + # Extra param: opset=12 + mod, params = relay.frontend.from_onnx(onnx_model, shape_dict, freeze_params=True) + + print ('') + print ('TVM: transform to static ...') + print ('') + mod = relay.transform.DynamicToStatic()(mod) + + print ('') + print ('TVM: apply extra optimizations ...') + print ('') + # Padding optimization + # Adds extra optimizations + mod = relay.transform.FoldExplicitPadding()(mod) + + + print ('') + print ('TVM: build model ...') + print ('') + + executor=os.environ.get('MLPERF_TVM_EXECUTOR','graph') + + if executor == "graph" or executor == "debug": + from tvm.contrib import graph_executor + + # Without history + with tvm.transform.PassContext(opt_level=opt_lvl, config=build_conf): + graph_module = relay.build(mod, + target=tvm_target, + params=params) + lib = graph_module + + print ('') + print ('TVM: init graph engine ...') + print ('') + + sess = graph_executor.GraphModule(lib['default'](ctx)) + + + elif executor == "vm": + from tvm.runtime.vm import VirtualMachine + + # Without history + with tvm.transform.PassContext(opt_level=opt_lvl, config=build_conf): + vm_exec = relay.vm.compile(mod, target=tvm_target, params=params) + + r_exec = vm_exec + + print ('') + print ('TVM: init VM ...') + print ('') + + sess = VirtualMachine(r_exec, ctx) + + + # For now only graph + sess.set_input(inputs[0], tvm.nd.array([img])) + + # Run TVM inference + sess.run() + + # Process TVM outputs + output = [] + + for i in range(sess.get_num_outputs()): + # Take only the output of batch size for dynamic batches + if len(output)<(i+1): + output.append([]) + output[i].append(sess.get_output(i).asnumpy()[0]) + + + + else: + inp={inputs[0]:np.array([img], dtype=np.float32)} + output=sess.run(outputs, inp) + + + + + top1 = np.argmax(output[1])-1 #.asnumpy()) + + top5=[] + atop5 = get_top5(output[1][0]) #.asnumpy()) + + print ('') + print('Prediction Top1:', top1, synset[top1]) + + print ('') + print('Prediction Top5:') + for p in atop5: + out=p[1]-1 + name=synset[out] + print (' * {} {}'.format(out, name)) + + 
ck_results={
+        'prediction': synset[top1]
+    }
+
+    with open('tmp-ck-timer.json', 'w') as ck_results_file:
+        json.dump(ck_results, ck_results_file, indent=2, sort_keys=True)
+
+    return
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--image', type=str, help="Path to JPEG image.", default=None, required=True)
+    parser.add_argument('--target', type=str, help="Target", default=None)
+    args = parser.parse_args()
+
+    if args.image.strip() == '':
+        print('Please specify a valid path to the input image (e.g. via the CM_IMAGE environment variable used by run.sh)!')
+        exit(1)
+
+    # set parameters
+    batch_size = 1
+    num_classes = 1000
+    image_shape = (3, 224, 224)
+
+    # load model
+    data_shape = (batch_size,) + image_shape
+    out_shape = (batch_size, num_classes)
+
+    dtype = 'float32'
+    if os.environ.get('CM_TVM_DTYPE', '') != '':
+        dtype = os.environ['CM_TVM_DTYPE']
+
+    run_case(dtype, args.image, args.target)
diff --git a/script/app-image-corner-detection/README-extra.md b/script/app-image-corner-detection/README-extra.md
new file mode 100644
index 0000000000..cc22865183
--- /dev/null
+++ b/script/app-image-corner-detection/README-extra.md
@@ -0,0 +1,25 @@
+# Examples
+
+First download the images:
+
+```bash
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/data.pgm --verify=no --env.CM_DOWNLOAD_CHECKSUM=0af279e557a8de252d7ff0751a999379
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --verify=no --env.CM_DOWNLOAD_CHECKSUM=45ae5c940233892c2f860efdf0b66e7e
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse2.jpg --verify=no --env.CM_DOWNLOAD_CHECKSUM=e7e2050b41e0b85cedca3ca87ab55390
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse2.pgm --verify=no --env.CM_DOWNLOAD_CHECKSUM=a4e48556d3eb09402bfc98e375b41311
+```
+
+Then run the app:
+
+```bash
+cm run script "app image corner-detection"
+cm run script "app image corner-detection" --add_deps_recursive.compiler.tags=llvm
+cm run script "app image corner-detection" --add_deps_recursive.compiler.tags=gcc
+cm run script "app image corner-detection" --add_deps_recursive.compiler.tags=llvm --add_deps_recursive.compiler.version_min=11.0.0 --add_deps_recursive.compiler.version_max=13.0.0
+```
+
+## Reproducibility matrix
+
+* Ubuntu 22.04; x64; LLVM 17.06
+* Windows 11; x64; LLVM 17.06
+
diff --git a/script/app-image-corner-detection/README.md b/script/app-image-corner-detection/README.md
new file mode 100644
index 0000000000..dad1c84a9a
--- /dev/null
+++ b/script/app-image-corner-detection/README.md
@@ -0,0 +1,129 @@
+Automatically generated README for this automation recipe: **app-image-corner-detection**
+
+Category: **Modular application pipeline**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-image-corner-detection,998ffee0bc534d0a) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-corner-detection)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see the meta description above): *app,image,corner-detection*
+* 
Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "app image corner-detection" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=app,image,corner-detection` + +`cm run script --tags=app,image,corner-detection ` + +*or* + +`cmr "app image corner-detection"` + +`cmr "app image corner-detection " ` + + +#### Run this script from Python + +
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'app,image,corner-detection',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
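+The `env` dictionary can be passed through the same interface. A minimal sketch (assuming `env` is forwarded to the script pipeline in the same way as the `--env.KEY=VALUE` CLI flags; `CM_INPUT` and `CM_OUTPUT` are the keys read by this script's [customize.py](customize.py), and the paths are placeholders):
+
+```python
+import cmind
+
+# Run the corner-detection app on a custom PGM image
+# and write the annotated result to a custom file name.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'app,image,corner-detection',
+                  'out': 'con',
+                  'env': {'CM_INPUT': '/path/to/image.pgm',
+                          'CM_OUTPUT': 'output_image_with_corners.pgm'}})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+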
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="app,image,corner-detection"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=app,image,corner-detection) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "app image corner-detection" `
+
+___
+### Customization
+
+#### Default environment
+
+These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or using script flags.
+
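+For example, a hypothetical `@input.json` overriding the input/output images consumed by this script's customize.py:
+
+```json
+{
+  "env": {
+    "CM_INPUT": "/path/to/image.pgm",
+    "CM_OUTPUT": "output_image_with_corners.pgm"
+  }
+}
+```
+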
+
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-corner-detection/_cm.json)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * detect,cpu
+       - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-corner-detection/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-corner-detection/_cm.json)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-corner-detection/run.sh)
+  1. ***Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-corner-detection/_cm.json)***
+     * compile,cpp-program
+       * `if (CM_SKIP_COMPILE != on)`
+       - CM script: [compile-program](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/compile-program)
+     * benchmark-program
+       * `if (CM_SKIP_RUN != on)`
+       - CM script: [benchmark-program](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-program)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-corner-detection/customize.py)***
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-image-corner-detection/_cm.json)
+
+___
+### Script output
+`cmr "app image corner-detection " -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
diff --git a/script/app-image-corner-detection/_cm.json b/script/app-image-corner-detection/_cm.json
new file mode 100644
index 0000000000..405654f5ee
--- /dev/null
+++ b/script/app-image-corner-detection/_cm.json
@@ -0,0 +1,34 @@
+{
+  "alias": "app-image-corner-detection",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "category": "Modular application pipeline",
+  "deps": [
+    {"tags":"detect,os"},
+    {"tags":"detect,cpu"}
+  ],
+  "posthook_deps": [
+    {
+      "skip_if_env": {
+        "CM_SKIP_COMPILE": [
+          "on"
+        ]
+      },
+      "tags": "compile,cpp-program"
+    },
+    {
+      "skip_if_env": {
+        "CM_SKIP_RUN": [
+          "on"
+        ]
+      },
+      "tags": "benchmark-program"
+    }
+  ],
+  "tags": [
+    "app",
+    "image",
+    "corner-detection"
+  ],
+  "uid": "998ffee0bc534d0a"
+}
diff --git a/script/app-image-corner-detection/customize.py b/script/app-image-corner-detection/customize.py
new file mode 100644
index 0000000000..19536aee3c
--- /dev/null
+++ b/script/app-image-corner-detection/customize.py
@@ -0,0 +1,34 @@
+from cmind import utils
+import os
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+    script_path = i['run_script_input']['path']
+
+    # Sources picked up by the compile-program posthook
+    env["CM_SOURCE_FOLDER_PATH"] = script_path
+    env['CM_C_SOURCE_FILES'] = "susan.c"
+
+    # Default input/output images and run directory (overridable via --env)
+    if 'CM_INPUT' not in env:
+        env['CM_INPUT'] = os.path.join(script_path, 'data.pgm')
+    if 'CM_OUTPUT' not in env:
+        env['CM_OUTPUT'] = 'output_image_with_corners.pgm'
+    if 'CM_RUN_DIR' not in env:
+        env['CM_RUN_DIR'] = os.path.join(script_path, "output")
+
+    # susan <input.pgm> <output.pgm> -c  (corners mode)
+    env['CM_RUN_SUFFIX'] = env['CM_INPUT'] + ' ' + env['CM_OUTPUT'] + ' -c'
+
+    if os_info['platform'] == 'windows':
+        env['CM_BIN_NAME'] = 'image-corner.exe'
+    else:
+        env['CM_BIN_NAME'] = 'image-corner'
+        # susan.c needs libm (sqrt, exp)
+        env['+ LDCFLAGS'] = ["-lm"]
+
+    return {'return':0}
+
+def postprocess(i):
+
+    env = i['env']
+    print(env['CM_OUTPUT'] + " generated in " + env['CM_RUN_DIR'])
+
+    return {'return':0}
diff --git a/script/app-image-corner-detection/run.sh b/script/app-image-corner-detection/run.sh
new file mode 100644
index 0000000000..30cfbdd00e
--- /dev/null
+++ b/script/app-image-corner-detection/run.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+CUR=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+mkdir -p "$CUR/output"
+
+test $? -eq 0 || exit 1
diff --git a/script/app-image-corner-detection/susan.c b/script/app-image-corner-detection/susan.c
new file mode 100644
index 0000000000..8a41d9a22e
--- /dev/null
+++ b/script/app-image-corner-detection/susan.c
@@ -0,0 +1,2161 @@
+/* {{{ Copyright etc. */
+
+/**********************************************************************\
+
+  SUSAN Version 2l by Stephen Smith
+  Oxford Centre for Functional Magnetic Resonance Imaging of the Brain,
+  Department of Clinical Neurology, Oxford University, Oxford, UK
+  (Previously in Computer Vision and Image Processing Group - now
+  Computer Vision and Electro Optics Group - DERA Chertsey, UK)
+  Email:    steve@fmrib.ox.ac.uk
+  WWW:      http://www.fmrib.ox.ac.uk/~steve
+
+  (C) Crown Copyright (1995-1999), Defence Evaluation and Research Agency,
+  Farnborough, Hampshire, GU14 6TD, UK
+  DERA WWW site:
+  http://www.dera.gov.uk/
+  DERA Computer Vision and Electro Optics Group WWW site:
+  http://www.dera.gov.uk/imageprocessing/dera/group_home.html
+  DERA Computer Vision and Electro Optics Group point of contact:
+  Dr. John Savage, jtsavage@dera.gov.uk, +44 1344 633203
+
+  A UK patent has been granted: "Method for digitally processing
+  images to determine the position of edges and/or corners therein for
+  guidance of unmanned vehicle", UK Patent 2272285. Proprietor:
+  Secretary of State for Defence, UK. 15 January 1997
+
+  This code is issued for research purposes only and remains the
+  property of the UK Secretary of State for Defence. This code must
+  not be passed on without this header information being kept
+  intact. This code must not be sold.
+
+\**********************************************************************/
+
+/* }}} */
+/* {{{ Readme First */
+
+/**********************************************************************\
+
+  SUSAN Version 2l
+  SUSAN = Smallest Univalue Segment Assimilating Nucleus
+
+  Email:    steve@fmrib.ox.ac.uk
+  WWW:      http://www.fmrib.ox.ac.uk/~steve
+
+  Related paper:
+  @article{Smith97,
+        author = "Smith, S.M. and Brady, J.M.",
+        title = "{SUSAN} - A New Approach to Low Level Image Processing",
+        journal = "Int. Journal of Computer Vision",
+        pages = "45--78",
+        volume = "23",
+        number = "1",
+        month = "May",
+        year = 1997}
+
+  To be registered for automatic (bug) updates of SUSAN, send an email.
+
+  Compile with:
+  gcc -O4 -o susan susan2l.c -lm
+
+  See following section for different machine information. Please
+  report any bugs (and fixes). There are a few optional changes that
+  can be made in the "defines" section which follows shortly.
+
+  Usage: type "susan" to get usage. Only PGM format files can be input
+  and output. Utilities such as the netpbm package and XV can be used
+  to convert to and from other formats. Any size of image can be
+  processed.
+
+  This code is written using an emacs folding mode, making moving
+  around the different sections very easy. This is why there are
+  various marks within comments and why comments are indented.
+ + + SUSAN QUICK: + + This version of the SUSAN corner finder does not do all the + false-corner suppression and thus is faster and produced some false + positives, particularly on strong edges. However, because there are + less stages involving thresholds etc., the corners that are + correctly reported are usually more stable than those reported with + the full algorithm. Thus I recommend at least TRYING this algorithm + for applications where stability is important, e.g., tracking. + + THRESHOLDS: + + There are two thresholds which can be set at run-time. These are the + brightness threshold (t) and the distance threshold (d). + + SPATIAL CONTROL: d + + In SUSAN smoothing d controls the size of the Gaussian mask; its + default is 4.0. Increasing d gives more smoothing. In edge finding, + a fixed flat mask is used, either 37 pixels arranged in a "circle" + (default), or a 3 by 3 mask which gives finer detail. In corner + finding, only the larger 37 pixel mask is used; d is not + variable. In smoothing, the flat 3 by 3 mask can be used instead of + a larger Gaussian mask; this gives low smoothing and fast operation. + + BRIGHTNESS CONTROL: t + + In all three algorithms, t can be varied (default=20); this is the + main threshold to be varied. It determines the maximum difference in + greylevels between two pixels which allows them to be considered + part of the same "region" in the image. Thus it can be reduced to + give more edges or corners, i.e. to be more sensitive, and vice + versa. In smoothing, reducing t gives less smoothing, and vice + versa. Set t=10 for the test image available from the SUSAN web + page. + + ITERATIONS: + + With SUSAN smoothing, more smoothing can also be obtained by + iterating the algorithm several times. This has a different effect + from varying d or t. + + FIXED MASKS: + + 37 pixel mask: ooo 3 by 3 mask: ooo + ooooo ooo + ooooooo ooo + ooooooo + ooooooo + ooooo + ooo + + CORNER ATTRIBUTES dx, dy and I + (Only read this if you are interested in the C implementation or in + using corner attributes, e.g., for corner matching) + + Corners reported in the corner list have attributes associated with + them as well as positions. This is useful, for example, when + attempting to match corners from one image to another, as these + attributes can often be fairly unchanged between images. The + attributes are dx, dy and I. I is the value of image brightness at + the position of the corner. In the case of susan_corners_quick, dx + and dy are the first order derivatives (differentials) of the image + brightness in the x and y directions respectively, at the position + of the corner. In the case of normal susan corner finding, dx and dy + are scaled versions of the position of the centre of gravity of the + USAN with respect to the centre pixel (nucleus). + + BRIGHTNESS FUNCTION LUT IMPLEMENTATION: + (Only read this if you are interested in the C implementation) + + The SUSAN brightness function is implemented as a LUT + (Look-Up-Table) for speed. The resulting pointer-based code is a + little hard to follow, so here is a brief explanation. In + setup_brightness_lut() the LUT is setup. This mallocs enough space + for *bp and then repositions the pointer to the centre of the + malloced space. The SUSAN function e^-(x^6) or e^-(x^2) is + calculated and converted to a uchar in the range 0-100, for all + possible image brightness differences (including negative + ones). Thus bp[23] is the output for a brightness difference of 23 + greylevels. 
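For example (a sketch, assuming the e^-(x^6) form and that the
+  difference is first normalised by the brightness threshold t,
+  default 20): bp[23] would hold roughly 100 * e^-((23/20)^6),
+  i.e. about 10.
+
+  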
In the SUSAN algorithms this LUT is used as follows: + + p=in + (i-3)*x_size + j - 1; + p points to the first image pixel in the circular mask surrounding + point (x,y). + + cp=bp + in[i*x_size+j]; + cp points to a position in the LUT corresponding to the brightness + of the centre pixel (x,y). + + now for every pixel within the mask surrounding (x,y), + n+=*(cp-*p++); + the brightness difference function is found by moving the cp pointer + down by an amount equal to the value of the pixel pointed to by p, + thus subtracting the two brightness values and performing the + exponential function. This value is added to n, the running USAN + area. + + in SUSAN smoothing, the variable height mask is implemented by + multiplying the above by the moving mask pointer, reset for each new + centre pixel. + tmp = *dpt++ * *(cp-brightness); + +\**********************************************************************/ + +/* }}} */ +/* {{{ Machine Information */ + +/**********************************************************************\ + + Success has been reported with the following: + + MACHINE OS COMPILER + + Sun 4.1.4 bundled C, gcc + + Next + + SGI IRIX SGI cc + + DEC Unix V3.2+ + + IBM RISC AIX gcc + + PC Borland 5.0 + + PC Linux gcc-2.6.3 + + PC Win32 Visual C++ 4.0 (Console Application) + + PC Win95 Visual C++ 5.0 (Console Application) + Thanks to Niu Yongsheng : + Use the FOPENB option below + + PC DOS djgpp gnu C + Thanks to Mark Pettovello : + Use the FOPENB option below + + HP HP-UX bundled cc + Thanks to Brian Dixon : + in ksh: + export CCOPTS="-Aa -D_HPUX_SOURCE | -lM" + cc -O3 -o susan susan2l.c + +\**********************************************************************/ + +/* }}} */ +/* {{{ History */ + +/**********************************************************************\ + + SUSAN Version 2l, 12/2/99 + Changed GNUDOS option to FOPENB. + (Thanks to Niu Yongsheng .) + Took out redundant "sq=sq/2;". + + SUSAN Version 2k, 19/8/98: + In corner finding: + Changed if(yyx_size) etc. tests in smoothing. + Added a couple of free() calls for cgx and cgy. + (Thanks to geoffb@ucs.ed.ac.uk - Geoff Browitt.) + + SUSAN Version 2i, 21/7/97: + Added information about corner attributes. + + SUSAN Version 2h, 16/12/96: + Added principle (initial enhancement) option. + + SUSAN Version 2g, 2/7/96: + Minor superficial changes to code. + + SUSAN Version 2f, 16/1/96: + Added GNUDOS option (now called FOPENB; see options below). + + SUSAN Version 2e, 9/1/96: + Added -b option. + Fixed 1 pixel horizontal offset error for drawing edges. + + SUSAN Version 2d, 27/11/95: + Fixed loading of certain PGM files in get_image (again!) + + SUSAN Version 2c, 22/11/95: + Fixed loading of certain PGM files in get_image. + (Thanks to qu@San-Jose.ate.slb.com - Gongyuan Qu.) + + SUSAN Version 2b, 9/11/95: + removed "z==" error in edges routines. + + SUSAN Version 2a, 6/11/95: + Removed a few unnecessary variable declarations. + Added different machine information. + Changed "header" in get_image to char. + + SUSAN Version 2, 1/11/95: first combined version able to take any + image sizes. + + SUSAN "Versions 1", circa 1992: the various SUSAN algorithms were + developed during my doctorate within different programs and for + fixed image sizes. The algorithms themselves are virtually unaltered + between "versions 1" and the combined program, version 2. 
+
+\**********************************************************************/
+
+/* }}} */
+/* {{{ defines, includes and typedefs */
+
+/* ********** Optional settings */
+
+#ifndef PPC
+typedef int   TOTAL_TYPE; /* this is faster for "int" but should be "float" for large d masks */
+#else
+typedef float TOTAL_TYPE; /* for my PowerPC accelerator only */
+#endif
+
+/*#define FOPENB*/        /* uncomment if using djgpp gnu C for DOS or certain Win95 compilers */
+#define SEVEN_SUPP        /* size for non-max corner suppression; SEVEN_SUPP or FIVE_SUPP */
+#define MAX_CORNERS 15000 /* max corners per frame */
+
+/* ********** Leave the rest - but you may need to remove one or both of sys/file.h and malloc.h lines */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#define  exit_error(IFB,IFC) { fprintf(stderr,IFB,IFC); exit(0); }
+#define  FTOI(a) ( (a) < 0 ? ((int)(a-0.5)) : ((int)(a+0.5)) )
+typedef  unsigned char uchar;
+typedef  struct {int x,y,info, dx, dy, I;} CORNER_LIST[MAX_CORNERS];
+
+/* }}} */
+/* {{{ usage() */
+
+#ifdef OPENME
+#include <openme.h>
+#endif
+#ifdef XOPENME
+#include <xopenme.h>
+#endif
+
+void usage(void)
+{
+  printf("Usage: susan <in.pgm> <out.pgm> [options]\n\n");
+
+  printf("-s : Smoothing mode (default)\n");
+  printf("-e : Edges mode\n");
+  printf("-c : Corners mode\n\n");
+
+  printf("See source code for more information about setting the thresholds\n");
+  printf("-t <thresh> : Brightness threshold, all modes (default=20)\n");
+  printf("-d <thresh> : Distance threshold, smoothing mode, (default=4) (use next option instead for flat 3x3 mask)\n");
+  printf("-3 : Use flat 3x3 mask, edges or smoothing mode\n");
+  printf("-n : No post-processing on the binary edge map (runs much faster); edges mode\n");
+  printf("-q : Use faster (and usually stabler) corner mode; edge-like corner suppression not carried out; corners mode\n");
+  printf("-b : Mark corners/edges with single black points instead of black with white border; corners or edges mode\n");
+  printf("-p : Output initial enhancement image only; corners or edges mode (default is edges mode)\n");
+
+  printf("\nSUSAN Version 2l (C) 1995-1997 Stephen Smith, DRA UK. 
steve@fmrib.ox.ac.uk\n"); + + exit(0); +} + +/* }}} */ +/* {{{ get_image(filename,in,x_size,y_size) */ + +/* {{{ int getint(fp) derived from XV */ + +int getint(FILE* fd) +{ + int c, i; + char dummy[10000]; + + c = getc(fd); + while (1) /* find next integer */ + { + if (c=='#') /* if we're at a comment, read to end of line */ + fgets(dummy,9000,fd); + if (c==EOF) + exit_error("Image %s not binary PGM.\n","is"); + if (c>='0' && c<='9') + break; /* found what we were looking for */ + c = getc(fd); + } + + /* we're at the start of a number, continue until we hit a non-number */ + i = 0; + while (1) { + i = (i*10) + (c - '0'); + c = getc(fd); + if (c==EOF) return (i); + if (c<'0' || c>'9') break; + } + + return (i); +} + +/* }}} */ + +void get_image(char filename[200], unsigned char** in, int* x_size, int* y_size) +{ +FILE *fd; +char header [100]; +int tmp; + +#ifdef FOPENB + if ((fd=fopen(filename,"rb")) == NULL) +#else + if ((fd=fopen(filename,"r")) == NULL) +#endif + exit_error("Can't input image %s.\n",filename); + + /* {{{ read header */ + + header[0]=fgetc(fd); + header[1]=fgetc(fd); + if(!(header[0]=='P' && header[1]=='5')) + exit_error("Image %s does not have binary PGM header.\n",filename); + + *x_size = getint(fd); + *y_size = getint(fd); + tmp = getint(fd); + +/* }}} */ + + *in = (uchar *) malloc(*x_size * *y_size); + + if (fread(*in,1,*x_size * *y_size,fd) == 0) + exit_error("Image %s is wrong size.\n",filename); + + fclose(fd); +} + +/* }}} */ +/* {{{ put_image(filename,in,x_size,y_size) */ + +void put_image(char filename[100], char* in, int x_size, int y_size) +{ +FILE *fd; + +#ifdef FOPENB + if ((fd=fopen(filename,"wb")) == NULL) +#else + if ((fd=fopen(filename,"w")) == NULL) +#endif + exit_error("Can't output image%s.\n",filename); + + fprintf(fd,"P5\n"); + fprintf(fd,"%d %d\n",x_size,y_size); + fprintf(fd,"255\n"); + + if (fwrite(in,x_size*y_size,1,fd) != 1) + exit_error("Can't write image %s.\n",filename); + + fclose(fd); +} + +/* }}} */ +/* {{{ int_to_uchar(r,in,size) */ + +void int_to_uchar(int* r, uchar* in, int size) +{ +int i, + max_r=r[0], + min_r=r[0]; + + for (i=0; i max_r ) + max_r=r[i]; + if ( r[i] < min_r ) + min_r=r[i]; + } + + /*printf("min=%d max=%d\n",min_r,max_r);*/ + + max_r-=min_r; + + for (i=0; ip[l+1]) + { + tmp=p[l]; p[l]=p[l+1]; p[l+1]=tmp; + } + + return( (p[3]+p[4]) / 2 ); +} + +/* }}} */ +/* {{{ enlarge(in,tmp_image,x_size,y_size,border) */ + +/* this enlarges "in" so that borders can be dealt with easily */ + +void enlarge(uchar** in, uchar* tmp_image, int* x_size, int* y_size, int border) +{ +int i, j; + + for(i=0; i<*y_size; i++) /* copy *in into tmp_image */ + memcpy(tmp_image+(i+border)*(*x_size+2*border)+border, *in+i* *x_size, *x_size); + + for(i=0; i15) && (total==0) ) + { + printf("Distance_thresh (%f) too big for integer arithmetic.\n",dt); + printf("Either reduce it to <=15 or recompile with variable \"total\"\n"); + printf("as a float: see top \"defines\" section.\n"); + exit(0); + } + + if ( (2*mask_size+1>x_size) || (2*mask_size+1>y_size) ) + { + printf("Mask size (1.5*distance_thresh+1=%d) too big for image (%dx%d).\n",mask_size,x_size,y_size); + exit(0); + } + + tmp_image = (uchar *) malloc( (x_size+mask_size*2) * (y_size+mask_size*2) ); + enlarge(&in,tmp_image,&x_size,&y_size,mask_size); + +/* }}} */ + + if (three_by_three==0) + { /* large Gaussian masks */ + /* {{{ setup distance lut */ + + n_max = (mask_size*2) + 1; + + increment = x_size - n_max; + + dp = (unsigned char *)malloc(n_max*n_max); + dpt = dp; + temp = -(dt*dt); + + 
for(i=-mask_size; i<=mask_size; i++) + for(j=-mask_size; j<=mask_size; j++) + { + x = (int) (100.0 * exp( ((float)((i*i)+(j*j))) / temp )); + *dpt++ = (unsigned char)x; + } + +/* }}} */ + /* {{{ main section */ + + for (i=mask_size;im) { m=l[y+y+y+x]; a=y; b=x; } + + if (m>0) + { + if (mid[i*x_size+j]<4) + mid[(i+a-1)*x_size+j+b-1] = 4; + else + mid[(i+a-1)*x_size+j+b-1] = mid[i*x_size+j]+1; + if ( (a+a+b) < 3 ) /* need to jump back in image */ + { + i+=a-1; + j+=b-2; + if (i<4) i=4; + if (j<4) j=4; + } + } + } + +/* }}} */ + /* {{{ n==2 */ + + if (n==2) + { + /* put in a bit here to straighten edges */ + b00 = mid[(i-1)*x_size+j-1]<8; /* corners of 3x3 */ + b02 = mid[(i-1)*x_size+j+1]<8; + b20 = mid[(i+1)*x_size+j-1]<8; + b22 = mid[(i+1)*x_size+j+1]<8; + if ( ((b00+b02+b20+b22)==2) && ((b00|b22)&(b02|b20))) + { /* case: move a point back into line. + e.g. X O X CAN become X X X + O X O O O O + O O O O O O */ + if (b00) + { + if (b02) { x=0; y=-1; } + else { x=-1; y=0; } + } + else + { + if (b02) { x=1; y=0; } + else { x=0; y=1; } + } + if (((float)r[(i+y)*x_size+j+x]/(float)centre) > 0.7) + { + if ( ( (x==0) && (mid[(i+(2*y))*x_size+j]>7) && (mid[(i+(2*y))*x_size+j-1]>7) && (mid[(i+(2*y))*x_size+j+1]>7) ) || + ( (y==0) && (mid[(i)*x_size+j+(2*x)]>7) && (mid[(i+1)*x_size+j+(2*x)]>7) && (mid[(i-1)*x_size+j+(2*x)]>7) ) ) + { + mid[(i)*x_size+j]=100; + mid[(i+y)*x_size+j+x]=3; /* no jumping needed */ + } + } + } + else + { + b01 = mid[(i-1)*x_size+j ]<8; + b12 = mid[(i )*x_size+j+1]<8; + b21 = mid[(i+1)*x_size+j ]<8; + b10 = mid[(i )*x_size+j-1]<8; + /* {{{ right angle ends - not currently used */ + +#ifdef IGNORETHIS + if ( (b00&b01)|(b00&b10)|(b02&b01)|(b02&b12)|(b20&b10)|(b20&b21)|(b22&b21)|(b22&b12) ) + { /* case; right angle ends. clean up. + e.g.; X X O CAN become X X O + O X O O O O + O O O O O O */ + if ( ((b01)&(mid[(i-2)*x_size+j-1]>7)&(mid[(i-2)*x_size+j]>7)&(mid[(i-2)*x_size+j+1]>7)& + ((b00&((2*r[(i-1)*x_size+j+1])>centre))|(b02&((2*r[(i-1)*x_size+j-1])>centre)))) | + ((b10)&(mid[(i-1)*x_size+j-2]>7)&(mid[(i)*x_size+j-2]>7)&(mid[(i+1)*x_size+j-2]>7)& + ((b00&((2*r[(i+1)*x_size+j-1])>centre))|(b20&((2*r[(i-1)*x_size+j-1])>centre)))) | + ((b12)&(mid[(i-1)*x_size+j+2]>7)&(mid[(i)*x_size+j+2]>7)&(mid[(i+1)*x_size+j+2]>7)& + ((b02&((2*r[(i+1)*x_size+j+1])>centre))|(b22&((2*r[(i-1)*x_size+j+1])>centre)))) | + ((b21)&(mid[(i+2)*x_size+j-1]>7)&(mid[(i+2)*x_size+j]>7)&(mid[(i+2)*x_size+j+1]>7)& + ((b20&((2*r[(i+1)*x_size+j+1])>centre))|(b22&((2*r[(i+1)*x_size+j-1])>centre)))) ) + { + mid[(i)*x_size+j]=100; + if (b10&b20) j-=2; + if (b00|b01|b02) { i--; j-=2; } + } + } +#endif + +/* }}} */ + if ( ((b01+b12+b21+b10)==2) && ((b10|b12)&(b01|b21)) && + ((b01&((mid[(i-2)*x_size+j-1]<8)|(mid[(i-2)*x_size+j+1]<8)))|(b10&((mid[(i-1)*x_size+j-2]<8)|(mid[(i+1)*x_size+j-2]<8)))| + (b12&((mid[(i-1)*x_size+j+2]<8)|(mid[(i+1)*x_size+j+2]<8)))|(b21&((mid[(i+2)*x_size+j-1]<8)|(mid[(i+2)*x_size+j+1]<8)))) ) + { /* case; clears odd right angles. 
+ e.g.; O O O becomes O O O + X X O X O O + O X O O X O */ + mid[(i)*x_size+j]=100; + i--; /* jump back */ + j-=2; + if (i<4) i=4; + if (j<4) j=4; + } + } + } + +/* }}} */ + /* {{{ n>2 the thinning is done here without breaking connectivity */ + + if (n>2) + { + b01 = mid[(i-1)*x_size+j ]<8; + b12 = mid[(i )*x_size+j+1]<8; + b21 = mid[(i+1)*x_size+j ]<8; + b10 = mid[(i )*x_size+j-1]<8; + if((b01+b12+b21+b10)>1) + { + b00 = mid[(i-1)*x_size+j-1]<8; + b02 = mid[(i-1)*x_size+j+1]<8; + b20 = mid[(i+1)*x_size+j-1]<8; + b22 = mid[(i+1)*x_size+j+1]<8; + p1 = b00 | b01; + p2 = b02 | b12; + p3 = b22 | b21; + p4 = b20 | b10; + + if( ((p1 + p2 + p3 + p4) - ((b01 & p2)+(b12 & p3)+(b21 & p4)+(b10 & p1))) < 2) + { + mid[(i)*x_size+j]=100; + i--; + j-=2; + if (i<4) i=4; + if (j<4) j=4; + } + } + } + +/* }}} */ + } +} + +/* }}} */ +/* {{{ susan_edges(in,r,sf,max_no,out) */ + +void susan_edges(uchar* in, int* r, uchar* mid, uchar* bp, + int max_no, int x_size, int y_size) +{ +float z; +int do_symmetry, i, j, m, n, a, b, x, y, w; +uchar c,*p,*cp; + + memset (r,0,x_size * y_size * sizeof(int)); + + for (i=3;i0) + { + m=r[i*x_size+j]; + n=max_no - m; + cp=bp + in[i*x_size+j]; + + if (n>600) + { + p=in + (i-3)*x_size + j - 1; + x=0;y=0; + + c=*(cp-*p++);x-=c;y-=3*c; + c=*(cp-*p++);y-=3*c; + c=*(cp-*p);x+=c;y-=3*c; + p+=x_size-3; + + c=*(cp-*p++);x-=2*c;y-=2*c; + c=*(cp-*p++);x-=c;y-=2*c; + c=*(cp-*p++);y-=2*c; + c=*(cp-*p++);x+=c;y-=2*c; + c=*(cp-*p);x+=2*c;y-=2*c; + p+=x_size-5; + + c=*(cp-*p++);x-=3*c;y-=c; + c=*(cp-*p++);x-=2*c;y-=c; + c=*(cp-*p++);x-=c;y-=c; + c=*(cp-*p++);y-=c; + c=*(cp-*p++);x+=c;y-=c; + c=*(cp-*p++);x+=2*c;y-=c; + c=*(cp-*p);x+=3*c;y-=c; + p+=x_size-6; + + c=*(cp-*p++);x-=3*c; + c=*(cp-*p++);x-=2*c; + c=*(cp-*p);x-=c; + p+=2; + c=*(cp-*p++);x+=c; + c=*(cp-*p++);x+=2*c; + c=*(cp-*p);x+=3*c; + p+=x_size-6; + + c=*(cp-*p++);x-=3*c;y+=c; + c=*(cp-*p++);x-=2*c;y+=c; + c=*(cp-*p++);x-=c;y+=c; + c=*(cp-*p++);y+=c; + c=*(cp-*p++);x+=c;y+=c; + c=*(cp-*p++);x+=2*c;y+=c; + c=*(cp-*p);x+=3*c;y+=c; + p+=x_size-5; + + c=*(cp-*p++);x-=2*c;y+=2*c; + c=*(cp-*p++);x-=c;y+=2*c; + c=*(cp-*p++);y+=2*c; + c=*(cp-*p++);x+=c;y+=2*c; + c=*(cp-*p);x+=2*c;y+=2*c; + p+=x_size-3; + + c=*(cp-*p++);x-=c;y+=3*c; + c=*(cp-*p++);y+=3*c; + c=*(cp-*p);x+=c;y+=3*c; + + z = sqrt((float)((x*x) + (y*y))); + if (z > (0.9*(float)n)) /* 0.5 */ + { + do_symmetry=0; + if (x==0) + z=1000000.0; + else + z=((float)y) / ((float)x); + if (z < 0) { z=-z; w=-1; } + else w=1; + if (z < 0.5) { /* vert_edge */ a=0; b=1; } + else { if (z > 2.0) { /* hor_edge */ a=1; b=0; } + else { /* diag_edge */ if (w>0) { a=1; b=1; } + else { a=-1; b=1; }}} + if ( (m > r[(i+a)*x_size+j+b]) && (m >= r[(i-a)*x_size+j-b]) && + (m > r[(i+(2*a))*x_size+j+(2*b)]) && (m >= r[(i-(2*a))*x_size+j-(2*b)]) ) + mid[i*x_size+j] = 1; + } + else + do_symmetry=1; + } + else + do_symmetry=1; + + if (do_symmetry==1) + { + p=in + (i-3)*x_size + j - 1; + x=0; y=0; w=0; + + /* | \ + y -x- w + | \ */ + + c=*(cp-*p++);x+=c;y+=9*c;w+=3*c; + c=*(cp-*p++);y+=9*c; + c=*(cp-*p);x+=c;y+=9*c;w-=3*c; + p+=x_size-3; + + c=*(cp-*p++);x+=4*c;y+=4*c;w+=4*c; + c=*(cp-*p++);x+=c;y+=4*c;w+=2*c; + c=*(cp-*p++);y+=4*c; + c=*(cp-*p++);x+=c;y+=4*c;w-=2*c; + c=*(cp-*p);x+=4*c;y+=4*c;w-=4*c; + p+=x_size-5; + + c=*(cp-*p++);x+=9*c;y+=c;w+=3*c; + c=*(cp-*p++);x+=4*c;y+=c;w+=2*c; + c=*(cp-*p++);x+=c;y+=c;w+=c; + c=*(cp-*p++);y+=c; + c=*(cp-*p++);x+=c;y+=c;w-=c; + c=*(cp-*p++);x+=4*c;y+=c;w-=2*c; + c=*(cp-*p);x+=9*c;y+=c;w-=3*c; + p+=x_size-6; + + c=*(cp-*p++);x+=9*c; + c=*(cp-*p++);x+=4*c; + 
c=*(cp-*p);x+=c; + p+=2; + c=*(cp-*p++);x+=c; + c=*(cp-*p++);x+=4*c; + c=*(cp-*p);x+=9*c; + p+=x_size-6; + + c=*(cp-*p++);x+=9*c;y+=c;w-=3*c; + c=*(cp-*p++);x+=4*c;y+=c;w-=2*c; + c=*(cp-*p++);x+=c;y+=c;w-=c; + c=*(cp-*p++);y+=c; + c=*(cp-*p++);x+=c;y+=c;w+=c; + c=*(cp-*p++);x+=4*c;y+=c;w+=2*c; + c=*(cp-*p);x+=9*c;y+=c;w+=3*c; + p+=x_size-5; + + c=*(cp-*p++);x+=4*c;y+=4*c;w-=4*c; + c=*(cp-*p++);x+=c;y+=4*c;w-=2*c; + c=*(cp-*p++);y+=4*c; + c=*(cp-*p++);x+=c;y+=4*c;w+=2*c; + c=*(cp-*p);x+=4*c;y+=4*c;w+=4*c; + p+=x_size-3; + + c=*(cp-*p++);x+=c;y+=9*c;w-=3*c; + c=*(cp-*p++);y+=9*c; + c=*(cp-*p);x+=c;y+=9*c;w+=3*c; + + if (y==0) + z = 1000000.0; + else + z = ((float)x) / ((float)y); + if (z < 0.5) { /* vertical */ a=0; b=1; } + else { if (z > 2.0) { /* horizontal */ a=1; b=0; } + else { /* diagonal */ if (w>0) { a=-1; b=1; } + else { a=1; b=1; }}} + if ( (m > r[(i+a)*x_size+j+b]) && (m >= r[(i-a)*x_size+j-b]) && + (m > r[(i+(2*a))*x_size+j+(2*b)]) && (m >= r[(i-(2*a))*x_size+j-(2*b)]) ) + mid[i*x_size+j] = 2; + } + } + } +} + +/* }}} */ +/* {{{ susan_edges_small(in,r,sf,max_no,out) */ + +void susan_edges_small(uchar* in, int* r, uchar* mid, uchar* bp, + int max_no, int x_size, int y_size) +{ +float z; +int do_symmetry, i, j, m, n, a, b, x, y, w; +uchar c,*p,*cp; + + memset (r,0,x_size * y_size * sizeof(int)); + + max_no = 730; /* ho hum ;) */ + + for (i=1;i0) + { + m=r[i*x_size+j]; + n=max_no - m; + cp=bp + in[i*x_size+j]; + + if (n>250) + { + p=in + (i-1)*x_size + j - 1; + x=0;y=0; + + c=*(cp-*p++);x-=c;y-=c; + c=*(cp-*p++);y-=c; + c=*(cp-*p);x+=c;y-=c; + p+=x_size-2; + + c=*(cp-*p);x-=c; + p+=2; + c=*(cp-*p);x+=c; + p+=x_size-2; + + c=*(cp-*p++);x-=c;y+=c; + c=*(cp-*p++);y+=c; + c=*(cp-*p);x+=c;y+=c; + + z = sqrt((float)((x*x) + (y*y))); + if (z > (0.4*(float)n)) /* 0.6 */ + { + do_symmetry=0; + if (x==0) + z=1000000.0; + else + z=((float)y) / ((float)x); + if (z < 0) { z=-z; w=-1; } + else w=1; + if (z < 0.5) { /* vert_edge */ a=0; b=1; } + else { if (z > 2.0) { /* hor_edge */ a=1; b=0; } + else { /* diag_edge */ if (w>0) { a=1; b=1; } + else { a=-1; b=1; }}} + if ( (m > r[(i+a)*x_size+j+b]) && (m >= r[(i-a)*x_size+j-b]) ) + mid[i*x_size+j] = 1; + } + else + do_symmetry=1; + } + else + do_symmetry=1; + + if (do_symmetry==1) + { + p=in + (i-1)*x_size + j - 1; + x=0; y=0; w=0; + + /* | \ + y -x- w + | \ */ + + c=*(cp-*p++);x+=c;y+=c;w+=c; + c=*(cp-*p++);y+=c; + c=*(cp-*p);x+=c;y+=c;w-=c; + p+=x_size-2; + + c=*(cp-*p);x+=c; + p+=2; + c=*(cp-*p);x+=c; + p+=x_size-2; + + c=*(cp-*p++);x+=c;y+=c;w-=c; + c=*(cp-*p++);y+=c; + c=*(cp-*p);x+=c;y+=c;w+=c; + + if (y==0) + z = 1000000.0; + else + z = ((float)x) / ((float)y); + if (z < 0.5) { /* vertical */ a=0; b=1; } + else { if (z > 2.0) { /* horizontal */ a=1; b=0; } + else { /* diagonal */ if (w>0) { a=-1; b=1; } + else { a=1; b=1; }}} + if ( (m > r[(i+a)*x_size+j+b]) && (m >= r[(i-a)*x_size+j-b]) ) + mid[i*x_size+j] = 2; + } + } + } +} + +/* }}} */ + +/* }}} */ +/* {{{ corners */ + +/* {{{ corner_draw(in,corner_list,drawing_mode) */ + +void corner_draw(uchar* in, CORNER_LIST corner_list, + int x_size, int drawing_mode) +{ +uchar *p; +int n=0; + + while(corner_list[n].info != 7) + { + if (drawing_mode==0) + { + p = in + (corner_list[n].y-1)*x_size + corner_list[n].x - 1; + *p++=255; *p++=255; *p=255; p+=x_size-2; + *p++=255; *p++=0; *p=255; p+=x_size-2; + *p++=255; *p++=255; *p=255; + n++; + } + else + { + p = in + corner_list[n].y*x_size + corner_list[n].x; + *p=0; + n++; + } + } +} + +/* }}} */ +/* {{{ susan(in,r,sf,max_no,corner_list) */ + +void 
susan_corners(uchar* in, int* r, uchar* bp, + int max_no, CORNER_LIST corner_list, + int x_size, int y_size) +{ +int n,x,y,sq,xx,yy, + i,j,*cgx,*cgy; +float divide; +uchar c,*p,*cp; + + memset (r,0,x_size * y_size * sizeof(int)); + + cgx=(int *)malloc(x_size*y_size*sizeof(int)); + cgy=(int *)malloc(x_size*y_size*sizeof(int)); + + for (i=5;i ((n*n)/2) ) + { + if(yy290){ + r[i*x_size+j] = max_no-n; + cgx[i*x_size+j] = (51*x)/n; + cgy[i*x_size+j] = (51*y)/n;} + } + } +}}}}}}}}}}}}}}}}}}} + + /* to locate the local maxima */ + n=0; + for (i=5;i0) { + /* 5x5 mask */ +#ifdef FIVE_SUPP + if ( + (x>r[(i-1)*x_size+j+2]) && + (x>r[(i )*x_size+j+1]) && + (x>r[(i )*x_size+j+2]) && + (x>r[(i+1)*x_size+j-1]) && + (x>r[(i+1)*x_size+j ]) && + (x>r[(i+1)*x_size+j+1]) && + (x>r[(i+1)*x_size+j+2]) && + (x>r[(i+2)*x_size+j-2]) && + (x>r[(i+2)*x_size+j-1]) && + (x>r[(i+2)*x_size+j ]) && + (x>r[(i+2)*x_size+j+1]) && + (x>r[(i+2)*x_size+j+2]) && + (x>=r[(i-2)*x_size+j-2]) && + (x>=r[(i-2)*x_size+j-1]) && + (x>=r[(i-2)*x_size+j ]) && + (x>=r[(i-2)*x_size+j+1]) && + (x>=r[(i-2)*x_size+j+2]) && + (x>=r[(i-1)*x_size+j-2]) && + (x>=r[(i-1)*x_size+j-1]) && + (x>=r[(i-1)*x_size+j ]) && + (x>=r[(i-1)*x_size+j+1]) && + (x>=r[(i )*x_size+j-2]) && + (x>=r[(i )*x_size+j-1]) && + (x>=r[(i+1)*x_size+j-2]) ) +#endif +#ifdef SEVEN_SUPP + if ( + (x>r[(i-3)*x_size+j-3]) && + (x>r[(i-3)*x_size+j-2]) && + (x>r[(i-3)*x_size+j-1]) && + (x>r[(i-3)*x_size+j ]) && + (x>r[(i-3)*x_size+j+1]) && + (x>r[(i-3)*x_size+j+2]) && + (x>r[(i-3)*x_size+j+3]) && + + (x>r[(i-2)*x_size+j-3]) && + (x>r[(i-2)*x_size+j-2]) && + (x>r[(i-2)*x_size+j-1]) && + (x>r[(i-2)*x_size+j ]) && + (x>r[(i-2)*x_size+j+1]) && + (x>r[(i-2)*x_size+j+2]) && + (x>r[(i-2)*x_size+j+3]) && + + (x>r[(i-1)*x_size+j-3]) && + (x>r[(i-1)*x_size+j-2]) && + (x>r[(i-1)*x_size+j-1]) && + (x>r[(i-1)*x_size+j ]) && + (x>r[(i-1)*x_size+j+1]) && + (x>r[(i-1)*x_size+j+2]) && + (x>r[(i-1)*x_size+j+3]) && + + (x>r[(i)*x_size+j-3]) && + (x>r[(i)*x_size+j-2]) && + (x>r[(i)*x_size+j-1]) && + (x>=r[(i)*x_size+j+1]) && + (x>=r[(i)*x_size+j+2]) && + (x>=r[(i)*x_size+j+3]) && + + (x>=r[(i+1)*x_size+j-3]) && + (x>=r[(i+1)*x_size+j-2]) && + (x>=r[(i+1)*x_size+j-1]) && + (x>=r[(i+1)*x_size+j ]) && + (x>=r[(i+1)*x_size+j+1]) && + (x>=r[(i+1)*x_size+j+2]) && + (x>=r[(i+1)*x_size+j+3]) && + + (x>=r[(i+2)*x_size+j-3]) && + (x>=r[(i+2)*x_size+j-2]) && + (x>=r[(i+2)*x_size+j-1]) && + (x>=r[(i+2)*x_size+j ]) && + (x>=r[(i+2)*x_size+j+1]) && + (x>=r[(i+2)*x_size+j+2]) && + (x>=r[(i+2)*x_size+j+3]) && + + (x>=r[(i+3)*x_size+j-3]) && + (x>=r[(i+3)*x_size+j-2]) && + (x>=r[(i+3)*x_size+j-1]) && + (x>=r[(i+3)*x_size+j ]) && + (x>=r[(i+3)*x_size+j+1]) && + (x>=r[(i+3)*x_size+j+2]) && + (x>=r[(i+3)*x_size+j+3]) ) +#endif +{ +corner_list[n].info=0; +corner_list[n].x=j; +corner_list[n].y=i; +corner_list[n].dx=cgx[i*x_size+j]; +corner_list[n].dy=cgy[i*x_size+j]; +corner_list[n].I=in[i*x_size+j]; +n++; +if(n==MAX_CORNERS){ + fprintf(stderr,"Too many corners.\n"); + exit(1); + }}}} +corner_list[n].info=7; + +free(cgx); +free(cgy); + +} + +/* }}} */ +/* {{{ susan_quick(in,r,sf,max_no,corner_list) */ + +void susan_corners_quick(uchar* in, int* r, uchar* bp, + int max_no, CORNER_LIST corner_list, + int x_size, int y_size) +{ +int n,x,y,i,j; +uchar *p,*cp; + + memset (r,0,x_size * y_size * sizeof(int)); + + for (i=7;i0) { + /* 5x5 mask */ +#ifdef FIVE_SUPP + if ( + (x>r[(i-1)*x_size+j+2]) && + (x>r[(i )*x_size+j+1]) && + (x>r[(i )*x_size+j+2]) && + (x>r[(i+1)*x_size+j-1]) && + (x>r[(i+1)*x_size+j ]) && + 
(x>r[(i+1)*x_size+j+1]) && + (x>r[(i+1)*x_size+j+2]) && + (x>r[(i+2)*x_size+j-2]) && + (x>r[(i+2)*x_size+j-1]) && + (x>r[(i+2)*x_size+j ]) && + (x>r[(i+2)*x_size+j+1]) && + (x>r[(i+2)*x_size+j+2]) && + (x>=r[(i-2)*x_size+j-2]) && + (x>=r[(i-2)*x_size+j-1]) && + (x>=r[(i-2)*x_size+j ]) && + (x>=r[(i-2)*x_size+j+1]) && + (x>=r[(i-2)*x_size+j+2]) && + (x>=r[(i-1)*x_size+j-2]) && + (x>=r[(i-1)*x_size+j-1]) && + (x>=r[(i-1)*x_size+j ]) && + (x>=r[(i-1)*x_size+j+1]) && + (x>=r[(i )*x_size+j-2]) && + (x>=r[(i )*x_size+j-1]) && + (x>=r[(i+1)*x_size+j-2]) ) +#endif +#ifdef SEVEN_SUPP + if ( + (x>r[(i-3)*x_size+j-3]) && + (x>r[(i-3)*x_size+j-2]) && + (x>r[(i-3)*x_size+j-1]) && + (x>r[(i-3)*x_size+j ]) && + (x>r[(i-3)*x_size+j+1]) && + (x>r[(i-3)*x_size+j+2]) && + (x>r[(i-3)*x_size+j+3]) && + + (x>r[(i-2)*x_size+j-3]) && + (x>r[(i-2)*x_size+j-2]) && + (x>r[(i-2)*x_size+j-1]) && + (x>r[(i-2)*x_size+j ]) && + (x>r[(i-2)*x_size+j+1]) && + (x>r[(i-2)*x_size+j+2]) && + (x>r[(i-2)*x_size+j+3]) && + + (x>r[(i-1)*x_size+j-3]) && + (x>r[(i-1)*x_size+j-2]) && + (x>r[(i-1)*x_size+j-1]) && + (x>r[(i-1)*x_size+j ]) && + (x>r[(i-1)*x_size+j+1]) && + (x>r[(i-1)*x_size+j+2]) && + (x>r[(i-1)*x_size+j+3]) && + + (x>r[(i)*x_size+j-3]) && + (x>r[(i)*x_size+j-2]) && + (x>r[(i)*x_size+j-1]) && + (x>=r[(i)*x_size+j+1]) && + (x>=r[(i)*x_size+j+2]) && + (x>=r[(i)*x_size+j+3]) && + + (x>=r[(i+1)*x_size+j-3]) && + (x>=r[(i+1)*x_size+j-2]) && + (x>=r[(i+1)*x_size+j-1]) && + (x>=r[(i+1)*x_size+j ]) && + (x>=r[(i+1)*x_size+j+1]) && + (x>=r[(i+1)*x_size+j+2]) && + (x>=r[(i+1)*x_size+j+3]) && + + (x>=r[(i+2)*x_size+j-3]) && + (x>=r[(i+2)*x_size+j-2]) && + (x>=r[(i+2)*x_size+j-1]) && + (x>=r[(i+2)*x_size+j ]) && + (x>=r[(i+2)*x_size+j+1]) && + (x>=r[(i+2)*x_size+j+2]) && + (x>=r[(i+2)*x_size+j+3]) && + + (x>=r[(i+3)*x_size+j-3]) && + (x>=r[(i+3)*x_size+j-2]) && + (x>=r[(i+3)*x_size+j-1]) && + (x>=r[(i+3)*x_size+j ]) && + (x>=r[(i+3)*x_size+j+1]) && + (x>=r[(i+3)*x_size+j+2]) && + (x>=r[(i+3)*x_size+j+3]) ) +#endif +{ +corner_list[n].info=0; +corner_list[n].x=j; +corner_list[n].y=i; +x = in[(i-2)*x_size+j-2] + in[(i-2)*x_size+j-1] + in[(i-2)*x_size+j] + in[(i-2)*x_size+j+1] + in[(i-2)*x_size+j+2] + + in[(i-1)*x_size+j-2] + in[(i-1)*x_size+j-1] + in[(i-1)*x_size+j] + in[(i-1)*x_size+j+1] + in[(i-1)*x_size+j+2] + + in[(i )*x_size+j-2] + in[(i )*x_size+j-1] + in[(i )*x_size+j] + in[(i )*x_size+j+1] + in[(i )*x_size+j+2] + + in[(i+1)*x_size+j-2] + in[(i+1)*x_size+j-1] + in[(i+1)*x_size+j] + in[(i+1)*x_size+j+1] + in[(i+1)*x_size+j+2] + + in[(i+2)*x_size+j-2] + in[(i+2)*x_size+j-1] + in[(i+2)*x_size+j] + in[(i+2)*x_size+j+1] + in[(i+2)*x_size+j+2]; + +corner_list[n].I=x/25; +/*corner_list[n].I=in[i*x_size+j];*/ +x = in[(i-2)*x_size+j+2] + in[(i-1)*x_size+j+2] + in[(i)*x_size+j+2] + in[(i+1)*x_size+j+2] + in[(i+2)*x_size+j+2] - + (in[(i-2)*x_size+j-2] + in[(i-1)*x_size+j-2] + in[(i)*x_size+j-2] + in[(i+1)*x_size+j-2] + in[(i+2)*x_size+j-2]); +x += x + in[(i-2)*x_size+j+1] + in[(i-1)*x_size+j+1] + in[(i)*x_size+j+1] + in[(i+1)*x_size+j+1] + in[(i+2)*x_size+j+1] - + (in[(i-2)*x_size+j-1] + in[(i-1)*x_size+j-1] + in[(i)*x_size+j-1] + in[(i+1)*x_size+j-1] + in[(i+2)*x_size+j-1]); + +y = in[(i+2)*x_size+j-2] + in[(i+2)*x_size+j-1] + in[(i+2)*x_size+j] + in[(i+2)*x_size+j+1] + in[(i+2)*x_size+j+2] - + (in[(i-2)*x_size+j-2] + in[(i-2)*x_size+j-1] + in[(i-2)*x_size+j] + in[(i-2)*x_size+j+1] + in[(i-2)*x_size+j+2]); +y += y + in[(i+1)*x_size+j-2] + in[(i+1)*x_size+j-1] + in[(i+1)*x_size+j] + in[(i+1)*x_size+j+1] + in[(i+1)*x_size+j+2] - + 
(in[(i-1)*x_size+j-2] + in[(i-1)*x_size+j-1] + in[(i-1)*x_size+j] + in[(i-1)*x_size+j+1] + in[(i-1)*x_size+j+2]); +corner_list[n].dx=x/15; +corner_list[n].dy=y/15; +n++; +if(n==MAX_CORNERS){ + fprintf(stderr,"Too many corners.\n"); + exit(1); + }}}} +corner_list[n].info=7; +} + +/* }}} */ + +/* }}} */ +/* {{{ main(argc, argv) */ + +int main(int argc, char* argv[]) +{ +/* {{{ vars */ + +char *tcp; +uchar *in, *bp, *mid; +float dt=4.0; +int *r, + argindex=3, + bt=20, + principle=0, + thin_post_proc=1, + three_by_three=0, + drawing_mode=0, + susan_quick=0, + max_no_corners=1850, + max_no_edges=2650, + mode = 0, + x_size, y_size; +CORNER_LIST corner_list; + +/* }}} */ + + long ct_repeat=0; + long ct_repeat_max=1; + int ct_return=0; + +#ifdef OPENME + openme_init(NULL,NULL,NULL,0); + openme_callback("PROGRAM_START", NULL); +#endif +#ifdef XOPENME + xopenme_init(1,2); +#endif + + if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN")); + + if (argc<3) + usage(); + + get_image(argv[1],&in,&x_size,&y_size); + +#ifdef XOPENME + xopenme_add_var_i(0, " \"image_size_x\":%u", x_size); + xopenme_add_var_i(1, " \"image_size_y\":%u", y_size); +#endif + +// printf("Size X=%u Size Y=%u\n", x_size, y_size); + /* FGG - changing dataset size */ +// x_size=8; +// y_size=8; +// printf("Size X=%u Size Y=%u\n", x_size, y_size); + + /* {{{ look at options */ + + while (argindex < argc) + { + tcp = argv[argindex]; + if (*tcp == '-') + switch (*++tcp) + { + case 's': /* smoothing */ + mode=0; + break; + case 'e': /* edges */ + mode=1; + break; + case 'c': /* corners */ + mode=2; + break; + case 'p': /* principle */ + principle=1; + break; + case 'n': /* thinning post processing */ + thin_post_proc=0; + break; + case 'b': /* simple drawing mode */ + drawing_mode=1; + break; + case '3': /* 3x3 flat mask */ + three_by_three=1; + break; + case 'q': /* quick susan mask */ + susan_quick=1; + break; + case 'd': /* distance threshold */ + if (++argindex >= argc){ + printf ("No argument following -d\n"); + exit(0);} + dt=atof(argv[argindex]); + if (dt<0) three_by_three=1; + break; + case 't': /* brightness threshold */ + if (++argindex >= argc){ + printf ("No argument following -t\n"); + exit(0);} + bt=atoi(argv[argindex]); + break; + } + else + usage(); + argindex++; + } + + if ( (principle==1) && (mode==0) ) + mode=1; + +/* }}} */ + /* {{{ main processing */ + +#ifdef OPENME + openme_callback("KERNEL_START", NULL); +#endif +#ifdef XOPENME + xopenme_clock_start(0); +#endif + + for (ct_repeat=0; ct_repeat +Click if you want to use Python virtual environment + +We suggest you to install a python virtual environment via CM though it's not strictly necessary +(CM can automatically detect and reuse your Python installation and environments): +```bash +cm run script "install python-venv" --name=loadgen +``` + +You can also install a specific version of Python on your system via: +```bash +cm run script "install python-venv" --name=loadgen --version=3.10.7 +``` + +By default, CM will be asking users to select one from all detected and installed Python versions +including the above one, any time a script with python dependency is run. To avoid that, you +can set up the following environment variable with the name of the current virtual environment: + +```bash +export CM_SCRIPT_EXTRA_CMD="--adr.python.name=loadgen" +``` + +The `--adr` flag stands for "Add to all Dependencies Recursively" and will find all sub-dependencies on other CM scripts + + + + +### Install dependencies via CM (optional) + +
+
+You can skip this sub-section if you want CM to automatically detect an already installed
+ONNX Runtime on your system. Otherwise, follow the next steps to install the latest or a specific
+version of ONNX Runtime.
+
+### Download LoadGen sources from the MLPerf inference benchmark
+
+```bash
+cm run script "get mlperf inference src" --version=r3.1
+```
+
+### Install MLPerf LoadGen
+
+We can now install LoadGen via CM:
+
+```bash
+cm run script "get mlperf loadgen"
+```
+
+### ONNX Runtime, CPU
+
+```bash
+cm run script "get generic-python-lib _onnxruntime"
+```
+
+or, to pin or bound the version:
+
+```bash
+cm run script "get generic-python-lib _onnxruntime" --version=1.13.1
+```
+
+```bash
+cm run script "get generic-python-lib _onnxruntime" --version_min=1.10.0
+```
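+
+To double-check which ONNX Runtime build was actually picked up (and whether the CUDA
+execution provider is available in it), here is a minimal sketch you can run from the same
+Python environment:
+
+```python
+import onnxruntime as ort
+
+# Report the detected version and the execution providers compiled into this build
+print(ort.__version__)
+print(ort.get_available_providers())  # e.g. ['CPUExecutionProvider', ...]
+```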
+
+### Benchmark a standard MLPerf model
+
+You can use CM variations prefixed by `_` to benchmark an official MLPerf model
+(`_resnet50` or `_retinanet`):
+
+```bash
+cm run script "python app loadgen-generic _onnxruntime _retinanet" --samples=5
+cmr "python app loadgen-generic _onnxruntime _resnet50"
+```
+
+Normally, you should see a performance report from LoadGen similar to the following:
+
+Click to open + +```bash + +2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Model: /home/gfursin/CM/repos/local/cache/9c825a0a06fb48e2/resnet50_v1.onnx +2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Runner: inline, Concurrency: 4 +2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Results: results/resnet50_v1.onnx/inline +2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Test Started +2022-12-06 16:51:39,399 INFO MainThread - loadgen.harness load_query_samples: Loaded 100 samples +2022-12-06 16:51:55,723 INFO MainThread - loadgen.harness issue_query: Queries issued 550 +2022-12-06 16:51:55,725 INFO MainThread - loadgen.harness flush_queries: Queries flushed +2022-12-06 16:51:55,731 INFO MainThread - loadgen.harness unload_query_samples: Unloaded samples +================================================ +MLPerf Results Summary +================================================ +SUT name : PySUT +Scenario : Offline +Mode : PerformanceOnly +Samples per second: 33.6903 +Result is : VALID + Min duration satisfied : Yes + Min queries satisfied : Yes + Early stopping satisfied: Yes + +================================================ +Additional Stats +================================================ +Min latency (ns) : 16325180169 +Max latency (ns) : 16325180169 +Mean latency (ns) : 16325180169 +50.00 percentile latency (ns) : 16325180169 +90.00 percentile latency (ns) : 16325180169 +95.00 percentile latency (ns) : 16325180169 +97.00 percentile latency (ns) : 16325180169 +99.00 percentile latency (ns) : 16325180169 +99.90 percentile latency (ns) : 16325180169 + +================================================ +Test Parameters Used +================================================ +samples_per_query : 550 +target_qps : 50 +target_latency (ns): 0 +max_async_queries : 1 +min_duration (ms): 10000 +max_duration (ms): 0 +min_query_count : 1 +max_query_count : 0 +qsl_rng_seed : 0 +sample_index_rng_seed : 0 +schedule_rng_seed : 0 +accuracy_log_rng_seed : 0 +accuracy_log_probability : 0 +accuracy_log_sampling_target : 0 +print_timestamps : 0 +performance_issue_unique : 0 +performance_issue_same : 0 +performance_issue_same_index : 0 +performance_sample_count : 100 + +No warnings encountered during test. + +No errors encountered during test. +2022-12-06 16:51:55,753 INFO MainThread - __main__ main: Observed QPS: 33.6903 +2022-12-06 16:51:55,753 INFO MainThread - __main__ main: Result: VALID +2022-12-06 16:51:55,753 INFO MainThread - __main__ main: Test Completed + + - Running postprocess ... + - running time of script "app,loadgen,generic,loadgen-generic,python": 370.87 sec. + +``` + +
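+
+If you want to post-process these results in scripts, the key/value lines in
+`mlperf_log_summary.txt` are easy to parse. A minimal sketch (the path below is an
+assumption based on the default `results/<model>/<runner>` output layout; the regex
+mirrors the one used by this app in `src/main.py`):
+
+```python
+import re
+
+summary = {}
+# Assumed location of the LoadGen summary produced by the run above
+with open("results/resnet50_v1.onnx/inline/mlperf_log_summary.txt") as f:
+    for line in f:
+        m = re.match(r"^\s*([\w\s.\(\)\/]+)\s*\:\s*([\w\+\.]+).*", line)
+        if m:
+            summary[m.group(1).strip()] = m.group(2).strip()
+
+print(summary.get("Samples per second"), summary.get("Result is"))
+```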
+
+
+### Benchmark custom model
+
+You can also specify any custom ONNX model file as follows:
+
+```bash
+cm run script "python app loadgen-generic _onnxruntime" --modelpath=<path to ONNX model file>
+```
+
+### Benchmark Hugging Face model
+
+```bash
+cmr "python app loadgen-generic _onnxruntime _custom _huggingface _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --adr.hf-downloader.model_filename=model.onnx
+```
+
+*See more examples of downloading Hugging Face models via CM [here](../get-ml-model-huggingface-zoo/README-extra.md).*
+
+### Benchmark using ONNX CUDA
+
+```bash
+cm rm cache -f
+cmr "python app loadgen-generic _onnxruntime _cuda _retinanet" --quiet
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --adr.hf-downloader.model_filename=model.onnx
+```
+
+These cases worked on Windows and Linux but may require a GPU with more than 8 GB of memory:
+```bash
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.steerapi/Llama-2-7b-chat-hf-onnx-awq-w8" --adr.hf-downloader.model_filename=onnx/decoder_model_merged_quantized.onnx,onnx/decoder_model_merged_quantized.onnx_data --samples=2
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.alpindale/Llama-2-13b-ONNX" --adr.hf-downloader.model_filename=FP32/LlamaV2_13B_float32.onnx --adr.hf-downloader.full_subfolder=FP32 --samples=2
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.Intel/gpt-j-6B-int8-static" --adr.hf-downloader.model_filename=model.onnx --adr.hf-downloader.full_subfolder=. --samples=2
+```
+
+TBD: some cases that are not yet fully supported (data types, input mismatch, etc.):
+```bash
+cmr "python app loadgen-generic _onnxruntime _custom _huggingface _model-stub.runwayml/stable-diffusion-v1-5" --adr.hf-downloader.revision=onnx --adr.hf-downloader.model_filename=unet/model.onnx,unet/weights.pb --samples=2
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.microsoft/Mistral-7B-v0.1-onnx" --adr.hf-downloader.model_filename=Mistral-7B-v0.1.onnx,Mistral-7B-v0.1.onnx.data --samples=2
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.alpindale/Llama-2-7b-ONNX" --adr.hf-downloader.model_filename=FP16/LlamaV2_7B_float16.onnx --adr.hf-downloader.full_subfolder=FP16 --samples=2
+```
+
+### Other variations and flags
+
+You can obtain help about flags and variations from the command line:
+
+```bash
+cm run script "python app loadgen-generic" --help
+
+Available variations:
+
+  _cpu
+  _cuda
+  _custom
+  _custom,huggingface
+  _huggingface
+  _model-stub.#
+  _onnxruntime
+  _pytorch
+  _resnet50
+  _retinanet
+
+Available flags mapped to environment variables:
+
+  --concurrency  ->  --env.CM_MLPERF_CONCURRENCY
+  --ep  ->  --env.CM_MLPERF_EXECUTION_PROVIDER
+  --execmode  ->  --env.CM_MLPERF_EXEC_MODE
+  --interop  ->  --env.CM_MLPERF_INTEROP
+  --intraop  ->  --env.CM_MLPERF_INTRAOP
+  --modelpath  ->  --env.CM_ML_MODEL_FILE_WITH_PATH
+  --output_dir  ->  --env.CM_MLPERF_OUTPUT_DIR
+  --runner  ->  --env.CM_MLPERF_RUNNER
+  --samples  ->  --env.CM_MLPERF_LOADGEN_SAMPLES
+  --scenario  ->  --env.CM_MLPERF_LOADGEN_SCENARIO
+
+```
+
+## Running this app via Docker
+
+```bash
+cm docker script "python app loadgen-generic _onnxruntime _custom _huggingface _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --adr.hf-downloader.model_filename=model.onnx --samples=2 --output_dir=new_results --docker_cm_repo=ctuning@mlcommons-ck
+```
+
+## Tuning CPU performance via CM experiment
+
+```bash
+cm run experiment --tags=loadgen,python,llama2 -- cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.steerapi/Llama-2-7b-chat-hf-onnx-awq-w8" --adr.hf-downloader.model_filename=onnx/decoder_model_merged_quantized.onnx,onnx/decoder_model_merged_quantized.onnx_data --samples=2 --intraop={{CM_OPT_INTRAOP{[1,2,4]}}} --interop={{CM_OPT_INTEROP{[1,2,4]}}} --quiet
+cm run experiment --tags=loadgen,python,llama2 -- cmr "python app loadgen-generic _onnxruntime" --modelpath={PATH TO ONNX MODEL} --samples=2 --intraop={{CM_OPT_INTRAOP{[1,2,4]}}} --interop={{CM_OPT_INTEROP{[1,2,4]}}} --quiet
+```
+
+
+## Developers
+
+* [Gaz Iqbal](https://www.linkedin.com/in/gaziqbal)
+* [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh)
+* [Grigori Fursin](https://cKnowledge.org/gfursin)
+
+## Get in touch
+
+* [MLCommons Task Force on Automation and Reproducibility](../../../docs/taskforce.md)
+* [Public Discord server](https://discord.gg/JjWNWXKxwT)
diff --git a/script/app-loadgen-generic-python/README.md b/script/app-loadgen-generic-python/README.md
new file mode 100644
index 0000000000..443404358d
--- /dev/null
+++ b/script/app-loadgen-generic-python/README.md
@@ -0,0 +1,322 @@
+Automatically generated README for this automation recipe: **app-loadgen-generic-python**
+
+Category: **Modular MLPerf inference benchmark pipeline**
+
+License: **Apache 2.0**
+
+Developers: [Gaz Iqbal](https://www.linkedin.com/in/gaziqbal), [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-loadgen-generic-python,d3d949cc361747a6) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-loadgen-generic-python)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see in above meta description): *python,app,generic,loadgen*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "python app generic loadgen" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=python,app,generic,loadgen`
+
+`cm run script --tags=python,app,generic,loadgen[,variations] [--input_flags]`
+
+*or*
+
+`cmr "python app generic loadgen"`
+
+`cmr "python app generic loadgen [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+
+#### Input Flags
+
+* --**modelpath**=Full path to file with model weights
+* --**modelcodepath**=(for PyTorch models) Full path to file with model code and cmc.py
+* --**modelcfgpath**=(for PyTorch models) Full path to JSON file with model cfg
+* --**modelsamplepath**=(for PyTorch models) Full path to file with model sample in pickle format
+* --**ep**=ONNX Execution provider
+* --**scenario**=MLPerf LoadGen scenario
+* --**samples**=Number of samples (*2*)
+* --**runner**=MLPerf runner
+* --**execmode**=MLPerf exec mode
+* --**output_dir**=MLPerf output directory
+* --**concurrency**=MLPerf concurrency
+* --**intraop**=MLPerf intra op threads
+* --**interop**=MLPerf inter op threads
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "modelpath":...})
+```
+
+#### Run this script from Python
+
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'python,app,generic,loadgen',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="python,app,generic,loadgen"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=python,app,generic,loadgen) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "python app generic loadgen[variations]" [--input_flags]`
+
+___
+### Customization
+
+
+#### Variations
+
+ * *No group (any variation can be selected)*
+ Click here to expand this section. + + * `_cmc` + - Environment variables: + - *CM_CUSTOM_MODEL_CMC*: `True` + - Workflow: + * `_custom,cmc` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,ml-model,cmc + - *Warning: no scripts found* + * `_custom,huggingface` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,ml-model,huggingface + * CM names: `--adr.['hf-downloader']...` + - CM script: [get-ml-model-huggingface-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-huggingface-zoo) + * `_huggingface` + - Environment variables: + - *CM_CUSTOM_MODEL_SOURCE*: `huggingface` + - Workflow: + * `_model-stub.#` + - Environment variables: + - *CM_ML_MODEL_STUB*: `#` + - Workflow: + +
+ + + * Group "**backend**" +
+ Click here to expand this section. + + * **`_onnxruntime`** (default) + - Environment variables: + - *CM_MLPERF_BACKEND*: `onnxruntime` + - Workflow: + * `_pytorch` + - Environment variables: + - *CM_MLPERF_BACKEND*: `pytorch` + - Workflow: + +
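+
+  For illustration only (the paths below are placeholders, not part of this recipe), a run
+  that selects the `_pytorch` backend together with `_custom` would typically also pass the
+  PyTorch-specific input flags listed above:
+
+  ```bash
+  cmr "python app generic loadgen _pytorch _custom" \
+      --modelpath=<path to checkpoint> \
+      --modelcodepath=<directory with cmc.py> \
+      --modelsamplepath=<model sample in pickle format> \
+      --samples=2
+  ```
+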
+ + + * Group "**device**" +
+ Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `cpu` + - *CM_MLPERF_EXECUTION_PROVIDER*: `CPUExecutionProvider` + - Workflow: + * `_cuda` + - Environment variables: + - *CM_MLPERF_DEVICE*: `gpu` + - *CM_MLPERF_EXECUTION_PROVIDER*: `CUDAExecutionProvider` + - Workflow: + +
+ + + * Group "**models**" +
+ Click here to expand this section. + + * `_custom` + - Environment variables: + - *CM_MODEL*: `custom` + - Workflow: + * `_resnet50` + - Environment variables: + - *CM_MODEL*: `resnet50` + - Workflow: + * `_retinanet` + - Environment variables: + - *CM_MODEL*: `retinanet` + - Workflow: + +
+ + +#### Default variations + +`_cpu,_onnxruntime` + +#### Script flags mapped to environment +
+
+* `--concurrency=value`  →  `CM_MLPERF_CONCURRENCY=value`
+* `--ep=value`  →  `CM_MLPERF_EXECUTION_PROVIDER=value`
+* `--execmode=value`  →  `CM_MLPERF_EXEC_MODE=value`
+* `--interop=value`  →  `CM_MLPERF_INTEROP=value`
+* `--intraop=value`  →  `CM_MLPERF_INTRAOP=value`
+* `--loadgen_duration_sec=value`  →  `CM_MLPERF_LOADGEN_DURATION_SEC=value`
+* `--loadgen_expected_qps=value`  →  `CM_MLPERF_LOADGEN_EXPECTED_QPS=value`
+* `--modelcfg=value`  →  `CM_ML_MODEL_CFG=value`
+* `--modelcfgpath=value`  →  `CM_ML_MODEL_CFG_WITH_PATH=value`
+* `--modelcodepath=value`  →  `CM_ML_MODEL_CODE_WITH_PATH=value`
+* `--modelpath=value`  →  `CM_ML_MODEL_FILE_WITH_PATH=value`
+* `--modelsamplepath=value`  →  `CM_ML_MODEL_SAMPLE_WITH_PATH=value`
+* `--output_dir=value`  →  `CM_MLPERF_OUTPUT_DIR=value`
+* `--runner=value`  →  `CM_MLPERF_RUNNER=value`
+* `--samples=value`  →  `CM_MLPERF_LOADGEN_SAMPLES=value`
+* `--scenario=value`  →  `CM_MLPERF_LOADGEN_SCENARIO=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "concurrency":...})
+```
+
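+
+For example, here is a minimal sketch of a complete call through the Python API
+(the flag values are illustrative, not defaults):
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'python,app,generic,loadgen,_onnxruntime,_resnet50',
+                  'out':'con',
+                  'samples':'5',    # mapped to CM_MLPERF_LOADGEN_SAMPLES=5
+                  'intraop':'2'})   # mapped to CM_MLPERF_INTRAOP=2
+
+if r['return']>0:
+    print (r['error'])
+```
+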
+ +#### Default environment + +
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags.
+
+* CM_MLPERF_EXECUTION_MODE: `parallel`
+* CM_MLPERF_BACKEND: `onnxruntime`
+
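+
+For example, to redirect the results of a run (this key is read directly by this script's `customize.py`):
+
+```bash
+cm run script --tags=python,app,generic,loadgen,_onnxruntime,_resnet50 --env.CM_MLPERF_OUTPUT_DIR=my_results
+```
+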
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-loadgen-generic-python/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_psutil + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,cuda + * `if (CM_MLPERF_DEVICE == gpu)` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,loadgen + * CM names: `--adr.['loadgen']...` + - CM script: [get-mlperf-inference-loadgen](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-loadgen) + * get,generic-python-lib,_onnxruntime + * `if (CM_MLPERF_BACKEND == onnxruntime AND CM_MLPERF_DEVICE == cpu)` + * CM names: `--adr.['onnxruntime']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnxruntime_gpu + * `if (CM_MLPERF_BACKEND == onnxruntime AND CM_MLPERF_DEVICE == gpu)` + * CM names: `--adr.['onnxruntime']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnx + * `if (CM_MLPERF_BACKEND == onnxruntime)` + * CM names: `--adr.['onnx']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torch + * `if (CM_MLPERF_BACKEND == pytorch AND CM_MLPERF_DEVICE == cpu)` + * CM names: `--adr.['torch']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torchvision + * `if (CM_MLPERF_BACKEND == pytorch AND CM_MLPERF_DEVICE == cpu)` + * CM names: `--adr.['torchvision']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torch_cuda + * `if (CM_MLPERF_BACKEND == pytorch AND CM_MLPERF_DEVICE == gpu)` + * CM names: `--adr.['torch']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torchvision_cuda + * `if (CM_MLPERF_BACKEND == pytorch AND CM_MLPERF_DEVICE == gpu)` + * CM names: `--adr.['torchvision']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,ml-model,resnet50,_onnx + * `if (CM_MODEL == resnet50)` + - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50) + * get,ml-model,retinanet,_onnx,_fp32 + * `if (CM_MODEL == retinanet)` + - CM script: [get-ml-model-retinanet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet) + * get,ml-model,retinanet,_onnx,_fp32 + * `if 
(CM_MODEL == retinanet)` + - CM script: [get-ml-model-retinanet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-loadgen-generic-python/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-loadgen-generic-python/_cm.yaml) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-loadgen-generic-python/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-loadgen-generic-python/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-loadgen-generic-python/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-loadgen-generic-python/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-loadgen-generic-python/_cm.yaml) + +___ +### Script output +`cmr "python app generic loadgen [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_MLPERF_*` +#### New environment keys auto-detected from customize diff --git a/script/app-loadgen-generic-python/_cm.yaml b/script/app-loadgen-generic-python/_cm.yaml new file mode 100644 index 0000000000..08b63927ff --- /dev/null +++ b/script/app-loadgen-generic-python/_cm.yaml @@ -0,0 +1,322 @@ +# Identification of this CM script +alias: app-loadgen-generic-python +uid: d3d949cc361747a6 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf inference benchmark pipeline" + +developers: "[Gaz Iqbal](https://www.linkedin.com/in/gaziqbal), [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)" + + +# User-friendly tags to find this CM script +tags: + - app + - loadgen + - generic + - loadgen-generic + - python + +tags_help: "python app generic loadgen" + + +# Default environment +default_env: + CM_MLPERF_EXECUTION_MODE: parallel + CM_MLPERF_BACKEND: onnxruntime + +# Map script inputs to environment variables +input_mapping: + modelpath: CM_ML_MODEL_FILE_WITH_PATH + modelcodepath: CM_ML_MODEL_CODE_WITH_PATH + modelcfgpath: CM_ML_MODEL_CFG_WITH_PATH + modelcfg: CM_ML_MODEL_CFG + modelsamplepath: CM_ML_MODEL_SAMPLE_WITH_PATH + output_dir: CM_MLPERF_OUTPUT_DIR + scenario: CM_MLPERF_LOADGEN_SCENARIO + runner: CM_MLPERF_RUNNER + concurrency: CM_MLPERF_CONCURRENCY + ep: CM_MLPERF_EXECUTION_PROVIDER + intraop: CM_MLPERF_INTRAOP + interop: CM_MLPERF_INTEROP + execmode: CM_MLPERF_EXEC_MODE + samples: CM_MLPERF_LOADGEN_SAMPLES + loadgen_expected_qps: CM_MLPERF_LOADGEN_EXPECTED_QPS + loadgen_duration_sec: CM_MLPERF_LOADGEN_DURATION_SEC + +# New env keys exported from this script +new_env_keys: + - CM_MLPERF_* + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Get Python + - tags: get,python3 + names: + - python + - python3 + + # Extra package + - tags: get,generic-python-lib,_psutil + - tags: get,generic-python-lib,_package.numpy + + # Detect CUDA if required + - tags: get,cuda + enable_if_env: + CM_MLPERF_DEVICE: + - gpu + + # Install loadgen + - tags: get,loadgen + names: + - loadgen + + 
######################################################################## + # Install ML engines via CM + # ONNX + - enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + CM_MLPERF_DEVICE: + - cpu + tags: get,generic-python-lib,_onnxruntime + names: + - onnxruntime + + - enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + CM_MLPERF_DEVICE: + - gpu + tags: get,generic-python-lib,_onnxruntime_gpu + names: + - onnxruntime + + - enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + tags: get,generic-python-lib,_onnx + names: + - onnx + + ######################################################################## + # Install ML engines via CM + # PyTorch + + # CPU + + - enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + CM_MLPERF_DEVICE: + - cpu + tags: get,generic-python-lib,_torch + names: + - torch + + - enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + CM_MLPERF_DEVICE: + - cpu + tags: get,generic-python-lib,_torchvision + names: + - torchvision + + # CUDA/GPU + + - enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + CM_MLPERF_DEVICE: + - gpu + tags: get,generic-python-lib,_torch_cuda + names: + - torch + + - enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + CM_MLPERF_DEVICE: + - gpu + tags: get,generic-python-lib,_torchvision_cuda + names: + - torchvision + + + + ######################################################################## + # Install MLPerf models + - enable_if_env: + CM_MODEL: + - resnet50 + tags: get,ml-model,resnet50,_onnx + + - enable_if_env: + CM_MODEL: + - retinanet + tags: get,ml-model,retinanet,_onnx,_fp32 + + - enable_if_env: + CM_MODEL: + - retinanet + tags: get,ml-model,retinanet,_onnx,_fp32 + + + + +# Customize this CM script +variations: + + pytorch: + group: backend + env: + CM_MLPERF_BACKEND: + pytorch + + onnxruntime: + group: backend + default: true + env: + CM_MLPERF_BACKEND: + onnxruntime + + + + cpu: + group: + device + default: + true + env: + CM_MLPERF_DEVICE: + cpu + CM_MLPERF_EXECUTION_PROVIDER: + CPUExecutionProvider + + cuda: + docker: + all_gpus: 'yes' + base_image: nvcr.io/nvidia/pytorch:24.03-py3 + group: + device + env: + CM_MLPERF_DEVICE: + gpu + CM_MLPERF_EXECUTION_PROVIDER: + CUDAExecutionProvider + + + + retinanet: + group: + models + env: + CM_MODEL: retinanet + + resnet50: + group: + models + env: + CM_MODEL: resnet50 + + custom: + group: + models + env: + CM_MODEL: custom + + + + huggingface: + env: + CM_CUSTOM_MODEL_SOURCE: huggingface + + custom,huggingface: + deps: + - tags: get,ml-model,huggingface + names: + - hf-downloader + update_tags_from_env_with_prefix: + "_model-stub.": + - CM_ML_MODEL_STUB + + model-stub.#: + env: + CM_ML_MODEL_STUB: "#" + + + cmc: + env: + CM_CUSTOM_MODEL_CMC: yes + + + custom,cmc: + deps: + - tags: get,ml-model,cmc + names: + - cmc-model + + +input_description: + modelpath: + desc: Full path to file with model weights + modelcodepath: + desc: (for PyTorch models) Full path to file with model code and cmc.py + modelcfgpath: + desc: (for PyTorch models) Full path to JSON file with model cfg + modelsamplepath: + desc: (for PyTorch models) Full path to file with model sample in pickle format + ep: + desc: ONNX Execution provider + scenario: + desc: MLPerf LoadGen scenario + samples: + desc: Number of samples + default: 2 + runner: + desc: MLPerf runner + execmode: + desc: MLPerf exec mode + output_dir: + desc: MLPerf output directory + concurrency: + desc: MLPerf concurrency + intraop: + desc: MLPerf intra op threads + interop: + desc: MLPerf inter op threads + + +docker: + skip_run_cmd: 'no' + input_paths: + - 
modelpath + - modelsamplepath + - env.CM_ML_MODEL_FILE_WITH_PATH + - env.CM_ML_MODEL_CODE_WITH_PATH + - output_dir + skip_input_for_fake_run: + - modelpath + - modelsamplepath + - env.CM_ML_MODEL_FILE_WITH_PATH + - env.CM_ML_MODEL_CODE_WITH_PATH + - output_dir + - scenario + - runner + - concurrency + - intraop + - interop + - execmode + - samples + - modelcfg.num_classes + - modelcfg.config diff --git a/script/app-loadgen-generic-python/customize.py b/script/app-loadgen-generic-python/customize.py new file mode 100644 index 0000000000..c8810dcd7b --- /dev/null +++ b/script/app-loadgen-generic-python/customize.py @@ -0,0 +1,101 @@ +# Developer: Grigori Fursin + +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + + if 'CM_ML_MODEL_FILE_WITH_PATH' not in env: + return {'return': 1, 'error': 'Please select a variation specifying the model to run'} + + run_opts = env.get('CM_RUN_OPTS', '') + + if env.get('CM_MLPERF_BACKEND', '') != '': + run_opts +=" -b "+env['CM_MLPERF_BACKEND'] + + if env.get('CM_MLPERF_RUNNER', '') != '': + run_opts +=" -r "+env['CM_MLPERF_RUNNER'] + + if env.get('CM_MLPERF_CONCURRENCY', '') != '': + run_opts +=" --concurrency "+env['CM_MLPERF_CONCURRENCY'] + + if env.get('CM_MLPERF_EXECUTION_PROVIDER', '') != '': + run_opts +=" --ep "+env['CM_MLPERF_EXECUTION_PROVIDER'] + + if env.get('CM_MLPERF_INTRAOP', '') != '': + run_opts +=" --intraop "+env['CM_MLPERF_INTRAOP'] + + if env.get('CM_MLPERF_INTEROP', '') != '': + run_opts +=" --interop "+env['CM_MLPERF_INTEROP'] + + if env.get('CM_MLPERF_EXECMODE', '') != '': + run_opts +=" --execmode "+env['CM_MLPERF_EXECUTION_MODE'] + + if env.get('CM_MLPERF_LOADGEN_SAMPLES', '') != '': + run_opts +=" --samples "+env['CM_MLPERF_LOADGEN_SAMPLES'] + + if env.get('CM_MLPERF_LOADGEN_EXPECTED_QPS', '') != '': + run_opts +=" --loadgen_expected_qps "+env['CM_MLPERF_LOADGEN_EXPECTED_QPS'] + + if env.get('CM_MLPERF_LOADGEN_DURATION_SEC', '') != '': + run_opts +=" --loadgen_duration_sec "+env['CM_MLPERF_LOADGEN_DURATION_SEC'] + + if env.get('CM_MLPERF_OUTPUT_DIR', '') != '': + run_opts +=" --output "+env['CM_MLPERF_OUTPUT_DIR'] + + if env.get('CM_ML_MODEL_CODE_WITH_PATH', '') != '': + run_opts +=" --model_code "+env['CM_ML_MODEL_CODE_WITH_PATH'] + + + if env.get('CM_ML_MODEL_CFG_WITH_PATH', '') != '': + run_opts +=" --model_cfg "+env['CM_ML_MODEL_CFG_WITH_PATH'] + else: + # Check cfg from command line + cfg = env.get('CM_ML_MODEL_CFG', {}) + if len(cfg)>0: + del (env['CM_ML_MODEL_CFG']) + + import json, tempfile + tfile = tempfile.NamedTemporaryFile(mode="w+", suffix='.json') + + fd, tfile = tempfile.mkstemp(suffix='.json', prefix='cm-cfg-') + os.close(fd) + + with open(tfile, 'w') as fd: + json.dump(cfg, fd) + + env['CM_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE'] = tfile + + run_opts +=" --model_cfg " + tfile + + if env.get('CM_ML_MODEL_SAMPLE_WITH_PATH', '') != '': + run_opts +=" --model_sample_pickle "+env['CM_ML_MODEL_SAMPLE_WITH_PATH'] + + # Add path to file model weights at the end of command line + + run_opts += ' '+env['CM_ML_MODEL_FILE_WITH_PATH'] + + env['CM_RUN_OPTS'] = run_opts + + print ('') + print ('Assembled flags: {}'.format(run_opts)) + print ('') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + tfile = env.get('CM_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE', '') + + if tfile!='' and os.path.isfile(tfile): + os.remove(tfile) + + return {'return':0} diff --git a/script/app-loadgen-generic-python/run.bat 
b/script/app-loadgen-generic-python/run.bat new file mode 100644 index 0000000000..3d4b5d58b3 --- /dev/null +++ b/script/app-loadgen-generic-python/run.bat @@ -0,0 +1,4 @@ +rem native script + +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\main.py %CM_RUN_OPTS% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/app-loadgen-generic-python/run.sh b/script/app-loadgen-generic-python/run.sh new file mode 100644 index 0000000000..2a13312f07 --- /dev/null +++ b/script/app-loadgen-generic-python/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/main.py ${CM_RUN_OPTS} +test $? -eq 0 || exit 1 diff --git a/script/app-loadgen-generic-python/src/backend_onnxruntime.py b/script/app-loadgen-generic-python/src/backend_onnxruntime.py new file mode 100644 index 0000000000..e95e467b9f --- /dev/null +++ b/script/app-loadgen-generic-python/src/backend_onnxruntime.py @@ -0,0 +1,89 @@ +import typing + +import numpy as np +import onnx +import onnxruntime as ort + +from loadgen.model import Model, ModelFactory, ModelInput, ModelInputSampler + +xinput = input + +ONNX_TO_NP_TYPE_MAP = { + "tensor(bool)": bool, + "tensor(int)": np.int32, + "tensor(int32)": np.int32, + "tensor(int8)": np.int8, + "tensor(uint8)": np.uint8, + "tensor(int16)": np.int16, + "tensor(uint16)": np.uint16, + "tensor(uint64)": np.uint64, + "tensor(int64)": np.int64, + "tensor(float16)": np.float16, + "tensor(float)": np.float32, + "tensor(double)": np.float64, + "tensor(string)": np.string_, +} + + +class XModel(Model): + def __init__(self, session: ort.InferenceSession): + assert session is not None + self.session = session + + def predict(self, input: ModelInput): + output = self.session.run(None, input) + return output + + +class XModelFactory(ModelFactory): + def __init__( + self, + model_path: str, + execution_provider="CPUExecutionProvider", + execution_mode="", + intra_op_threads=0, + inter_op_threads=0, + model_code='', # Not used here + model_cfg={}, # Not used here + model_sample_pickle='' # Not used here + ): + self.model_path = model_path + self.execution_provider = execution_provider + self.session_options = ort.SessionOptions() + if execution_mode.lower() == "sequential": + self.session_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL + elif execution_mode.lower() == "parallel": + self.session_options.execution_mode = ort.ExecutionMode.ORT_PARALLEL + self.session_options.intra_op_num_threads = intra_op_threads + self.session_options.inter_op_num_threads = inter_op_threads + + def create(self) -> Model: + print ('Loading model: {}'.format(self.model_path)) +# model = onnx.load(self.model_path) + session_eps = [self.execution_provider] + session = ort.InferenceSession( +# model.SerializeToString(), self.session_options, providers=session_eps + self.model_path, self.session_options, providers=session_eps + ) + return XModel(session) + + +class XModelInputSampler(ModelInputSampler): + def __init__(self, model_factory: XModelFactory): + model = model_factory.create() + input_defs = model.session.get_inputs() + self.inputs: typing.Dict[str, typing.Tuple[np.dtype, typing.List[int]]] = dict() + for input in input_defs: + input_name = input.name + input_type = ONNX_TO_NP_TYPE_MAP[input.type] + input_dim = [ + 1 if (x is None or (type(x) is str)) else x for x in input.shape + ] + self.inputs[input_name] = (input_type, input_dim) + + def sample(self, id_: int) -> ModelInput: + input = dict() + for name, spec in self.inputs.items(): + val = 
np.random.random_sample(spec[1]).astype(spec[0]) + input[name] = val + return input diff --git a/script/app-loadgen-generic-python/src/backend_pytorch.py b/script/app-loadgen-generic-python/src/backend_pytorch.py new file mode 100644 index 0000000000..1fef350b44 --- /dev/null +++ b/script/app-loadgen-generic-python/src/backend_pytorch.py @@ -0,0 +1,126 @@ +# Developer: Grigori Fursin + +import typing +import importlib +import os +import psutil + +import utils + +import numpy as np + +import torch + +from loadgen.model import Model, ModelFactory, ModelInput, ModelInputSampler + + +xinput = input + +class XModel(Model): + def __init__(self, session): + assert session is not None + self.session = session + + def predict(self, input: ModelInput): + + print ('') + utils.print_host_memory_use('Host memory used') + + print ('Running inference ...') + with torch.no_grad(): + output = self.session(input) + + utils.print_host_memory_use('Host memory used') + + return output + + +class XModelFactory(ModelFactory): + def __init__( + self, + model_path: str, + execution_provider="CPUExecutionProvider", + execution_mode="", + intra_op_threads=0, + inter_op_threads=0, + model_code='', + model_cfg={}, + model_sample_pickle='' + ): + + self.model_path = model_path + self.model_code = model_code + self.model_cfg = model_cfg + self.model_sample_pickle = model_sample_pickle + self.execution_provider = execution_provider + + + def create(self) -> Model: + print ('') + print ('Loading model: {}'.format(self.model_path)) + + if self.execution_provider == 'CPUExecutionProvider': + torch_provider = 'cpu' + elif self.execution_provider == 'CUDAExecutionProvider': + torch_provider = 'cuda' + if not torch.cuda.is_available(): + raise Exception('Error: CUDA is forced but not available or installed in PyTorch!') + else: + raise Exception('Error: execution provider is unknown ({})!'.format(self.execution_provider)) + + checkpoint = torch.load(self.model_path, map_location=torch.device(torch_provider)) + + if self.model_code == '': + raise Exception('Error: path to model code was not provided!') + + if self.model_sample_pickle == '': + raise Exception('Error: path to model sample pickle was not provided!') + + # Load sample + import pickle + with open (self.model_sample_pickle, 'rb') as handle: + self.input_sample = pickle.load(handle) + + # Check if has CM connector + cm_model_module = os.path.join(self.model_code, 'cmc.py') + if not os.path.isfile(cm_model_module): + raise Exception('cm.py interface for a PyTorch model was not found in {}'.format(self.model_code)) + + print ('') + print ('Collective Mind Connector for the model found: {}'.format(cm_model_module)) + + + # Load CM interface for the model + import sys + sys.path.insert(0, self.model_code) + model_module=importlib.import_module('cmc') + del(sys.path[0]) + + # Init model + if len(self.model_cfg)>0: + print ('Model cfg: {}'.format(self.model_cfg)) + + r = model_module.model_init(checkpoint, self.model_cfg) + if r['return']>0: + raise Exception('Error: {}'.format(r['error'])) + + model = r['model'] + + if torch_provider=='cuda': + model.cuda() + + model.eval() + + return XModel(model) + + +class XModelInputSampler(ModelInputSampler): + def __init__(self, model_factory: XModelFactory): + model = model_factory.create() + self.input_sample = model_factory.input_sample + return + + def sample(self, id_: int) -> ModelInput: + input = self.input_sample + return input + diff --git a/script/app-loadgen-generic-python/src/loadgen/harness.py 
b/script/app-loadgen-generic-python/src/loadgen/harness.py new file mode 100644 index 0000000000..69edd2ba95 --- /dev/null +++ b/script/app-loadgen-generic-python/src/loadgen/harness.py @@ -0,0 +1,76 @@ +import abc +import contextlib +import logging +import typing + +import mlperf_loadgen + +from loadgen.model import ModelInput, ModelInputSampler + +logger = logging.getLogger(__name__) + + +QueryInput = typing.Dict[int, ModelInput] +QueryResult = typing.Dict[int, typing.Any] + + +class ModelRunner(contextlib.AbstractContextManager): + @abc.abstractmethod + def issue_query(self, query: QueryInput) -> typing.Optional[QueryResult]: + pass + + # Optional method to flush pending queries + def flush_queries(self) -> typing.Optional[QueryResult]: + pass + + def __exit__(self, _exc_type, _exc_value, _traceback): + logger.info(f"{self} : Exited") + return None + + +class Harness: + def __init__(self, sampler: ModelInputSampler, runner: ModelRunner): + self.sampler = sampler + self.runner = runner + self.samples = None + + def load_query_samples(self, query_samples): + assert self.samples is None + self.samples = dict() + for query_id in query_samples: + self.samples[query_id] = self.sampler.sample(query_id) + logger.info(f"Loaded {len(self.samples)} samples") + + def unload_query_samples(self, _query_samples): + assert self.samples is not None + logger.info(f"Unloaded samples") + self.samples = None + + def issue_query(self, query_samples): + query_input = dict() + for q in query_samples: + # logger.info(f"Query Id: {q.id}, SampleIndex: {q.index}") + input = self.samples[q.index] + query_input[q.id] = input + result = self.runner.issue_query(query_input) + logger.info(f"Queries issued {len(query_input)}") + if result is not None: + self._complete_query(result) + + # Called after the last call to issue queries in a series is made. + # Client can use this to flush any deferred queries rather than waiting for a timeout. 
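+    # Note: _complete_query below reports empty payloads to LoadGen
+    # (response_data=0, response_size=0); this is sufficient for
+    # PerformanceOnly runs, where no accuracy data is checked.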
+ def flush_queries(self): + result = self.runner.flush_queries() + logger.info(f"Queries flushed") + if result is not None: + self._complete_query(result) + + def _complete_query(self, result: QueryResult): + responses = [] + for query_id, _query_result in result.items(): + response_data, response_size = 0, 0 + response = mlperf_loadgen.QuerySampleResponse( + query_id, response_data, response_size + ) + responses.append(response) + mlperf_loadgen.QuerySamplesComplete(responses) diff --git a/script/app-loadgen-generic-python/src/loadgen/model.py b/script/app-loadgen-generic-python/src/loadgen/model.py new file mode 100644 index 0000000000..8bb7dbf04c --- /dev/null +++ b/script/app-loadgen-generic-python/src/loadgen/model.py @@ -0,0 +1,24 @@ +import abc +import typing + +import numpy as np + +ModelInput = typing.Dict[str, np.array] + + +class Model(abc.ABC): + @abc.abstractmethod + def predict(self, input: ModelInput) -> typing.Any: + pass + + +class ModelFactory(abc.ABC): + @abc.abstractmethod + def create(self) -> Model: + pass + + +class ModelInputSampler(abc.ABC): + @abc.abstractmethod + def sample(self, id: int) -> ModelInput: + pass diff --git a/script/app-loadgen-generic-python/src/loadgen/runners.py b/script/app-loadgen-generic-python/src/loadgen/runners.py new file mode 100644 index 0000000000..1b78acba15 --- /dev/null +++ b/script/app-loadgen-generic-python/src/loadgen/runners.py @@ -0,0 +1,186 @@ +import abc +import concurrent.futures +import logging +import multiprocessing +import threading +import typing + +from loadgen.harness import ModelRunner, QueryInput, QueryResult +from loadgen.model import Model, ModelFactory, ModelInput + +logger = logging.getLogger(__name__) + +######## Runner implementations + + +class ModelRunnerInline(ModelRunner): + def __init__(self, model_factory: ModelFactory): + self.model = model_factory.create() + + def issue_query(self, queries: QueryInput) -> typing.Optional[QueryResult]: + result = dict() + for query_id, model_input in queries.items(): + output = self.model.predict(model_input) + result[query_id] = output + return result + + +class ModelRunnerPoolExecutor(ModelRunner): + def __init__(self): + self.executor: concurrent.futures.Executor = None + self.futures = None + + def __exit__(self, _exc_type, _exc_value, _traceback): + if self.executor: + self.executor.shutdown(True) + return super().__exit__(_exc_type, _exc_value, _traceback) + + def issue_query(self, queries: QueryInput) -> typing.Optional[QueryResult]: + self.futures = dict() + predictor_fn = self.get_predictor() + for query_id, model_input in queries.items(): + f = self.executor.submit(predictor_fn, model_input) + self.futures[f] = query_id + return None + + def flush_queries(self) -> typing.Optional[QueryResult]: + result = dict() + for future in concurrent.futures.as_completed(self.futures.keys()): + query_id = self.futures[future] + query_result = future.result() + result[query_id] = query_result + return result + + @abc.abstractmethod + def get_predictor(self) -> typing.Callable[[ModelInput], typing.Any]: + pass + + +class ModelRunnerThreadPoolExecutor(ModelRunnerPoolExecutor): + def __init__(self, model_factory: ModelFactory, max_concurrency: int): + super().__init__() + self.model = model_factory.create() + self.max_concurrency = max_concurrency + + def __enter__(self): + self.executor = concurrent.futures.ThreadPoolExecutor( + max_workers=self.max_concurrency, thread_name_prefix="LoadGen" + ) + return self + + def get_predictor(self) -> typing.Callable[[ModelInput], 
typing.Any]: + return self.model.predict + + +class ModelRunnerThreadPoolExecutorWithTLS(ModelRunnerPoolExecutor): + tls: threading.local + + def __init__(self, model_factory: ModelFactory, max_concurrency: int): + super().__init__() + self.model_factory = model_factory + self.max_concurrency = max_concurrency + + def __enter__(self): + self.executor = concurrent.futures.ThreadPoolExecutor( + max_workers=self.max_concurrency, + thread_name_prefix="LoadGen", + initializer=ModelRunnerThreadPoolExecutorWithTLS._tls_init, + initargs=(self.model_factory,), + ) + return self + + def get_predictor(self) -> typing.Callable[[ModelInput], typing.Any]: + return ModelRunnerThreadPoolExecutorWithTLS._tls_predict + + @staticmethod + def _tls_init(model_factory: ModelFactory): + ModelRunnerThreadPoolExecutorWithTLS.tls = threading.local() + ModelRunnerThreadPoolExecutorWithTLS.tls.model = model_factory.create() + + @staticmethod + def _tls_predict(input: ModelInput): + return ModelRunnerThreadPoolExecutorWithTLS.tls.model.predict(input) + + +class ModelRunnerProcessPoolExecutor(ModelRunnerPoolExecutor): + _model: Model + + def __init__(self, model_factory: ModelFactory, max_concurrency: int): + super().__init__() + self.max_concurrency = max_concurrency + ModelRunnerProcessPoolExecutor._model = model_factory.create() + + def __enter__(self): + self.executor = concurrent.futures.ProcessPoolExecutor( + max_workers=self.max_concurrency + ) + return self + + def get_predictor(self) -> typing.Callable[[ModelInput], typing.Any]: + return ModelRunnerProcessPoolExecutor._predict + + @staticmethod + def _predict(input: ModelInput): + result = ModelRunnerProcessPoolExecutor._model.predict(input) + return result + + +class ModelRunnerMultiProcessingPool(ModelRunner): + _model: Model + + def __init__( + self, + model_factory: ModelFactory, + max_concurrency: int, + ): + self.max_concurrency = max_concurrency + self.task: multiprocessing.ApplyResult = None + ModelRunnerMultiProcessingPool._model = model_factory.create() + + def __enter__(self): + self.pool = multiprocessing.Pool(self.max_concurrency) + + def __exit__(self, _exc_type, _exc_value, _traceback): + if self.pool: + self.pool.terminate() + return super().__exit__(_exc_type, _exc_value, _traceback) + + def issue_query(self, queries: QueryInput) -> typing.Optional[QueryResult]: + if hasattr(self, "tasks"): + assert len(self.tasks) == 0 + for query_id, model_input in queries.items(): + task = self.pool.apply_async( + ModelRunnerMultiProcessingPool._predict, (model_input,) + ) + self.tasks[task] = query_id + else: + assert self.task is None + inputs = [ + [query_id, model_input] for query_id, model_input in queries.items() + ] + self.task = self.pool.starmap_async( + ModelRunnerMultiProcessingPool._predict_with_id, inputs + ) + return None + + def flush_queries(self) -> typing.Optional[QueryResult]: + if hasattr(self, "tasks"): + result = dict() + for task, query_id in self.tasks.items(): + task_result = task.get() + result[query_id] = task_result + return result + else: + task_result = self.task.get() + result = {query_id: query_result for query_id, query_result in task_result} + return result + + @staticmethod + def _predict(input: ModelInput): + result = ModelRunnerMultiProcessingPool._model.predict(input) + return result + + @staticmethod + def _predict_with_id(query_id: int, input: ModelInput): + result = ModelRunnerMultiProcessingPool._model.predict(input) + return (query_id, result) diff --git a/script/app-loadgen-generic-python/src/main.py 
new file mode 100644
index 0000000000..0055ecaf2f
--- /dev/null
+++ b/script/app-loadgen-generic-python/src/main.py
@@ -0,0 +1,238 @@
+import argparse
+import contextlib
+import logging
+import os
+import re
+import typing
+
+import mlperf_loadgen
+import psutil
+
+from loadgen.harness import Harness, ModelRunner
+from loadgen.runners import (
+    ModelRunnerInline,
+    ModelRunnerMultiProcessingPool,
+    ModelRunnerProcessPoolExecutor,
+    ModelRunnerThreadPoolExecutor,
+    ModelRunnerThreadPoolExecutorWithTLS,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def main(
+    backend: str,
+    model_path: str,
+    model_code: str,
+    model_cfg: str,
+    model_sample_pickle: str,
+    output_path: typing.Optional[str],
+    runner_name: str,
+    runner_concurrency: int,
+    execution_provider: str,
+    execution_mode: str,
+    intraop_threads: int,
+    interop_threads: int,
+    samples: int,
+    loadgen_expected_qps: float,
+    loadgen_duration_sec: float
+):
+
+    print('=====================================================================')
+
+    if backend == 'onnxruntime':
+        from backend_onnxruntime import XModelFactory
+        from backend_onnxruntime import XModelInputSampler
+    elif backend == 'pytorch':
+        from backend_pytorch import XModelFactory
+        from backend_pytorch import XModelInputSampler
+    else:
+        raise ValueError(f"Unsupported backend '{backend}'")
+
+    # Load model cfg
+    model_cfg_dict = {}
+    if model_cfg != '':
+        import json
+
+        with open(model_cfg) as mc:
+            model_cfg_dict = json.load(mc)
+
+    model_factory = XModelFactory(
+        model_path,
+        execution_provider,
+        execution_mode,
+        interop_threads,
+        intraop_threads,
+        model_code,
+        model_cfg_dict,
+        model_sample_pickle
+    )
+
+    model_dataset = XModelInputSampler(model_factory)
+
+    runner: typing.Optional[ModelRunner] = None
+    if runner_name == "inline":
+        runner = ModelRunnerInline(model_factory)
+    elif runner_name == "threadpool":
+        runner = ModelRunnerThreadPoolExecutor(
+            model_factory, max_concurrency=runner_concurrency
+        )
+    elif runner_name == "threadpool+replication":
+        runner = ModelRunnerThreadPoolExecutorWithTLS(
+            model_factory, max_concurrency=runner_concurrency
+        )
+    elif runner_name == "processpool":
+        runner = ModelRunnerProcessPoolExecutor(
+            model_factory, max_concurrency=runner_concurrency
+        )
+    elif runner_name == "processpool+mp":
+        runner = ModelRunnerMultiProcessingPool(
+            model_factory, max_concurrency=runner_concurrency
+        )
+    else:
+        raise ValueError(f"Invalid runner {runner_name}")
+
+    settings = mlperf_loadgen.TestSettings()
+
+    settings.scenario = mlperf_loadgen.TestScenario.Offline
+    settings.mode = mlperf_loadgen.TestMode.PerformanceOnly
+    settings.offline_expected_qps = loadgen_expected_qps
+    settings.min_query_count = samples
+    settings.max_query_count = samples
+    settings.min_duration_ms = loadgen_duration_sec * 1000
+    # Duration isn't enforced in the Offline scenario.
+    # Instead, it determines the total sample count via
+    #   target_sample_count = slack (1.1) * target_qps * target_duration
+    #   samples_per_query = max(min_query_count, target_sample_count)
+
+    output_path = "results" if not output_path else output_path
+    output_path = os.path.join(output_path, os.path.basename(model_path), runner_name)
+    os.makedirs(output_path, exist_ok=True)
+
+    output_settings = mlperf_loadgen.LogOutputSettings()
+    output_settings.outdir = output_path
+    output_settings.copy_summary_to_stdout = True
+
+    log_settings = mlperf_loadgen.LogSettings()
+    log_settings.log_output = output_settings
+    log_settings.enable_trace = False
+
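+    # LoadGen drives the run through two SUT callbacks (issue_query and
+    # flush_queries) and two QSL callbacks (load_query_samples and
+    # unload_query_samples); the Harness object below provides all four.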
logger.info(f"Model: {model_path}") + logger.info(f"Runner: {runner_name}, Concurrency: {runner_concurrency}") + logger.info(f"Results: {output_path}") + + with contextlib.ExitStack() as stack: + stack.enter_context(runner) + harness = Harness(model_dataset, runner) + + query_sample_libary = mlperf_loadgen.ConstructQSL( + samples, # Total sample count + samples, # Num to load in RAM at a time + harness.load_query_samples, + harness.unload_query_samples, + ) + system_under_test = mlperf_loadgen.ConstructSUT( + harness.issue_query, harness.flush_queries + ) + + print ('=====================================================================') + logger.info("Test Started") + + mlperf_loadgen.StartTestWithLogSettings( + system_under_test, query_sample_libary, settings, log_settings + ) + + logger.info("Test Finished") + print ('=====================================================================') + + # Parse output file + output_summary = {} + output_summary_path = os.path.join(output_path, "mlperf_log_summary.txt") + with open(output_summary_path, "r") as output_summary_file: + for line in output_summary_file: + m = re.match(r"^\s*([\w\s.\(\)\/]+)\s*\:\s*([\w\+\.]+).*", line) + if m: + output_summary[m.group(1).strip()] = m.group(2).strip() + logger.info("Observed QPS: " + output_summary.get("Samples per second")) + logger.info("Result: " + output_summary.get("Result is")) + + mlperf_loadgen.DestroySUT(system_under_test) + mlperf_loadgen.DestroyQSL(query_sample_libary) + logger.info("Test Completed") + print ('=====================================================================') + + +if __name__ == "__main__": + print ('') + + logging.basicConfig( + level=logging.DEBUG, + format="%(asctime)s %(levelname)s %(threadName)s - %(name)s %(funcName)s: %(message)s", + ) + + parser = argparse.ArgumentParser() + parser.add_argument( + "model_path", help="path to input model", default="models/yolov5s.onnx" + ) + parser.add_argument("-b", "--backend", help="backend", default="onnxruntime") + parser.add_argument("-o", "--output", help="path to store loadgen results") + parser.add_argument( + "-r", + "--runner", + help="model runner", + choices=[ + "inline", + "threadpool", + "threadpool+replication", + "processpool", + "processpool+mp", + ], + default="inline", + ) + parser.add_argument( + "--concurrency", + help="concurrency count for runner", + default=psutil.cpu_count(False), + type=int, + ) + parser.add_argument( + "--ep", help="Execution Provider", default="CPUExecutionProvider" + ) + parser.add_argument("--intraop", help="IntraOp threads", default=0, type=int) + parser.add_argument("--interop", help="InterOp threads", default=0, type=int) + parser.add_argument( + "--execmode", + help="Execution Mode", + choices=["sequential", "parallel"], + default="sequential", + ) + parser.add_argument( + "--samples", + help="number of samples", + default=100, + type=int, + ) + parser.add_argument("--loadgen_expected_qps", help="Expected QPS", default=1, type=float) + parser.add_argument("--loadgen_duration_sec", help="Expected duration in sec.", default=1, type=float) + parser.add_argument("--model_code", help="(for PyTorch models) path to model code with cmc.py", default="") + parser.add_argument("--model_cfg", help="(for PyTorch models) path to model's configuration in JSON file", default="") + parser.add_argument("--model_sample_pickle", help="(for PyTorch models) path to a model sample in pickle format", default="") + + args = parser.parse_args() + main( + args.backend, + args.model_path, + args.model_code, 
+        args.model_cfg,
+        args.model_sample_pickle,
+        args.output,
+        args.runner,
+        args.concurrency,
+        args.ep,
+        args.execmode,
+        args.intraop,
+        args.interop,
+        args.samples,
+        args.loadgen_expected_qps,
+        args.loadgen_duration_sec
+    )
diff --git a/script/app-loadgen-generic-python/src/utils.py b/script/app-loadgen-generic-python/src/utils.py
new file mode 100644
index 0000000000..8c182650c5
--- /dev/null
+++ b/script/app-loadgen-generic-python/src/utils.py
@@ -0,0 +1,16 @@
+# Developer: Grigori Fursin
+
+import os
+
+import psutil
+
+
+def print_host_memory_use(text=''):
+    # Report the resident set size (RSS) of the current process in MB
+    process = psutil.Process(os.getpid())
+    memory_use = process.memory_info().rss
+
+    if text == '':
+        text = 'host memory use'
+
+    print('{}: {} MB'.format(text, int(memory_use / 1000000)))
diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat b/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat
new file mode 100644
index 0000000000..c7154832fb
--- /dev/null
+++ b/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat
@@ -0,0 +1,7 @@
+rem set CM_CACHE=--no-cache
+
+set CM_DOCKER_ORG=modularcm
+set CM_DOCKER_NAME=loadgen-generic-python
+set CM_OS_NAME=ubuntu
+set CM_HW_TARGET=cpu
+set CM_OS_VERSION=22.04
diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh b/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh
new file mode 100644
index 0000000000..5f49d3be9b
--- /dev/null
+++ b/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh
@@ -0,0 +1,10 @@
+#! /bin/bash
+
+#export CM_CACHE="--no-cache"
+
+export CM_DOCKER_ORG=modularcm
+export CM_DOCKER_NAME="loadgen-generic-python"
+export CM_OS_NAME="ubuntu"
+export CM_HW_TARGET="cpu"
+export CM_OS_VERSION="22.04"
+
diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat b/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat
new file mode 100644
index 0000000000..f51ea46b64
--- /dev/null
+++ b/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat
@@ -0,0 +1,16 @@
+call _common.bat
+
+docker build -f %CM_DOCKER_NAME%--%CM_OS_NAME%-%CM_HW_TARGET%.Dockerfile ^
+ -t %CM_DOCKER_ORG%/%CM_DOCKER_NAME%-%CM_HW_TARGET%:%CM_OS_NAME%-%CM_OS_VERSION% ^
+ --build-arg cm_os_name=%CM_OS_NAME% ^
+ --build-arg cm_hw_target=%CM_HW_TARGET% ^
+ --build-arg cm_os_version=%CM_OS_VERSION% ^
+ --build-arg cm_version="" ^
+ --build-arg cm_automation_repo="ctuning@mlcommons-ck" ^
+ --build-arg cm_automation_checkout="" ^
+ --build-arg cm_python_version="3.10.8" ^
+ --build-arg cm_mlperf_inference_loadgen_version="" ^
+ --build-arg cm_mlperf_inference_src_tags="" ^
+ --build-arg cm_mlperf_inference_src_version="" ^
+ --build-arg CM_ONNXRUNTIME_VERSION="1.13.1" ^
+ %CM_CACHE% .
diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh b/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh
new file mode 100644
index 0000000000..186a0eae94
--- /dev/null
+++ b/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh
@@ -0,0 +1,18 @@
+#! /bin/bash
+
+. 
./_common.sh + +time docker build -f ${CM_DOCKER_NAME}--${CM_OS_NAME}-${CM_HW_TARGET}.Dockerfile \ + -t ${CM_DOCKER_ORG}/${CM_DOCKER_NAME}-${CM_HW_TARGET}:${CM_OS_NAME}-${CM_OS_VERSION} \ + --build-arg cm_os_name=${CM_OS_NAME} \ + --build-arg cm_hw_target=${CM_HW_TARGET} \ + --build-arg cm_os_version=${CM_OS_VERSION} \ + --build-arg cm_version="" \ + --build-arg cm_automation_repo="ctuning@mlcommons-ck" \ + --build-arg cm_automation_checkout="" \ + --build-arg cm_python_version="3.10.8" \ + --build-arg cm_mlperf_inference_loadgen_version="" \ + --build-arg cm_mlperf_inference_src_tags="" \ + --build-arg cm_mlperf_inference_src_version="" \ + --build-arg CM_ONNXRUNTIME_VERSION="1.13.1" \ + ${CM_CACHE} . diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile b/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile new file mode 100644 index 0000000000..c82296c664 --- /dev/null +++ b/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile @@ -0,0 +1,96 @@ +# Modular MLPerf container with the MLCommons CM automation meta-framework + +# Preparing OS +ARG cm_os_name="ubuntu" +ARG cm_os_version="22.04" + +FROM ${cm_os_name}:${cm_os_version} + +# Maintained by the MLCommons taskforce on automation and reproducibility and OctoML +LABEL github="https://github.com/mlcommons/ck" +LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" + +# Customization +ARG CM_GH_TOKEN + +# Prepare shell and entry point +SHELL ["/bin/bash", "-c"] +ENTRYPOINT ["/bin/bash", "-c"] + +# Install system dependencies +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +RUN apt-get update -y +RUN apt-get install -y lsb-release +RUN apt-get install -y python3 python3-pip git wget sudo + +# Extra python deps +RUN python3 -m pip install requests + +# CM version +ARG cm_version="" +ENV CM_VERSION="${cm_version}" +RUN if [ "${CM_VERSION}" != "" ] ; then \ + python3 -m pip install cmind==${CM_VERSION} ; \ + else \ + python3 -m pip install cmind ; \ + fi + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +# See example in https://github.com/mlcommons/GaNDLF/blob/master/Dockerfile-CPU +RUN groupadd --gid 10001 cm +RUN useradd --uid 10000 -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + +USER cmuser:cm +WORKDIR /home/cmuser + +# Check CM installation +RUN lsb_release -a > sys-version-os.log +RUN uname -a > sys-version-kernel.log +RUN python3 --version > sys-version-python3.log +RUN cm version > sys-version-cm.log + +################################################################################ +# Get CM automation repository +ARG cm_automation_repo="mlcommons@ck" +ARG cm_automation_repo_checkout="" +ENV CM_AUTOMATION_REPO=${cm_automation_repo} +ENV CM_AUTOMATION_REPO_CHECKOUT=${cm_automation_repo_checkout} +RUN echo ${CM_AUTOMATION_REPO} +RUN cm pull repo ${CM_AUTOMATION_REPO} --checkout=${CM_AUTOMATION_REPO_CHECKOUT} + +################################################################################ +# Install CM system dependencies +RUN cm run script "get sys-utils-cm" --quiet + +# Detect/install python +ARG cm_python_version="" +RUN cm run script "get python3" --version=${cm_python_version} + 
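+# The layers below pre-build the heaviest dependencies (MLPerf loadgen,
+# ONNX Runtime) and pre-stage the loadgen app with --fake_run so that
+# `cm run script` can start quickly inside the running container.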
+################################################################################ +# Build MLPerf loadgen +ARG cm_mlperf_inference_loadgen_version="" +RUN cm run script "get mlperf loadgen" --adr.compiler.tags=gcc --version=${cm_mlperf_inference_loadgen_version} --adr.inference-src-loadgen.version=${cm_mlperf_inference_loadgen_version} -v + +################################################################################ +# Install ONNX runtime +ARG CM_ONNXRUNTIME_VERSION="" +RUN cm run script "get generic-python-lib _onnxruntime" --version=${CM_ONNXRUNTIME_VERSION} + +ARG CM_MLPERF_CHOICE_BACKEND="onnxruntime" +ARG CM_MLPERF_CHOICE_DEVICE="cpu" + +RUN cm run script --tags=python,app,loadgen-generic,_onnxruntime,_resnet50 \ + --adr.compiler.tags=gcc \ + --adr.python.version_min=3.8 \ + --quiet \ + --fake_run + +################################################################################ +# CMD entry point +CMD /bin/bash diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile b/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile new file mode 100644 index 0000000000..195acdec6a --- /dev/null +++ b/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile @@ -0,0 +1,33 @@ +FROM ubuntu:20.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +ENV PATH=${PATH}:$HOME/.local/bin +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo ctuning@mlcommons-ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +RUN cm run script --quiet --tags=python,app,loadgen-generic,_onnxruntime,_resnet50 --fake_run diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat b/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat new file mode 100644 index 0000000000..171aeecab9 --- /dev/null +++ b/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat @@ -0,0 +1,3 @@ +call _common.bat + +docker run -it %CM_DOCKER_ORG%/%CM_DOCKER_NAME%-%CM_HW_TARGET%:%CM_OS_NAME%-%CM_OS_VERSION% diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh b/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh new file mode 100644 index 0000000000..c82d4b7b12 --- /dev/null +++ b/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh @@ -0,0 +1,3 @@ +. 
./_common.sh
+
+docker run -it ${CM_DOCKER_ORG}/${CM_DOCKER_NAME}-${CM_HW_TARGET}:${CM_OS_NAME}-${CM_OS_VERSION}
diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/README.md b/script/app-mlperf-inference-ctuning-cpp-tflite/README.md
new file mode 100644
index 0000000000..a36fc20dc8
--- /dev/null
+++ b/script/app-mlperf-inference-ctuning-cpp-tflite/README.md
@@ -0,0 +1,368 @@
+Automatically generated README for this automation recipe: **app-mlperf-inference-ctuning-cpp-tflite**
+
+Category: **Modular MLPerf inference benchmark pipeline**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-mlperf-inference-ctuning-cpp-tflite,415904407cca404a) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-ctuning-cpp-tflite)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *app,mlperf,inference,tflite-cpp*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+```cmr "app mlperf inference tflite-cpp" --help```
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=app,mlperf,inference,tflite-cpp`
+
+`cm run script --tags=app,mlperf,inference,tflite-cpp[,variations] [--input_flags]`
+
+*or*
+
+`cmr "app mlperf inference tflite-cpp"`
+
+`cmr "app mlperf inference tflite-cpp [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'app,mlperf,inference,tflite-cpp',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
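A slightly fuller sketch of the same call (illustrative values only: the `mode`, `scenario` and `count` keys follow this script's input mapping listed under "Script flags mapped to environment" below, and variation tags such as `_resnet50` are appended to the base tags):

```python
import cmind

# Hypothetical end-to-end invocation of this script via the CM Python API;
# the flag names mirror the script's input_mapping documented below.
r = cmind.access({
    'action': 'run',
    'automation': 'script',
    'tags': 'app,mlperf,inference,tflite-cpp,_tflite,_cpu,_resnet50',
    'out': 'con',
    'mode': 'accuracy',          # -> CM_MLPERF_LOADGEN_MODE
    'scenario': 'SingleStream',  # -> CM_MLPERF_LOADGEN_SCENARIO
    'count': 100,                # -> CM_MLPERF_LOADGEN_QUERY_COUNT
})

if r['return'] > 0:
    print(r['error'])
```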
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="app,mlperf,inference,tflite-cpp"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=app,mlperf,inference,tflite-cpp) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "app mlperf inference tflite-cpp[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_armnn` + - Environment variables: + - *CM_MLPERF_TFLITE_USE_ARMNN*: `yes` + - *CM_TMP_LINK_LIBS*: `tensorflowlite,armnn` + - Workflow: + * `_armnn,tflite` + - Environment variables: + - *CM_MLPERF_BACKEND*: `armnn_tflite` + - *CM_MLPERF_BACKEND_VERSION*: `<<>>` + - *CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX*: `tflite_armnn_cpp` + - *CM_TMP_LINK_LIBS*: `tensorflowlite,armnn,armnnTfLiteParser` + - *CM_TMP_SRC_FOLDER*: `armnn` + - Workflow: + +
+ + + * Group "**backend**" +
+ Click here to expand this section. + + * `_tf` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tf` + - Workflow: + * **`_tflite`** (default) + - Environment variables: + - *CM_MLPERF_BACKEND*: `tflite` + - *CM_MLPERF_BACKEND_VERSION*: `master` + - *CM_TMP_LINK_LIBS*: `tensorflowlite` + - *CM_TMP_SRC_FOLDER*: `src` + - Workflow: + +
+ + + * Group "**device**" +
+ Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `cpu` + - Workflow: + * `_gpu` + - Environment variables: + - *CM_MLPERF_DEVICE*: `gpu` + - *CM_MLPERF_DEVICE_LIB_NAMESPEC*: `cudart` + - Workflow: + +
+ + + * Group "**loadgen-scenario**" +
+ Click here to expand this section. + + * **`_singlestream`** (default) + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `SingleStream` + - Workflow: + +
+ + + * Group "**model**" +
+ Click here to expand this section. + + * `_efficientnet` + - Environment variables: + - *CM_MODEL*: `efficientnet` + - Workflow: + * `_mobilenet` + - Environment variables: + - *CM_MODEL*: `mobilenet` + - Workflow: + * **`_resnet50`** (default) + - Environment variables: + - *CM_MODEL*: `resnet50` + - Workflow: + +
+ + + * Group "**optimization-target**" +
+ Click here to expand this section. + + * `_use-neon` + - Environment variables: + - *CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1*: `using_neon` + - *CM_MLPERF_TFLITE_USE_NEON*: `1` + - Workflow: + * `_use-opencl` + - Environment variables: + - *CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1*: `using_opencl` + - *CM_MLPERF_TFLITE_USE_OPENCL*: `1` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Environment variables: + - *CM_MLPERF_MODEL_PRECISION*: `float32` + - Workflow: + * `_int8` + - Environment variables: + - *CM_DATASET_COMPRESSED*: `on` + - *CM_MLPERF_MODEL_PRECISION*: `int8` + - Workflow: + * `_uint8` + - Environment variables: + - *CM_DATASET_COMPRESSED*: `on` + - *CM_MLPERF_MODEL_PRECISION*: `uint8` + - Workflow: + +
+ + +#### Default variations + +`_cpu,_fp32,_resnet50,_singlestream,_tflite` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--compressed_dataset=value` → `CM_DATASET_COMPRESSED=value`
+* `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+* `--mlperf_conf=value` → `CM_MLPERF_CONF=value`
+* `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+* `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value`
+* `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value`
+* `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+* `--user_conf=value` → `CM_MLPERF_USER_CONF=value`
+* `--verbose=value` → `CM_VERBOSE=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r = cm.access({..., "compressed_dataset": ...})
+```
+
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_DATASET_COMPRESSED: `off` +* CM_DATASET_INPUT_SQUARE_SIDE: `224` +* CM_FAST_COMPILATION: `yes` +* CM_LOADGEN_BUFFER_SIZE: `1024` +* CM_MLPERF_LOADGEN_MODE: `accuracy` +* CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` +* CM_MLPERF_LOADGEN_TRIGGER_COLD_RUN: `0` +* CM_MLPERF_OUTPUT_DIR: `.` +* CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `tflite_cpp` +* CM_MLPERF_TFLITE_USE_NEON: `0` +* CM_MLPERF_TFLITE_USE_OPENCL: `0` +* CM_ML_MODEL_GIVEN_CHANNEL_MEANS: `123.68 116.78 103.94` +* CM_ML_MODEL_NORMALIZE_DATA: `0` +* CM_ML_MODEL_SUBTRACT_MEANS: `1` +* CM_VERBOSE: `0` + +
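As an illustrative sketch (assuming the same Python API shown earlier), these defaults can also be overridden by passing an `env` dictionary:

```python
import cmind

# Override two of the default environment keys listed above;
# entries passed in 'env' take precedence over the script's defaults.
r = cmind.access({
    'action': 'run',
    'automation': 'script',
    'tags': 'app,mlperf,inference,tflite-cpp',
    'out': 'con',
    'env': {
        'CM_VERBOSE': '1',
        'CM_MLPERF_LOADGEN_TRIGGER_COLD_RUN': '1',
    },
})

if r['return'] > 0:
    print(r['error'])
```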
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,cuda + * `if (CM_MLPERF_DEVICE == gpu)` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,loadgen + * CM names: `--adr.['loadgen']...` + - CM script: [get-mlperf-inference-loadgen](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-loadgen) + * get,mlcommons,inference,src + * CM names: `--adr.['inference-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + * get,ml-model,mobilenet,raw,_tflite + * `if (CM_MLPERF_BACKEND in ['tflite', 'armnn_tflite'] AND CM_MODEL == mobilenet)` + * CM names: `--adr.['ml-model', 'tflite-model', 'mobilenet-model']...` + - CM script: [get-ml-model-mobilenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-mobilenet) + * get,ml-model,resnet50,raw,_tflite,_no-argmax + * `if (CM_MLPERF_BACKEND in ['tflite', 'armnn_tflite'] AND CM_MODEL == resnet50)` + * CM names: `--adr.['ml-model', 'tflite-model', 'resnet50-model']...` + - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50) + * get,ml-model,resnet50,raw,_tf + * `if (CM_MLPERF_BACKEND == tf AND CM_MODEL == resnet50)` + * CM names: `--adr.['ml-model', 'tflite-model', 'resnet50-model']...` + - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50) + * get,ml-model,efficientnet,raw,_tflite + * `if (CM_MLPERF_BACKEND in ['tflite', 'armnn_tflite'] AND CM_MODEL == efficientnet)` + * CM names: `--adr.['ml-model', 'tflite-model', 'efficientnet-model']...` + - CM script: [get-ml-model-efficientnet-lite](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-efficientnet-lite) + * get,tensorflow,lib,_tflite + - CM script: [install-tensorflow-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-tensorflow-from-src) + * get,lib,armnn + * `if (CM_MLPERF_TFLITE_USE_ARMNN == yes)` + * CM names: `--adr.['armnn', 'lib-armnn']...` + - CM script: [get-lib-armnn](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-lib-armnn) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py)*** + 1. 
***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json)*** + * generate,user-conf,mlperf,inference + * CM names: `--adr.['user-conf-generator']...` + - CM script: [generate-mlperf-inference-user-conf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/generate-mlperf-inference-user-conf) + * get,dataset,preprocessed,imagenet,_for.resnet50,_rgb32,_NHWC + * `if (CM_MLPERF_SKIP_RUN == no AND CM_MODEL == resnet50) AND (CM_DATASET_COMPRESSED != on)` + * CM names: `--adr.['imagenet-preprocessed', 'preprocessed-dataset']...` + - CM script: [get-preprocessed-dataset-imagenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-imagenet) + * get,dataset,preprocessed,imagenet,_for.mobilenet,_rgb32,_NHWC + * `if (CM_MLPERF_SKIP_RUN == no AND CM_MODEL in ['mobilenet', 'efficientnet']) AND (CM_DATASET_COMPRESSED != on)` + * CM names: `--adr.['imagenet-preprocessed', 'preprocessed-dataset']...` + - CM script: [get-preprocessed-dataset-imagenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-imagenet) + * get,dataset,preprocessed,imagenet,_for.mobilenet,_rgb8,_NHWC + * `if (CM_DATASET_COMPRESSED == on AND CM_MLPERF_SKIP_RUN == no AND CM_MODEL in ['mobilenet', 'efficientnet'])` + * CM names: `--adr.['imagenet-preprocessed', 'preprocessed-dataset']...` + - CM script: [get-preprocessed-dataset-imagenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-imagenet) + * get,dataset,preprocessed,imagenet,_for.resnet50,_rgb8,_NHWC + * `if (CM_DATASET_COMPRESSED == on AND CM_MLPERF_SKIP_RUN == no AND CM_MODEL == resnet50)` + * CM names: `--adr.['imagenet-preprocessed', 'preprocessed-dataset']...` + - CM script: [get-preprocessed-dataset-imagenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-imagenet) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py)*** + 1. 
***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json)*** + * compile,program + * `if (CM_MLPERF_SKIP_RUN != yes)` + * CM names: `--adr.['compiler-program']...` + - CM script: [compile-program](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/compile-program) + * benchmark-mlperf + * `if (CM_MLPERF_SKIP_RUN != yes)` + * CM names: `--adr.['mlperf-runner']...` + - CM script: [benchmark-program-mlperf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-program-mlperf) + * save,mlperf,inference,state + * CM names: `--adr.['save-mlperf-inference-state']...` + - CM script: [save-mlperf-inference-implementation-state](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/save-mlperf-inference-implementation-state) + +___ +### Script output +`cmr "app mlperf inference tflite-cpp [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_HW_NAME` +* `CM_MLPERF_*` +* `CM_ML_MODEL_*` +#### New environment keys auto-detected from customize + +* `CM_MLPERF_CONF` +* `CM_MLPERF_DEVICE` +* `CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2` +* `CM_MLPERF_USER_CONF` \ No newline at end of file diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json b/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json new file mode 100644 index 0000000000..17caa8047a --- /dev/null +++ b/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json @@ -0,0 +1,427 @@ +{ + "alias": "app-mlperf-inference-ctuning-cpp-tflite", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Modular MLPerf inference benchmark pipeline", + "default_env": { + "CM_DATASET_COMPRESSED": "off", + "CM_DATASET_INPUT_SQUARE_SIDE": "224", + "CM_FAST_COMPILATION": "yes", + "CM_LOADGEN_BUFFER_SIZE": "1024", + "CM_MLPERF_LOADGEN_MODE": "accuracy", + "CM_MLPERF_LOADGEN_SCENARIO": "SingleStream", + "CM_MLPERF_LOADGEN_TRIGGER_COLD_RUN": "0", + "CM_MLPERF_OUTPUT_DIR": ".", + "CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX": "tflite_cpp", + "CM_MLPERF_TFLITE_USE_NEON": "0", + "CM_MLPERF_TFLITE_USE_OPENCL": "0", + "CM_ML_MODEL_GIVEN_CHANNEL_MEANS": "123.68 116.78 103.94", + "CM_ML_MODEL_NORMALIZE_DATA": "0", + "CM_ML_MODEL_SUBTRACT_MEANS": "1", + "CM_VERBOSE": "0" + }, + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "tags": "get,sys-utils-cm" + }, + { + "enable_if_env": { + "CM_MLPERF_DEVICE": [ + "gpu" + ] + }, + "tags": "get,cuda" + }, + { + "names": [ + "loadgen" + ], + "tags": "get,loadgen" + }, + { + "names": [ + "inference-src" + ], + "tags": "get,mlcommons,inference,src" + }, + { + "enable_if_env": { + "CM_MLPERF_BACKEND": [ + "tflite", + "armnn_tflite" + ], + "CM_MODEL": [ + "mobilenet" + ] + }, + "names": [ + "ml-model", + "tflite-model", + "mobilenet-model" + ], + "tags": "get,ml-model,mobilenet,raw,_tflite" + }, + { + "enable_if_env": { + "CM_MLPERF_BACKEND": [ + "tflite", + "armnn_tflite" + ], + "CM_MODEL": [ + "resnet50" + ] + }, + "names": [ + "ml-model", + "tflite-model", + "resnet50-model" + ], + "tags": "get,ml-model,resnet50,raw,_tflite,_no-argmax" + }, + { + "enable_if_env": { + "CM_MLPERF_BACKEND": [ + "tf" + ], + "CM_MODEL": [ + "resnet50" + ] + }, + "names": [ + "ml-model", + "tflite-model", + "resnet50-model" + ], + "tags": "get,ml-model,resnet50,raw,_tf" + }, + { + "enable_if_env": { + "CM_MLPERF_BACKEND": [ + "tflite", + "armnn_tflite" + ], + "CM_MODEL": [ + "efficientnet" + ] + }, + "names": [ + "ml-model", + 
"tflite-model", + "efficientnet-model" + ], + "tags": "get,ml-model,efficientnet,raw,_tflite" + }, + { + "tags": "get,tensorflow,lib,_tflite" + }, + { + "enable_if_env": { + "CM_MLPERF_TFLITE_USE_ARMNN": [ + "yes" + ] + }, + "names": [ + "armnn", + "lib-armnn" + ], + "tags": "get,lib,armnn" + } + ], + "input_mapping": { + "compressed_dataset": "CM_DATASET_COMPRESSED", + "count": "CM_MLPERF_LOADGEN_QUERY_COUNT", + "mlperf_conf": "CM_MLPERF_CONF", + "mode": "CM_MLPERF_LOADGEN_MODE", + "output_dir": "CM_MLPERF_OUTPUT_DIR", + "performance_sample_count": "CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT", + "scenario": "CM_MLPERF_LOADGEN_SCENARIO", + "user_conf": "CM_MLPERF_USER_CONF", + "verbose": "CM_VERBOSE" + }, + "new_env_keys": [ + "CM_MLPERF_*", + "CM_ML_MODEL_*", + "CM_HW_NAME" + ], + "new_state_keys": [ + "CM_SUT_*" + ], + "post_deps": [ + { + "names": [ + "compiler-program" + ], + "skip_if_env": { + "CM_MLPERF_SKIP_RUN": [ + "yes" + ] + }, + "tags": "compile,program" + }, + { + "names": [ + "mlperf-runner" + ], + "skip_if_env": { + "CM_MLPERF_SKIP_RUN": [ + "yes" + ] + }, + "tags": "benchmark-mlperf" + }, + { + "names": [ + "save-mlperf-inference-state" + ], + "tags": "save,mlperf,inference,state" + } + ], + "prehook_deps": [ + { + "names": [ + "user-conf-generator" + ], + "tags": "generate,user-conf,mlperf,inference" + }, + { + "enable_if_env": { + "CM_MLPERF_SKIP_RUN": [ + "no" + ], + "CM_MODEL": [ + "resnet50" + ] + }, + "names": [ + "imagenet-preprocessed", + "preprocessed-dataset" + ], + "skip_if_env": { + "CM_DATASET_COMPRESSED": [ + "on" + ] + }, + "tags": "get,dataset,preprocessed,imagenet,_for.resnet50,_rgb32,_NHWC", + "update_tags_from_env": [ + "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS" + ] + }, + { + "enable_if_env": { + "CM_MLPERF_SKIP_RUN": [ + "no" + ], + "CM_MODEL": [ + "mobilenet", + "efficientnet" + ] + }, + "names": [ + "imagenet-preprocessed", + "preprocessed-dataset" + ], + "skip_if_env": { + "CM_DATASET_COMPRESSED": [ + "on" + ] + }, + "tags": "get,dataset,preprocessed,imagenet,_for.mobilenet,_rgb32,_NHWC", + "update_tags_from_env": [ + "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS" + ] + }, + { + "enable_if_env": { + "CM_DATASET_COMPRESSED": [ + "on" + ], + "CM_MLPERF_SKIP_RUN": [ + "no" + ], + "CM_MODEL": [ + "mobilenet", + "efficientnet" + ] + }, + "names": [ + "imagenet-preprocessed", + "preprocessed-dataset" + ], + "tags": "get,dataset,preprocessed,imagenet,_for.mobilenet,_rgb8,_NHWC", + "update_tags_from_env": [ + "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS" + ] + }, + { + "enable_if_env": { + "CM_DATASET_COMPRESSED": [ + "on" + ], + "CM_MLPERF_SKIP_RUN": [ + "no" + ], + "CM_MODEL": [ + "resnet50" + ] + }, + "names": [ + "imagenet-preprocessed", + "preprocessed-dataset" + ], + "tags": "get,dataset,preprocessed,imagenet,_for.resnet50,_rgb8,_NHWC", + "update_tags_from_env": [ + "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS" + ] + } + ], + "tags": [ + "app", + "mlcommons", + "mlperf", + "inference", + "tflite-cpp" + ], + "tags_help": "app mlperf inference tflite-cpp", + "uid": "415904407cca404a", + "variations": { + "armnn": { + "default_variations": { + "optimization-target": "use-neon" + }, + "env": { + "CM_MLPERF_TFLITE_USE_ARMNN": "yes", + "CM_TMP_LINK_LIBS": "tensorflowlite,armnn" + } + }, + "armnn,tflite": { + "env": { + "CM_MLPERF_BACKEND": "armnn_tflite", + "CM_MLPERF_BACKEND_VERSION": "<<>>", + "CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX": "tflite_armnn_cpp", + "CM_TMP_LINK_LIBS": "tensorflowlite,armnn,armnnTfLiteParser", + "CM_TMP_SRC_FOLDER": "armnn" + } + }, + 
"cpu": { + "default": true, + "env": { + "CM_MLPERF_DEVICE": "cpu" + }, + "group": "device" + }, + "efficientnet": { + "env": { + "CM_MODEL": "efficientnet" + }, + "group": "model" + }, + "fp32": { + "adr": { + "ml-model": { + "tags": "_fp32" + }, + "preprocessed-dataset": { + "tags": "_float32" + } + }, + "default": true, + "env": { + "CM_MLPERF_MODEL_PRECISION": "float32" + }, + "group": "precision" + }, + "gpu": { + "env": { + "CM_MLPERF_DEVICE": "gpu", + "CM_MLPERF_DEVICE_LIB_NAMESPEC": "cudart" + }, + "group": "device" + }, + "int8": { + "adr": { + "ml-model": { + "tags": "_int8" + }, + "preprocessed-dataset": { + "tags": "_int8" + } + }, + "env": { + "CM_DATASET_COMPRESSED": "on", + "CM_MLPERF_MODEL_PRECISION": "int8" + }, + "group": "precision" + }, + "mobilenet": { + "env": { + "CM_MODEL": "mobilenet" + }, + "group": "model" + }, + "resnet50": { + "default": true, + "env": { + "CM_MODEL": "resnet50" + }, + "group": "model" + }, + "singlestream": { + "default": true, + "env": { + "CM_MLPERF_LOADGEN_SCENARIO": "SingleStream" + }, + "group": "loadgen-scenario" + }, + "tf": { + "env": { + "CM_MLPERF_BACKEND": "tf" + }, + "group": "backend" + }, + "tflite": { + "default": true, + "env": { + "CM_MLPERF_BACKEND": "tflite", + "CM_MLPERF_BACKEND_VERSION": "master", + "CM_TMP_LINK_LIBS": "tensorflowlite", + "CM_TMP_SRC_FOLDER": "src" + }, + "group": "backend" + }, + "uint8": { + "adr": { + "ml-model": { + "tags": "_uint8" + }, + "preprocessed-dataset": { + "tags": "_int8" + } + }, + "env": { + "CM_DATASET_COMPRESSED": "on", + "CM_MLPERF_MODEL_PRECISION": "uint8" + }, + "group": "precision" + }, + "use-neon": { + "env": { + "CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1": "using_neon", + "CM_MLPERF_TFLITE_USE_NEON": "1" + }, + "group": "optimization-target" + }, + "use-opencl": { + "env": { + "CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1": "using_opencl", + "CM_MLPERF_TFLITE_USE_OPENCL": "1" + }, + "group": "optimization-target" + } + } +} diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp b/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp new file mode 100644 index 0000000000..c641e9d1e7 --- /dev/null +++ b/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2018 cTuning foundation. + * See CK COPYRIGHT.txt for copyright details. + * + * See CK LICENSE for licensing details. + * See CK COPYRIGHT for copyright details. 
+ */ + +#include +#include +#include + +#include "armnn/ArmNN.hpp" +#include "armnn/Exceptions.hpp" +#include "armnn/Tensor.hpp" +#include "armnn/INetwork.hpp" +#include "armnnTfLiteParser/ITfLiteParser.hpp" + +#include "loadgen.h" +#include "query_sample_library.h" +#include "system_under_test.h" +#include "test_settings.h" + + +#include "benchmark.h" + +#include "tensorflow/lite/kernels/register.h" +#include "tensorflow/lite/model.h" + +using namespace std; +using namespace CK; + + +template + +class ArmNNBenchmark : public Benchmark { +public: + ArmNNBenchmark(const BenchmarkSettings* settings, TData *in_ptr, TData *out_ptr) + : Benchmark(settings, in_ptr, out_ptr) { + } +}; + +armnn::InputTensors MakeInputTensors(const std::pair& input, const void* inputTensorData) +{ + return { {input.first, armnn::ConstTensor(input.second, inputTensorData) } }; +} + +armnn::OutputTensors MakeOutputTensors(const std::pair& output, void* outputTensorData) +{ + return { {output.first, armnn::Tensor(output.second, outputTensorData) } }; +} + +class Program { +public: + Program () : runtime( armnn::IRuntime::Create(options) ) { + + bool use_neon = getenv_b("CM_MLPERF_TFLITE_USE_NEON"); + bool use_opencl = getenv_b("CM_MLPERF_TFLITE_USE_OPENCL"); + string input_layer_name = getenv_s("CM_ML_MODEL_INPUT_LAYER_NAME"); + string output_layer_name = getenv_s("CM_ML_MODEL_OUTPUT_LAYER_NAME"); + + settings = new BenchmarkSettings(MODEL_TYPE::LITE); + + session = new BenchmarkSession(settings); + + armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create(); + + // Optimize the network for a specific runtime compute device, e.g. CpuAcc, GpuAcc + //std::vector optOptions = {armnn::Compute::CpuAcc, armnn::Compute::GpuAcc}; + std::vector optOptions = {armnn::Compute::CpuRef}; + if( use_neon && use_opencl) { + optOptions = {armnn::Compute::CpuAcc, armnn::Compute::GpuAcc}; + } else if( use_neon ) { + optOptions = {armnn::Compute::CpuAcc}; + } else if( use_opencl ) { + optOptions = {armnn::Compute::GpuAcc}; + } + + cout << "\nLoading graph..." << endl; + + armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(settings->graph_file().c_str()); + if (!network) + throw "Failed to load graph from file"; + + armnnTfLiteParser::BindingPointInfo inputBindingInfo = parser->GetNetworkInputBindingInfo(0, input_layer_name); + armnnTfLiteParser::BindingPointInfo outputBindingInfo = parser->GetNetworkOutputBindingInfo(0, output_layer_name); + + armnn::TensorShape inShape = inputBindingInfo.second.GetShape(); + armnn::TensorShape outShape = outputBindingInfo.second.GetShape(); + std::size_t inSize = inShape[0] * inShape[1] * inShape[2] * inShape[3]; + std::size_t outSize = outShape[0] * outShape[1]; + + armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*network, optOptions, runtime->GetDeviceSpec()); + + runtime->LoadNetwork(networkIdentifier, std::move(optNet)); + + armnn::DataType input_type = inputBindingInfo.second.GetDataType(); + armnn::DataType output_type = outputBindingInfo.second.GetDataType(); + if (input_type != output_type) + throw format("Type of graph's input (%d) does not match type of its output (%d).", int(input_type), int(output_type)); + + void* input = input_type == armnn::DataType::Float32 ? (void*)new float[inSize] : (void*)new uint8_t[inSize]; + void* output = output_type == armnn::DataType::Float32 ? 
(void*)new float[outSize] : (void*)new uint8_t[outSize]; + + inputTensor = MakeInputTensors(inputBindingInfo, input); + outputTensor = MakeOutputTensors(outputBindingInfo, output); + + switch (input_type) { + case armnn::DataType::Float32: + if (settings->skip_internal_preprocessing) { + cout << "************* Type 1" << endl; + benchmark.reset(new ArmNNBenchmark(settings, (float*)input, (float*)output)); + } else { + cout << "************* Type 2" << endl; + benchmark.reset(new ArmNNBenchmark(settings, (float*)input, (float*)output)); + } + break; + + case armnn::DataType::QAsymmU8: + benchmark.reset(new ArmNNBenchmark(settings, (uint8_t*)input, (uint8_t*)output)); + break; + + default: + throw format("Unsupported type of graph's input: %d. " + "Supported types are: Float32 (%d), UInt8 (%d)", + int(input_type), int(armnn::DataType::Float32), int(armnn::DataType::QAsymmU8)); + } + + int out_num = outShape[0]; + int out_classes = outShape[1]; + cout << format("Output tensor dimensions: %d*%d", out_num, out_classes) << endl; + if (out_classes != settings->num_classes && out_classes != settings->num_classes+1) + throw format("Unsupported number of classes in graph's output tensor. Supported numbers are %d and %d", + settings->num_classes, settings->num_classes+1); + benchmark->has_background_class = out_classes == settings->num_classes+1; + } + + ~Program() { + } + + //bool is_available_batch() {return session? session->get_next_batch(): false; } + + void LoadNextBatch(const std::vector& img_indices) { + auto vl = settings->verbosity_level; + + if( vl > 1 ) { + cout << "LoadNextBatch(["; + for( auto idx : img_indices) { + cout << idx << ' '; + } + cout << "])" << endl; + } else if( vl ) { + cout << 'B' << flush; + } + session->load_filenames(img_indices); + benchmark->load_images( session ); + + if( vl ) { + cout << endl; + } + } + + void ColdRun() { + auto vl = settings->verbosity_level; + + if( vl > 1 ) { + cout << "Triggering a Cold Run..." 
<< endl; + } else if( vl ) { + cout << 'C' << flush; + } + + if (runtime->EnqueueWorkload(networkIdentifier, inputTensor, outputTensor) != armnn::Status::Success) + throw "Failed to invoke the classifier"; + } + + int InferenceOnce(int img_idx) { + benchmark->get_random_image( img_idx ); + + if (runtime->EnqueueWorkload(networkIdentifier, inputTensor, outputTensor) != armnn::Status::Success) + throw "Failed to invoke the classifier"; + + return benchmark->get_next_result(); + } + + void UnloadBatch(const std::vector& img_indices) { + auto b_size = img_indices.size(); + + auto vl = settings->verbosity_level; + + if( vl > 1 ) { + cout << "Unloading a batch[" << b_size << "]" << endl; + } else if( vl ) { + cout << 'U' << flush; + } + + benchmark->unload_images(b_size); + //benchmark->save_results( ); + } + + const int available_images_max() { return settings->list_of_available_imagefiles().size(); } + const int images_in_memory_max() { return settings->images_in_memory_max; } + + BenchmarkSettings *settings; +private: + BenchmarkSession *session; + unique_ptr benchmark; + armnn::NetworkId networkIdentifier; + armnn::OutputTensors outputTensor; + armnn::InputTensors inputTensor; + armnn::IRuntime::CreationOptions options; + armnn::IRuntimePtr runtime; +}; + +class SystemUnderTestSingleStream : public mlperf::SystemUnderTest { +public: + SystemUnderTestSingleStream(Program *_prg) : mlperf::SystemUnderTest() { + prg = _prg; + query_counter = 0; + }; + + ~SystemUnderTestSingleStream() override = default; + + const std::string& Name() { return name_; } + + void IssueQuery(const std::vector& samples) override { + + ++query_counter; + auto vl = prg->settings->verbosity_level; + if( vl > 1 ) { + cout << query_counter << ") IssueQuery([" << samples.size() << "]," << samples[0].id << "," << samples[0].index << ")" << endl; + } else if ( vl ) { + cout << 'Q' << flush; + } + + std::vector responses; + responses.reserve(samples.size()); + float encoding_buffer[samples.size()]; + int i=0; + for (auto s : samples) { + int predicted_class = prg->InferenceOnce(s.index); + + if( vl > 1 ) { + cout << "Query image index: " << s.index << " -> Predicted class: " << predicted_class << endl << endl; + } else if ( vl ) { + cout << 'p' << flush; + } + + /* This would be the correct way to pass in one integer index: + */ +// int single_value_buffer[] = { (int)predicted_class }; + + /* This conversion is subtly but terribly wrong + yet we use it here in order to use Guenther's parsing script: + */ + encoding_buffer[i] = (float)predicted_class; + responses.push_back({s.id, uintptr_t(&encoding_buffer[i]), sizeof(encoding_buffer[i])}); + ++i; + } + mlperf::QuerySamplesComplete(responses.data(), responses.size()); + } + + void FlushQueries() override { + auto vl = prg->settings->verbosity_level; + if ( vl ) { + cout << endl; + } + } + + void ReportLatencyResults(const std::vector& latencies_ns) { + + size_t size = latencies_ns.size(); + uint64_t avg = accumulate(latencies_ns.begin(), latencies_ns.end(), uint64_t(0) )/size; + + std::vector sorted_lat(latencies_ns.begin(), latencies_ns.end()); + sort(sorted_lat.begin(), sorted_lat.end()); + + cout << endl << "------------------------------------------------------------"; + cout << endl << "| LATENCIES (in nanoseconds and fps) |"; + cout << endl << "------------------------------------------------------------"; + size_t p50 = size * 0.5; + size_t p90 = size * 0.9; + cout << endl << "Number of queries run: " << size; + cout << endl << "Min latency: " << sorted_lat[0] << "ns (" 
<< 1e9/sorted_lat[0] << " fps)"; + cout << endl << "Median latency: " << sorted_lat[p50] << "ns (" << 1e9/sorted_lat[p50] << " fps)"; + cout << endl << "Average latency: " << avg << "ns (" << 1e9/avg << " fps)"; + cout << endl << "90 percentile latency: " << sorted_lat[p90] << "ns (" << 1e9/sorted_lat[p90] << " fps)"; + + if(!prg->settings->trigger_cold_run) { + cout << endl << "First query (cold model) latency: " << latencies_ns[0] << "ns (" << 1e9/latencies_ns[0] << " fps)"; + } + cout << endl << "Max latency: " << sorted_lat[size-1] << "ns (" << 1e9/sorted_lat[size-1] << " fps)"; + cout << endl << "------------------------------------------------------------ " << endl; + } + +private: + std::string name_{"TFLite_SUT"}; + Program *prg; + long query_counter; +}; + +class QuerySampleLibrarySingleStream : public mlperf::QuerySampleLibrary { +public: + QuerySampleLibrarySingleStream(Program *_prg) : mlperf::QuerySampleLibrary() { + prg = _prg; + }; + + ~QuerySampleLibrarySingleStream() = default; + + const std::string& Name() override { return name_; } + + size_t TotalSampleCount() override { return prg->available_images_max(); } + + size_t PerformanceSampleCount() override { return prg->images_in_memory_max(); } + + void LoadSamplesToRam( const std::vector& samples) override { + prg->LoadNextBatch(samples); + return; + } + + void UnloadSamplesFromRam( const std::vector& samples) override { + prg->UnloadBatch(samples); + return; + } + +private: + std::string name_{"TFLite_QSL"}; + Program *prg; +}; + +void TestSingleStream(Program *prg) { + SystemUnderTestSingleStream sut(prg); + QuerySampleLibrarySingleStream qsl(prg); + + const std::string mlperf_conf_path = getenv_s("CM_MLPERF_CONF"); + const std::string user_conf_path = getenv_s("CM_MLPERF_USER_CONF"); + + std::string model_name = getenv_opt_s("CM_MODEL", "unknown_model"); + std::string logs_dir = getenv_opt_s("CM_MLPERF_LOADGEN_LOGS_DIR", ""); + + const std::string scenario_string = getenv_s("CM_MLPERF_LOADGEN_SCENARIO"); + const std::string mode_string = getenv_s("CM_MLPERF_LOADGEN_MODE"); + + std::cout << "Path to mlperf.conf : " << mlperf_conf_path << std::endl; + std::cout << "Path to user.conf : " << user_conf_path << std::endl; + std::cout << "Model Name: " << model_name << std::endl; + std::cout << "LoadGen Scenario: " << scenario_string << std::endl; + std::cout << "LoadGen Mode: " << ( mode_string != "" ? mode_string : "(empty string)" ) << std::endl; + + mlperf::TestSettings ts; + + // This should have been done automatically inside ts.FromConfig() ! + ts.scenario = ( scenario_string == "SingleStream") ? mlperf::TestScenario::SingleStream + : ( scenario_string == "MultiStream") ? mlperf::TestScenario::MultiStream + : ( scenario_string == "Server") ? mlperf::TestScenario::Server + : ( scenario_string == "Offline") ? mlperf::TestScenario::Offline : mlperf::TestScenario::SingleStream; + + if( mode_string != "") + ts.mode = ( mode_string == "SubmissionRun") ? mlperf::TestMode::SubmissionRun + : ( mode_string == "accuracy") ? mlperf::TestMode::AccuracyOnly + : ( mode_string == "performance") ? mlperf::TestMode::PerformanceOnly + : ( mode_string == "findpeakperformance") ? 
mlperf::TestMode::FindPeakPerformance : mlperf::TestMode::SubmissionRun;
+
+  if (ts.FromConfig(mlperf_conf_path, model_name, scenario_string)) {
+    std::cout << "Issue with mlperf.conf file at " << mlperf_conf_path << std::endl;
+    exit(1);
+  }
+
+  if (ts.FromConfig(user_conf_path, model_name, scenario_string)) {
+    std::cout << "Issue with user.conf file at " << user_conf_path << std::endl;
+    exit(1);
+  }
+
+  mlperf::LogSettings log_settings;
+  log_settings.log_output.outdir = logs_dir;
+  log_settings.log_output.prefix_with_datetime = false;
+  log_settings.enable_trace = false;
+
+
+  if (prg->settings->trigger_cold_run) {
+    prg->ColdRun();
+  }
+
+  mlperf::StartTest(&sut, &qsl, ts, log_settings);
+}
+
+int main(int argc, char* argv[]) {
+  try {
+    Program *prg = new Program();
+    TestSingleStream(prg);
+    delete prg;
+  }
+  catch (const string& error_message) {
+    cerr << "ERROR: " << error_message << endl;
+    return -1;
+  }
+  return 0;
+}
diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py b/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py
new file mode 100644
index 0000000000..ebd588c9f2
--- /dev/null
+++ b/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py
@@ -0,0 +1,96 @@
+from cmind import utils
+import os
+import shutil
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    if os_info['platform'] == 'windows':
+        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+    env = i['env']
+
+    if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+        return {'return':0}
+
+    if 'CM_MODEL' not in env:
+        return {'return': 1, 'error': 'Please select a variation specifying the model to run'}
+    if 'CM_MLPERF_BACKEND' not in env:
+        return {'return': 1, 'error': 'Please select a variation specifying the backend'}
+    if 'CM_MLPERF_DEVICE' not in env:
+        return {'return': 1, 'error': 'Please select a variation specifying the device to run on'}
+
+    source_files = []
+    script_path = i['run_script_input']['path']
+
+    env['CM_SOURCE_FOLDER_PATH'] = os.path.join(script_path, env['CM_TMP_SRC_FOLDER'])
+
+    for file in os.listdir(env['CM_SOURCE_FOLDER_PATH']):
+        if file.endswith(".c") or file.endswith(".cpp"):
+            source_files.append(file)
+
+    env['CM_CXX_SOURCE_FILES'] = ";".join(source_files)
+
+    # Initialize both include-path lists before appending to them
+    if '+CPLUS_INCLUDE_PATH' not in env:
+        env['+CPLUS_INCLUDE_PATH'] = []
+    if '+C_INCLUDE_PATH' not in env:
+        env['+C_INCLUDE_PATH'] = []
+
+    env['+CPLUS_INCLUDE_PATH'].append(os.path.join(script_path, "inc"))
+    env['+C_INCLUDE_PATH'].append(os.path.join(script_path, "inc"))
+
+    # TODO: get cuda path ugly fix
+    if env['CM_MLPERF_DEVICE'] == 'gpu':
+        env['+C_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
+        env['+CPLUS_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
+        env['+LD_LIBRARY_PATH'].append(env['CM_CUDA_PATH_LIB'])
+        env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
+
+    if '+ CXXFLAGS' not in env:
+        env['+ CXXFLAGS'] = []
+    env['+ CXXFLAGS'].append("-std=c++17")
+
+    # add preprocessor flag like "#define CM_MODEL_RESNET50"
+    env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper())
+    # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME"
+    env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' + env['CM_MLPERF_BACKEND'].upper())
+    # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU"
+    env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' + env['CM_MLPERF_DEVICE'].upper())
+
+    if '+ LDCXXFLAGS' not in env:
+        env['+ LDCXXFLAGS'] = []
+
+    env['+ LDCXXFLAGS'] += [
+        "-lmlperf_loadgen",
+        "-lpthread"
+    ]
+    # e.g. 
-lonnxruntime + if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env: + env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_BACKEND_LIB_NAMESPEC']) + # e.g. -lcudart + if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env: + env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC']) + + if env.get('CM_TMP_LINK_LIBS', []): + libs = env['CM_TMP_LINK_LIBS'].split(",") + for lib in libs: + env['+ LDCXXFLAGS'].append(' -l'+lib) + + env['CM_LINKER_LANG'] = 'CXX' + env['CM_RUN_DIR'] = os.getcwd() + + if 'CM_MLPERF_CONF' not in env: + env['CM_MLPERF_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'CM_MLPERF_USER_CONF' not in env: + env['CM_MLPERF_USER_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + + if env.get('CM_DATASET_COMPRESSED', "no").lower() in [ "yes", "on", "true"] and "float" in env.get('CM_MLPERF_MODEL_PRECISION', ''): + env['CM_HOST_USE_ALL_CORES'] = "yes" #Use all cores for input preprocessing + env['CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2'] = "with_live_preprocessing" + + return {'return':0} + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return':0} diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h b/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h new file mode 100644 index 0000000000..76f1209a80 --- /dev/null +++ b/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h @@ -0,0 +1,488 @@ +/* + * Copyright (c) 2018 cTuning foundation. + * See CK COPYRIGHT.txt for copyright details. + * + * See CK LICENSE for licensing details. + * See CK COPYRIGHT for copyright details. + */ + +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG(msg) std::cout << "DEBUG: " << msg << std::endl; + +namespace CK { + +enum MODEL_TYPE { + LITE, + TF_FROZEN +}; + + +/// Load mandatory string value from the environment. +inline std::string getenv_s(const std::string& name) { + const char *value = getenv(name.c_str()); + if (!value) + throw "Required environment variable " + name + " is not set"; + return std::string(value); +} + +inline std::string getenv_opt_s(const std::string& name, const std::string default_value) { + const char *value = getenv(name.c_str()); + if (!value) + return default_value; + else + return std::string(value); +} + + +/// Load mandatory integer value from the environment. +inline int getenv_i(const std::string& name) { + const char *value = getenv(name.c_str()); + if (!value) + throw "Required environment variable " + name + " is not set"; + return atoi(value); +} + +/// Load mandatory float value from the environment. +inline float getenv_f(const std::string& name) { + const char *value = getenv(name.c_str()); + if (!value) + throw "Required environment variable " + name + " is not set"; + return atof(value); +} + +/// Load an optional boolean value from the environment. +inline bool getenv_b(const char *name) { + std::string value = getenv(name); + + return (value == "YES" || value == "yes" || value == "ON" || value == "on" || value == "1"); +} + +/// Dummy `sprintf` like formatting function using std::string. +/// It uses buffer of fixed length so can't be used in any cases, +/// generally use it for short messages with numeric arguments. 
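+/// Note that the fixed 1024-byte buffer below is filled with sprintf and is
+/// not bounds-checked, so overly long formatted strings would overflow it.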
+template <typename ...Args>
+inline std::string format(const char* str, Args ...args) {
+  char buf[1024];
+  snprintf(buf, sizeof(buf), str, args...);
+  return std::string(buf);
+}
+
+//----------------------------------------------------------------------
+
+class Accumulator {
+public:
+  void reset() { _total = 0, _count = 0; }
+  void add(float value) { _total += value, _count++; }
+  float total() const { return _total; }
+  float avg() const { return _total / static_cast<float>(_count); }
+private:
+  float _total = 0;
+  int _count = 0;
+};
+
+//----------------------------------------------------------------------
+
+class BenchmarkSettings {
+public:
+  const std::string images_dir = getenv_s("CM_DATASET_PREPROCESSED_PATH");
+  const std::string available_images_file = getenv_s("CM_DATASET_PREPROCESSED_IMAGES_LIST");
+  const bool skip_internal_preprocessing = (getenv_opt_s("CM_DATASET_COMPRESSED", "off") == "off");
+  const std::string result_dir = getenv_s("CM_MLPERF_OUTPUT_DIR");
+  const std::string input_layer_name = getenv_s("CM_ML_MODEL_INPUT_LAYER_NAME");
+  const std::string output_layer_name = getenv_s("CM_ML_MODEL_OUTPUT_LAYER_NAME");
+  const int images_in_memory_max = getenv_i("CM_LOADGEN_BUFFER_SIZE");
+  const int image_size = getenv_i("CM_DATASET_INPUT_SQUARE_SIDE");
+  const int batch_size = 1;
+  const int num_channels = 3;
+  const int num_classes = 1000;
+  const bool normalize_img = getenv_b("CM_ML_MODEL_NORMALIZE_DATA");
+
+  const bool subtract_mean = getenv_b("CM_ML_MODEL_SUBTRACT_MEANS");
+  const char *given_channel_means_str = getenv("CM_ML_MODEL_GIVEN_CHANNEL_MEANS");
+
+  const bool trigger_cold_run = getenv_b("CM_MLPERF_LOADGEN_TRIGGER_COLD_RUN");
+
+  const int verbosity_level = getenv_i("CM_VERBOSE");
+
+  BenchmarkSettings(enum MODEL_TYPE mode = MODEL_TYPE::LITE) {
+
+    if (given_channel_means_str) {
+      std::stringstream ss(given_channel_means_str);
+      for (int i = 0; i < 3; i++) {
+        ss >> given_channel_means[i];
+      }
+    }
+
+    switch (mode)
+    {
+      case MODEL_TYPE::LITE:
+        _graph_file = getenv_s("CM_ML_MODEL_FILE_WITH_PATH");
+        break;
+
+      case MODEL_TYPE::TF_FROZEN:
+        _graph_file = getenv_s("CM_ML_MODEL_FILE_WITH_PATH");
+        break;
+
+      default:
+        std::cout << "Unsupported MODEL_TYPE" << std::endl;
+        exit(-1);
+        break;
+    };
+    _number_of_threads = std::thread::hardware_concurrency();
+
+    if (getenv_opt_s("CM_HOST_USE_ALL_CORES", "no") != "yes") {
+      _number_of_threads = _number_of_threads < 1 ? 1 : _number_of_threads;
+      _number_of_threads = !getenv("CM_HOST_CPU_TOTAL_CORES")
+                           ? _number_of_threads
+                           : getenv_i("CM_HOST_CPU_TOTAL_CORES");
+      // getenv_i() throws when a variable is unset, so probe with getenv() first
+      if (getenv("CM_HOST_CPU_TOTAL_CORES") && getenv("CM_HOST_CPU_THREADS_PER_CORE")) {
+        _number_of_threads = getenv_i("CM_HOST_CPU_TOTAL_CORES") / getenv_i("CM_HOST_CPU_THREADS_PER_CORE");
+      }
+    }
+    // Print settings
+    std::cout << "Graph file: " << _graph_file << std::endl;
+    std::cout << "Image dir: " << images_dir << std::endl;
+    std::cout << "Image list: " << available_images_file << std::endl;
+    std::cout << "Image size: " << image_size << std::endl;
+    std::cout << "Image channels: " << num_channels << std::endl;
+    std::cout << "Prediction classes: " << num_classes << std::endl;
+    std::cout << "Result dir: " << result_dir << std::endl;
+    std::cout << "How many images fit in memory buffer: " << images_in_memory_max << std::endl;
+    std::cout << "Normalize: " << normalize_img << std::endl;
+    std::cout << "Subtract mean: " << subtract_mean << std::endl;
+    std::cout << "Run time preprocessing: " << !skip_internal_preprocessing << std::endl;
+    std::cout << "Number of Threads: " << _number_of_threads << std::endl;
+    if (subtract_mean && given_channel_means_str)
+      std::cout << "Per-channel means to subtract: " << given_channel_means[0]
+                << ", " << given_channel_means[1]
+                << ", " << given_channel_means[2] << std::endl;
+
+    // Create results dir if none
+    auto dir = opendir(result_dir.c_str());
+    if (dir)
+      closedir(dir);
+    else
+      system(("mkdir " + result_dir).c_str());
+
+    // Load list of images to be processed
+    std::ifstream file(available_images_file);
+    if (!file)
+      throw "Unable to open the available image list file " + available_images_file;
+    for (std::string s; !getline(file, s).fail();)
+      _available_image_list.emplace_back(s);
+    std::cout << "Number of available imagefiles: " << _available_image_list.size() << std::endl;
+  }
+
+  const std::vector<std::string>& list_of_available_imagefiles() const { return _available_image_list; }
+
+  std::vector<std::string> _available_image_list;
+
+  int number_of_threads() { return _number_of_threads; }
+
+  std::string graph_file() { return _graph_file; }
+
+  float given_channel_means[3];
+private:
+  int _number_of_threads;
+  std::string _graph_file;
+};
+
+//----------------------------------------------------------------------
+
+class BenchmarkSession {
+public:
+  BenchmarkSession(const BenchmarkSettings* settings): _settings(settings) {
+  }
+
+  virtual ~BenchmarkSession() {}
+
+  const std::vector<std::string>& load_filenames(std::vector<size_t> img_indices) {
+    _filenames_buffer.clear();
+    _filenames_buffer.reserve( img_indices.size() );
+    idx2loc.clear();
+
+    auto list_of_available_imagefiles = _settings->list_of_available_imagefiles();
+    auto count_available_imagefiles = list_of_available_imagefiles.size();
+
+    int loc=0;
+    for (auto idx : img_indices) {
+      if (idx < count_available_imagefiles) {
+        _filenames_buffer.emplace_back(list_of_available_imagefiles[idx]);
+        idx2loc[idx] = loc++;
+      } else {
+        std::cerr << "Trying to load filename[" << idx << "] when only "
+                  << count_available_imagefiles << " images are available" << std::endl;
+        exit(1);
+      }
+    }
+
+    return _filenames_buffer;
+  }
+
+  const std::vector<std::string>& current_filenames() const { return _filenames_buffer; }
+
+  std::map<int,int> idx2loc;
+
+private:
+  const BenchmarkSettings* _settings;
+  std::vector<std::string> _filenames_buffer;
+};
+
+//----------------------------------------------------------------------
+
+template <typename TData>
+class StaticBuffer {
+public:
+  StaticBuffer(int size, const std::string& dir): _size(size), _dir(dir) {
+    _buffer = new TData[size];
+  }
+
+  virtual ~StaticBuffer() {
+    delete[] _buffer;
+  }
+
+  TData* data() const { return _buffer; }
+  int size() const { return _size; }
+
+protected:
+  const int _size;
+  const std::string _dir;
+  TData* _buffer;
+};
+
+//----------------------------------------------------------------------
+
+class ImageData : public StaticBuffer<uint8_t> {
+public:
+  ImageData(const BenchmarkSettings* s):
+    StaticBuffer<uint8_t>(
+      s->image_size * s->image_size * s->num_channels * (s->skip_internal_preprocessing ? sizeof(float) : sizeof(uint8_t)),
+      s->images_dir) {}
+
+  void load(const std::string& filepath, int vl) {
+    //auto path = _dir + '/' + filename;
+    auto path = filepath;
+    std::ifstream file(path, std::ios::in | std::ios::binary);
+    if (!file) throw "Failed to open image data " + path;
+    file.read(reinterpret_cast<char*>(_buffer), _size);
+    if (vl > 1) {
+      std::cout << "Loaded file: " << path << std::endl;
+    } else if (vl) {
+      std::cout << 'l' << std::flush;
+    }
+  }
+};
+
+//----------------------------------------------------------------------
+
+class ResultData : public StaticBuffer<float> {
+public:
+  ResultData(const BenchmarkSettings* s): StaticBuffer<float>(
+    s->num_classes, s->result_dir) {}
+
+  void save(const std::string& filename) {
+    auto path = _dir + '/' + filename + ".txt";
+    std::ofstream file(path);
+    if (!file) throw "Unable to create result file " + path;
+    for (int i = 0; i < _size; i++)
+      file << _buffer[i] << std::endl;
+  }
+
+  int argmax() {
+    int arg_index = 0;
+    float max_value = _buffer[0];
+
+    for (int i = 1; i < _size; i++) {
+      if (_buffer[i] > max_value) {
+        arg_index = i;
+        max_value = _buffer[i];
+      }
+    }
+
+    return arg_index;
+  }
+};
+
+//----------------------------------------------------------------------
+
+class IBenchmark {
+public:
+  bool has_background_class = false;
+
+  virtual ~IBenchmark() {}
+  virtual void load_images(BenchmarkSession *session) = 0;
+  virtual void unload_images(size_t num_examples) = 0;
+  virtual void save_results() = 0;
+  virtual int get_next_result() = 0;
+  virtual void get_random_image(int img_idx) = 0;
+};
+
+
+template <typename TData, typename TInConverter, typename TOutConverter>
+class Benchmark : public IBenchmark {
+public:
+  Benchmark(const BenchmarkSettings* settings, TData *in_ptr, TData *out_ptr): _settings(settings) {
+    _in_ptr = in_ptr;
+    _out_ptr = out_ptr;
+    _in_converter.reset(new TInConverter(settings));
+    _out_converter.reset(new TOutConverter(settings));
+  }
+
+  void load_images(BenchmarkSession *_session) override {
+    session = _session;
+    auto vl = _settings->verbosity_level;
+
+    const std::vector<std::string>& image_filenames = session->current_filenames();
+
+    int length = image_filenames.size();
+    _current_buffer_size = length;
+    _in_batch = new std::unique_ptr<ImageData>[length];
+    _out_batch = new std::unique_ptr<ResultData>[length];
+    int i = 0;
+    for (auto image_file : image_filenames) {
+      _in_batch[i].reset(new ImageData(_settings));
+      _out_batch[i].reset(new ResultData(_settings));
+      _in_batch[i]->load(image_file, vl);
+      i++;
+    }
+  }
+
+  void unload_images(size_t num_examples) override {
+    for (size_t i = 0; i < num_examples; i++) {
+      _in_batch[i].reset();
+      _out_batch[i].reset();
+    }
+  }
+
+  void get_random_image(int img_idx) override {
+    _in_converter->convert(_in_batch[ session->idx2loc[img_idx] ].get(), _in_ptr);
+  }
+
+  int get_next_result() override {
+    int probe_offset = has_background_class ? 1 : 0;
+    ResultData *next_result_ptr = _out_batch[_out_buffer_index++].get();
+    _out_converter->convert(_out_ptr + probe_offset, next_result_ptr);
+    _out_buffer_index %= _current_buffer_size;
+    return next_result_ptr->argmax();
+  }
+
+  void save_results() override {
+    const std::vector<std::string>& image_filenames = session->current_filenames();
+    int i = 0;
+    for (auto image_file : image_filenames) {
+      _out_batch[i++]->save(image_file);
+    }
+  }
+
+private:
+  const BenchmarkSettings* _settings;
+  BenchmarkSession* session;
+  int _out_buffer_index = 0;
+  int _current_buffer_size = 0;
+  TData* _in_ptr;
+  TData* _out_ptr;
+  std::unique_ptr<ImageData> *_in_batch;
+  std::unique_ptr<ResultData> *_out_batch;
+  std::unique_ptr<TInConverter> _in_converter;
+  std::unique_ptr<TOutConverter> _out_converter;
+};
+
+//----------------------------------------------------------------------
+
+class IinputConverter {
+public:
+  virtual ~IinputConverter() {}
+  virtual void convert(const ImageData* source, void* target) = 0;
+};
+
+//----------------------------------------------------------------------
+
+class InCopy : public IinputConverter {
+public:
+  InCopy(const BenchmarkSettings* s) {}
+
+  void convert(const ImageData* source, void* target) {
+    uint8_t *uint8_target = static_cast<uint8_t*>(target);
+    std::copy(source->data(), source->data() + source->size(), uint8_target);
+  }
+};
+
+//----------------------------------------------------------------------
+
+class InNormalize : public IinputConverter {
+public:
+  InNormalize(const BenchmarkSettings* s):
+    _normalize_img(s->normalize_img),
+    _subtract_mean(s->subtract_mean),
+    _given_channel_means(s->given_channel_means),
+    _num_channels(s->num_channels) {
+  }
+
+  void convert(const ImageData* source, void* target) {
+    // Copy image data to target
+    float *float_target = static_cast<float*>(target);
+    float sum = 0;
+    for (int i = 0; i < source->size(); i++) {
+      float px = source->data()[i];
+      if (_normalize_img)
+        px = (px / 255.0 - 0.5) * 2.0;
+      sum += px;
+      float_target[i] = px;
+    }
+    // Subtract mean value if required
+    if (_subtract_mean) {
+      if (_given_channel_means) {
+        for (int i = 0; i < source->size(); i++) {
+          float_target[i] -= _given_channel_means[i % _num_channels]; // assuming NHWC order!
+        }
+      } else {
+        float mean = sum / static_cast<float>(source->size());
+        for (int i = 0; i < source->size(); i++)
+          float_target[i] -= mean;
+      }
+    }
+  }
+
+private:
+  const bool _normalize_img;
+  const bool _subtract_mean;
+  const float *_given_channel_means;
+  const int _num_channels;
+};
+
+//----------------------------------------------------------------------
+
+class OutCopy {
+public:
+  OutCopy(const BenchmarkSettings* s) {}
+
+  void convert(const float* source, ResultData* target) const {
+    std::copy(source, source + target->size(), target->data());
+  }
+};
+
+//----------------------------------------------------------------------
+
+class OutDequantize {
+public:
+  OutDequantize(const BenchmarkSettings* s) {}
+
+  void convert(const uint8_t* source, ResultData* target) const {
+    for (int i = 0; i < target->size(); i++)
+      target->data()[i] = source[i] / 255.0;
+  }
+};
+
+} // namespace CK
diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp b/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp
new file mode 100644
index 0000000000..9493f5430e
--- /dev/null
+++ b/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2018 cTuning foundation.
+ * See CK COPYRIGHT.txt for copyright details.
+ *
+ * See CK LICENSE for licensing details.
+ * See CK COPYRIGHT for copyright details.
+ */
+
+#include <algorithm>
+#include <numeric>
+#include <string>
+
+#include "loadgen.h"
+#include "query_sample_library.h"
+#include "system_under_test.h"
+#include "test_settings.h"
+
+
+#include "benchmark.h"
+
+#include "tensorflow/lite/kernels/register.h"
+#include "tensorflow/lite/model.h"
+
+using namespace std;
+using namespace CK;
+
+
+template <typename TData, typename TInConverter, typename TOutConverter>
+class TFLiteBenchmark : public Benchmark<TData, TInConverter, TOutConverter> {
+public:
+  TFLiteBenchmark(const BenchmarkSettings* settings, tflite::Interpreter* interpreter, int input_index)
+    : Benchmark<TData, TInConverter, TOutConverter>(
+        settings, interpreter->typed_tensor<TData>(input_index), interpreter->typed_output_tensor<TData>(0)) {
+  }
+};
+
+class Program {
+public:
+  Program () {
+    settings = new BenchmarkSettings(MODEL_TYPE::LITE);
+
+    session = new BenchmarkSession(settings);
+
+    cout << "\nLoading graph..." << endl;
+
+    model = tflite::FlatBufferModel::BuildFromFile(settings->graph_file().c_str());
+    if (!model)
+      throw "Failed to load graph from file " + settings->graph_file();
+
+    tflite::ops::builtin::BuiltinOpResolver resolver;
+    tflite::InterpreterBuilder(*model, resolver)(&interpreter);
+    if (!interpreter)
+      throw string("Failed to construct interpreter");
+    if (interpreter->AllocateTensors() != kTfLiteOk)
+      throw string("Failed to allocate tensors");
+
+    interpreter->SetNumThreads(settings->number_of_threads());
+
+    int input_index = interpreter->inputs()[0];
+    int output_index = interpreter->outputs()[0];
+    auto input_type = interpreter->tensor(input_index)->type;
+    auto output_type = interpreter->tensor(output_index)->type;
+    if (input_type != output_type)
+      throw format("Type of graph's input (%d) does not match type of its output (%d).",
+                   int(input_type), int(output_type));
+
+    switch (input_type) {
+      case kTfLiteFloat32:
+        if (settings->skip_internal_preprocessing)
+          benchmark.reset(new TFLiteBenchmark<float, InCopy, OutCopy>(settings, interpreter.get(), input_index));
+        else
+          benchmark.reset(new TFLiteBenchmark<float, InNormalize, OutCopy>(settings, interpreter.get(), input_index));
+        break;
+
+      case kTfLiteUInt8:
+        benchmark.reset(new TFLiteBenchmark<uint8_t, InCopy, OutDequantize>(settings, interpreter.get(), input_index));
+        break;
+
+      default:
+        throw format("Unsupported type of graph's input: %d. "
+                     "Supported types are: Float32 (%d), UInt8 (%d)",
+                     int(input_type), int(kTfLiteFloat32), int(kTfLiteUInt8));
+    }
+
+    TfLiteIntArray* in_dims = interpreter->tensor(input_index)->dims;
+    int in_num = in_dims->data[0];
+    int in_height = in_dims->data[1];
+    int in_width = in_dims->data[2];
+    int in_channels = in_dims->data[3];
+    cout << format("Input tensor dimensions (NHWC): %d*%d*%d*%d", in_num, in_height, in_width, in_channels) << endl;
+    if (in_height != settings->image_size ||
+        in_width != settings->image_size ||
+        in_channels != settings->num_channels)
+      throw format("Dimensions of graph's input do not correspond to dimensions of input image (%d*%d*%d*%d)",
+                   settings->batch_size, settings->image_size, settings->image_size, settings->num_channels);
+
+    TfLiteIntArray* out_dims = interpreter->tensor(output_index)->dims;
+    int out_num = out_dims->data[0];
+    int out_classes = out_dims->data[1];
+    cout << format("Output tensor dimensions: %d*%d", out_num, out_classes) << endl;
+    if (out_classes != settings->num_classes && out_classes != settings->num_classes+1)
+      throw format("Unsupported number of classes in graph's output tensor. "
+                   "Supported numbers are %d and %d",
+                   settings->num_classes, settings->num_classes+1);
+    benchmark->has_background_class = out_classes == settings->num_classes+1;
+  }
+
+  ~Program() {
+  }
+
+  //bool is_available_batch() {return session? session->get_next_batch(): false; }
+
+  void LoadNextBatch(const std::vector<mlperf::QuerySampleIndex>& img_indices) {
+    auto vl = settings->verbosity_level;
+
+    if (vl > 1) {
+      cout << "LoadNextBatch([";
+      for (auto idx : img_indices) {
+        cout << idx << ' ';
+      }
+      cout << "])" << endl;
+    } else if (vl) {
+      cout << 'B' << flush;
+    }
+    session->load_filenames(img_indices);
+    benchmark->load_images( session );
+    if (vl) {
+      cout << endl;
+    }
+  }
+
+  void ColdRun() {
+    auto vl = settings->verbosity_level;
+
+    if (vl > 1) {
+      cout << "Triggering a Cold Run..." << endl;
+    } else if (vl) {
+      cout << 'C' << flush;
+    }
+
+    if (interpreter->Invoke() != kTfLiteOk)
+      throw string("Failed to invoke tflite");
+  }
+
+  int InferenceOnce(int img_idx) {
+    benchmark->get_random_image( img_idx );
+    if (interpreter->Invoke() != kTfLiteOk)
+      throw string("Failed to invoke tflite");
+    return benchmark->get_next_result();
+  }
+
+  void UnloadBatch(const std::vector<mlperf::QuerySampleIndex>& img_indices) {
+    auto b_size = img_indices.size();
+
+    auto vl = settings->verbosity_level;
+
+    if (vl > 1) {
+      cout << "Unloading a batch[" << b_size << "]" << endl;
+    } else if (vl) {
+      cout << 'U' << flush;
+    }
+
+    benchmark->unload_images(b_size);
+    //benchmark->save_results( );
+  }
+
+  const int available_images_max() { return settings->list_of_available_imagefiles().size(); }
+  const int images_in_memory_max() { return settings->images_in_memory_max; }
+
+  BenchmarkSettings *settings;
+private:
+  BenchmarkSession *session;
+  unique_ptr<IBenchmark> benchmark;
+  unique_ptr<tflite::Interpreter> interpreter;
+  unique_ptr<tflite::FlatBufferModel> model;
+};
+
+
+class SystemUnderTestSingleStream : public mlperf::SystemUnderTest {
+public:
+  SystemUnderTestSingleStream(Program *_prg) : mlperf::SystemUnderTest() {
+    prg = _prg;
+    query_counter = 0;
+  };
+
+  ~SystemUnderTestSingleStream() override = default;
+
+  const std::string& Name() { return name_; }
+
+  void IssueQuery(const std::vector<mlperf::QuerySample>& samples) override {
+
+    ++query_counter;
+    auto vl = prg->settings->verbosity_level;
+    if (vl > 1) {
+      cout << query_counter << ") IssueQuery([" << samples.size() << "]," << samples[0].id << "," << samples[0].index << ")" << endl;
+    } else if (vl) {
+      cout << 'Q' << flush;
+    }
+
+    std::vector<mlperf::QuerySampleResponse> responses;
+    responses.reserve(samples.size());
+    // Use a std::vector instead of a variable-length array (VLA is non-standard C++)
+    std::vector<float> encoding_buffer(samples.size());
+    int i=0;
+    for (auto s : samples) {
+      int predicted_class = prg->InferenceOnce(s.index);
+
+      if (vl > 1) {
+        cout << "Query image index: " << s.index << " -> Predicted class: " << predicted_class << endl << endl;
+      } else if (vl) {
+        cout << 'p' << flush;
+      }
+
+      /* This would be the correct way to pass in one integer index:
+      */
+//    int single_value_buffer[] = { (int)predicted_class };
+
+      /* This conversion is subtly but terribly wrong
+         yet we use it here in order to use Guenther's parsing script:
+      */
+      encoding_buffer[i] = (float)predicted_class;
+      responses.push_back({s.id, uintptr_t(&encoding_buffer[i]), sizeof(encoding_buffer[i])});
+      ++i;
+    }
+    mlperf::QuerySamplesComplete(responses.data(), responses.size());
+  }
+
+  void FlushQueries() override {
+    auto vl = prg->settings->verbosity_level;
+    if (vl) {
+      cout << endl;
+    }
+  }
+
+  void ReportLatencyResults(const std::vector<mlperf::QuerySampleLatency>& latencies_ns) {
+
+    size_t size = latencies_ns.size();
+    uint64_t avg = accumulate(latencies_ns.begin(), latencies_ns.end(), uint64_t(0) )/size;
+
+    std::vector<mlperf::QuerySampleLatency> sorted_lat(latencies_ns.begin(), latencies_ns.end());
+    sort(sorted_lat.begin(), sorted_lat.end());
+
+    cout << endl << "------------------------------------------------------------";
+    cout << endl << "|           LATENCIES (in nanoseconds and fps)             |";
+    cout << endl << "------------------------------------------------------------";
+    size_t p50 = size * 0.5;
+    size_t p90 = size * 0.9;
+    cout << endl << "Number of queries run: " << size;
+    cout << endl << "Min latency: " << sorted_lat[0] << "ns (" << 1e9/sorted_lat[0] << " fps)";
+    cout << endl << "Median latency: " << sorted_lat[p50] << "ns (" << 1e9/sorted_lat[p50] << " fps)";
+    cout << endl << "Average latency: " << avg << "ns (" << 1e9/avg << " fps)";
+    cout << endl << "90 percentile latency: " << sorted_lat[p90] << "ns (" << 1e9/sorted_lat[p90] << " fps)";
+
+    if (!prg->settings->trigger_cold_run) {
+      cout << endl << "First query (cold model) latency: " << latencies_ns[0] << "ns (" << 1e9/latencies_ns[0] << " fps)";
+    }
+    cout << endl << "Max latency: " << sorted_lat[size-1] << "ns (" << 1e9/sorted_lat[size-1] << " fps)";
+    cout << endl << "------------------------------------------------------------ " << endl;
+  }
+
+private:
+  std::string name_{"TFLite_SUT"};
+  Program *prg;
+  long query_counter;
+};
+
+class QuerySampleLibrarySingleStream : public mlperf::QuerySampleLibrary {
+public:
+  QuerySampleLibrarySingleStream(Program *_prg) : mlperf::QuerySampleLibrary() {
+    prg = _prg;
+  };
+
+  ~QuerySampleLibrarySingleStream() = default;
+
+  const std::string& Name() override { return name_; }
+
+  size_t TotalSampleCount() override { return prg->available_images_max(); }
+
+  size_t PerformanceSampleCount() override { return prg->images_in_memory_max(); }
+
+  void LoadSamplesToRam( const std::vector<mlperf::QuerySampleIndex>& samples) override {
+    prg->LoadNextBatch(samples);
+    return;
+  }
+
+  void UnloadSamplesFromRam( const std::vector<mlperf::QuerySampleIndex>& samples) override {
+    prg->UnloadBatch(samples);
+    return;
+  }
+
+private:
+  std::string name_{"TFLite_QSL"};
+  Program *prg;
+};
+
+void TestSingleStream(Program *prg) {
+  SystemUnderTestSingleStream sut(prg);
+  QuerySampleLibrarySingleStream qsl(prg);
+
+  const std::string mlperf_conf_path = getenv_s("CM_MLPERF_CONF");
+  const std::string user_conf_path = getenv_s("CM_MLPERF_USER_CONF");
+  const std::string audit_conf_path = getenv_opt_s("CM_MLPERF_INFERENCE_AUDIT_PATH","");
+
+  std::string model_name = getenv_opt_s("CM_MODEL", "unknown_model");
+  std::string logs_dir = getenv_opt_s("CM_MLPERF_LOADGEN_LOGS_DIR", "");
+
+  const std::string scenario_string = getenv_s("CM_MLPERF_LOADGEN_SCENARIO");
+  const std::string mode_string = getenv_s("CM_MLPERF_LOADGEN_MODE");
+
+  std::cout << "Path to mlperf.conf : " << mlperf_conf_path << std::endl;
+  std::cout << "Path to user.conf : " << user_conf_path << std::endl;
+  std::cout << "Model Name: " << model_name << std::endl;
+  std::cout << "LoadGen Scenario: " << scenario_string << std::endl;
+  std::cout << "LoadGen Mode: " << ( mode_string != "" ? mode_string : "(empty string)" ) << std::endl;
+
+  mlperf::TestSettings ts;
+
+  // This should have been done automatically inside ts.FromConfig()!
+  ts.scenario = ( scenario_string == "SingleStream") ? mlperf::TestScenario::SingleStream
+              : ( scenario_string == "MultiStream") ? mlperf::TestScenario::MultiStream
+              : ( scenario_string == "Server") ? mlperf::TestScenario::Server
+              : ( scenario_string == "Offline") ? 
mlperf::TestScenario::Offline : mlperf::TestScenario::SingleStream; + + if( mode_string != "") + ts.mode = ( mode_string == "SubmissionRun") ? mlperf::TestMode::SubmissionRun + : ( mode_string == "accuracy") ? mlperf::TestMode::AccuracyOnly + : ( mode_string == "performance") ? mlperf::TestMode::PerformanceOnly + : ( mode_string == "findpeakperformance") ? mlperf::TestMode::FindPeakPerformance : mlperf::TestMode::SubmissionRun; + + if (ts.FromConfig(mlperf_conf_path, model_name, scenario_string)) { + std::cout << "Issue with mlperf.conf file at " << mlperf_conf_path << std::endl; + exit(1); + } + + if (ts.FromConfig(user_conf_path, model_name, scenario_string)) { + std::cout << "Issue with user.conf file at " << user_conf_path << std::endl; + exit(1); + } + + mlperf::LogSettings log_settings; + log_settings.log_output.outdir = logs_dir; + log_settings.log_output.prefix_with_datetime = false; + log_settings.enable_trace = false; + + + if (prg->settings->trigger_cold_run) { + prg->ColdRun(); + } + + mlperf::StartTest(&sut, &qsl, ts, log_settings, audit_conf_path); +} + +int main(int argc, char* argv[]) { + try { + Program *prg = new Program(); + TestSingleStream(prg); + delete prg; + } + catch (const string& error_message) { + cerr << "ERROR: " << error_message << endl; + return -1; + } + return 0; +} diff --git a/script/app-mlperf-inference-dummy/README.md b/script/app-mlperf-inference-dummy/README.md new file mode 100644 index 0000000000..1b0b931bf8 --- /dev/null +++ b/script/app-mlperf-inference-dummy/README.md @@ -0,0 +1,361 @@ +Automatically generated README for this automation recipe: **app-mlperf-inference-dummy** + +Category: **Modular MLPerf benchmarks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-mlperf-inference-dummy,5b71627383a94576) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-dummy)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy* +* Output cached? 
*False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy`
+
+`cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy[,variations] [--input_flags]`
+
+*or*
+
+`cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy"`
+
+`cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
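+
+For reference, a complete call could look as follows. This is an illustrative sketch: the variation and flag values are examples, and `count` maps to `CM_MLPERF_LOADGEN_QUERY_COUNT` as listed under "Script flags mapped to environment" below.
+
+```python
+import cmind
+
+# Run the dummy harness for resnet50 on CPU in performance mode
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,_resnet50,_cpu',
+                  'mode': 'performance',  # example value
+                  'count': '10',          # example value
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```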
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "reproduce mlcommons mlperf inference harness dummy-harness dummy[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *Internal group (variations should not be selected manually)* +
+ Click here to expand this section. + + * `_bert_` + - Workflow: + * `_gptj_` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,ml-model,gptj + * CM names: `--adr.['gptj-model']...` + - CM script: [get-ml-model-gptj](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-gptj) + * get,dataset,cnndm,_validation + - CM script: [get-dataset-cnndm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-cnndm) + * `_llama2-70b_` + - Workflow: + +
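+
+Base variations such as `_gptj_` above are not selected directly; leaf variations like `_gptj-99` inherit their dependencies and environment via `base`. The following is a simplified, hypothetical sketch of that merge, not the actual CM implementation:
+
+```python
+# Hypothetical data mirroring the structure of _cm.yaml
+base_variations = {'gptj_': {'deps': ['get,ml-model,gptj', 'get,dataset,cnndm,_validation']}}
+leaf = {'base': ['gptj_'], 'env': {'CM_MODEL': 'gptj-99'}}
+
+resolved = {'deps': [], 'env': {}}
+for b in leaf.get('base', []):
+    resolved['deps'] += base_variations[b].get('deps', [])  # inherit deps from the base
+resolved['env'].update(leaf.get('env', {}))                 # leaf env wins
+
+print(resolved['env']['CM_MODEL'])  # gptj-99
+```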
+ + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_pytorch,cpu` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_torch + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_pytorch,cuda` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_torch_cuda + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_singlestream,resnet50` + - Workflow: + * `_singlestream,retinanet` + - Workflow: + +
+ + + * Group "**backend**" +
+ Click here to expand this section. + + * **`_pytorch`** (default) + - Environment variables: + - *CM_MLPERF_BACKEND*: `pytorch` + - Workflow: + +
+ + + * Group "**batch-size**" +
+ Click here to expand this section. + + * `_bs.#` + - Workflow: + +
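+
+The `#` in `_bs.#` is a placeholder that is replaced by a concrete value when the variation is selected, e.g. `_bs.32`. An illustrative way to compose such a tag from Python (the batch size is an example value):
+
+```python
+import cmind
+
+batch_size = 32  # example value substituted for '#'
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': f'reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,_bs.{batch_size}',
+                  'out': 'con'})
+```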
+ + + * Group "**device**" +
+ Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `cpu` + - Workflow: + * `_cuda` + - Environment variables: + - *CM_MLPERF_DEVICE*: `gpu` + - *CM_MLPERF_DEVICE_LIB_NAMESPEC*: `cudart` + - Workflow: + +
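+
+When `_cuda` is selected, `CM_MLPERF_DEVICE_LIB_NAMESPEC=cudart` is consumed by compiled harness scripts in this repository (see the tflite harness `customize.py` earlier in this patch) to emit the matching linker flag. A minimal sketch of that pattern:
+
+```python
+env = {'CM_MLPERF_DEVICE': 'gpu', 'CM_MLPERF_DEVICE_LIB_NAMESPEC': 'cudart'}
+
+ldflags = []
+if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env:
+    ldflags.append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC'])
+
+print(ldflags)  # ['-lcudart']
+```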
+ + + * Group "**loadgen-scenario**" +
+ Click here to expand this section. + + * `_multistream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `MultiStream` + - Workflow: + * `_offline` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Offline` + - Workflow: + * `_server` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Server` + - Workflow: + * `_singlestream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `SingleStream` + - Workflow: + +
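+
+Each scenario variation only sets `CM_MLPERF_LOADGEN_SCENARIO`; the harness then maps that string to a LoadGen scenario. A hedged Python sketch of the usual mapping, analogous to the C++ ternary in the tflite harness above:
+
+```python
+import os
+
+KNOWN_SCENARIOS = {'SingleStream', 'MultiStream', 'Server', 'Offline'}
+
+scenario = os.environ.get('CM_MLPERF_LOADGEN_SCENARIO', 'SingleStream')
+if scenario not in KNOWN_SCENARIOS:
+    raise ValueError(f'Unknown LoadGen scenario: {scenario}')
+```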
+ + + * Group "**model**" +
+ Click here to expand this section. + + * `_bert-99` + - Environment variables: + - *CM_MODEL*: `bert-99` + - *CM_SQUAD_ACCURACY_DTYPE*: `float32` + - Workflow: + * `_bert-99.9` + - Environment variables: + - *CM_MODEL*: `bert-99.9` + - Workflow: + * `_gptj-99` + - Environment variables: + - *CM_MODEL*: `gptj-99` + - *CM_SQUAD_ACCURACY_DTYPE*: `float32` + - Workflow: + * `_gptj-99.9` + - Environment variables: + - *CM_MODEL*: `gptj-99.9` + - Workflow: + * `_llama2-70b-99` + - Environment variables: + - *CM_MODEL*: `llama2-70b-99` + - Workflow: + * `_llama2-70b-99.9` + - Environment variables: + - *CM_MODEL*: `llama2-70b-99.9` + - Workflow: + * **`_resnet50`** (default) + - Environment variables: + - *CM_MODEL*: `resnet50` + - Workflow: + * `_retinanet` + - Environment variables: + - *CM_MODEL*: `retinanet` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * `_fp16` + - Workflow: + * `_fp32` + - Workflow: + * `_uint8` + - Workflow: + +
+ + +#### Default variations + +`_cpu,_pytorch,_resnet50` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--count=value`  →  `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+* `--max_batchsize=value`  →  `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+* `--mlperf_conf=value`  →  `CM_MLPERF_CONF=value`
+* `--mode=value`  →  `CM_MLPERF_LOADGEN_MODE=value`
+* `--multistream_target_latency=value`  →  `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+* `--offline_target_qps=value`  →  `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+* `--output_dir=value`  →  `CM_MLPERF_OUTPUT_DIR=value`
+* `--performance_sample_count=value`  →  `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value`
+* `--rerun=value`  →  `CM_RERUN=value`
+* `--results_repo=value`  →  `CM_MLPERF_INFERENCE_RESULTS_REPO=value`
+* `--scenario=value`  →  `CM_MLPERF_LOADGEN_SCENARIO=value`
+* `--server_target_qps=value`  →  `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+* `--singlestream_target_latency=value`  →  `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+* `--skip_preprocess=value`  →  `CM_SKIP_PREPROCESS_DATASET=value`
+* `--skip_preprocessing=value`  →  `CM_SKIP_PREPROCESS_DATASET=value`
+* `--target_latency=value`  →  `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+* `--target_qps=value`  →  `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+* `--user_conf=value`  →  `CM_MLPERF_USER_CONF=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({..., "count":...})
+```
+
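+
+For example (an illustrative sketch with example values), the following call sets `CM_MLPERF_LOADGEN_QUERY_COUNT=10` and `CM_MLPERF_LOADGEN_MODE=accuracy` inside the script:
+
+```python
+import cmind as cm
+
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy',
+               'count': '10',
+               'mode': 'accuracy',
+               'out': 'con'})
+```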
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MLPERF_LOADGEN_SCENARIO: `Offline` +* CM_MLPERF_LOADGEN_MODE: `performance` +* CM_SKIP_PREPROCESS_DATASET: `no` +* CM_SKIP_MODEL_DOWNLOAD: `no` +* CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `dummy_harness` +* CM_MLPERF_SKIP_RUN: `no` + +
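+
+An illustrative way to override one of these defaults from the Python API, using the `env` dictionary mentioned above (the value is an example):
+
+```python
+import cmind as cm
+
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy',
+               'env': {'CM_MLPERF_LOADGEN_MODE': 'accuracy'},
+               'out': 'con'})
```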
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-dummy/_cm.yaml)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * detect,cpu
+       - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu)
+     * get,sys-utils-cm
+       - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm)
+     * get,mlcommons,inference,src
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,mlcommons,inference,loadgen
+       * CM names: `--adr.['inference-loadgen']...`
+       - CM script: [get-mlperf-inference-loadgen](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-loadgen)
+     * generate,user-conf,mlperf,inference
+       * CM names: `--adr.['user-conf-generator']...`
+       - CM script: [generate-mlperf-inference-user-conf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/generate-mlperf-inference-user-conf)
+     * get,generic-python-lib,_mlperf_logging
+       * CM names: `--adr.['mlperf-logging']...`
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,git,repo
+       * CM names: `--adr.inference-results inference-code...`
+       - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-dummy/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-dummy/_cm.yaml)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-dummy/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-dummy/_cm.yaml)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-dummy/customize.py)***
+  1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-dummy/_cm.yaml)***
+     * benchmark-mlperf
+       * `if (CM_MLPERF_SKIP_RUN not in ['yes', True])`
+       * CM names: `--adr.['runner', 'mlperf-runner']...`
+       - CM script: [benchmark-program-mlperf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-program-mlperf)
+     * save,mlperf,inference,state
+       * CM names: `--adr.['save-mlperf-inference-state']...`
+       - CM script: [save-mlperf-inference-implementation-state](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/save-mlperf-inference-implementation-state)
+
+___
+### Script output
+`cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy [,variations]" [--input_flags] -j`
+#### New environment keys (filter)
+
+* `CM_DATASET_*`
+* `CM_HW_NAME`
+* `CM_IMAGENET_ACCURACY_DTYPE`
+* `CM_MAX_EXAMPLES`
+* `CM_MLPERF_*`
+* `CM_ML_MODEL_*`
+* `CM_SQUAD_ACCURACY_DTYPE`
+#### New environment keys auto-detected from customize
diff --git a/script/app-mlperf-inference-dummy/_cm.yaml b/script/app-mlperf-inference-dummy/_cm.yaml
new file mode 100644
index 0000000000..00b99e54dd
--- /dev/null
+++ b/script/app-mlperf-inference-dummy/_cm.yaml
@@ -0,0 +1,291 @@
+# Identification of this CM script
+alias: app-mlperf-inference-dummy
+uid: 5b71627383a94576
+cache: false
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "Modular MLPerf benchmarks"
+
+
+# User-friendly tags to find this CM script
+tags:
+  - reproduce
+  - mlcommons
+  - mlperf
+  - inference
+  - harness
+  - dummy-harness
+  - dummy
+
+# Default environment
+default_env:
+  CM_MLPERF_LOADGEN_SCENARIO: Offline
+  CM_MLPERF_LOADGEN_MODE: performance
+  CM_SKIP_PREPROCESS_DATASET: 'no'
+  CM_SKIP_MODEL_DOWNLOAD: 'no'
+  CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: dummy_harness
+  CM_MLPERF_SKIP_RUN: 'no'
+
+env:
+  CM_CALL_MLPERF_RUNNER: 'no'
+
+# Map script inputs to environment variables
+input_mapping:
+  count: CM_MLPERF_LOADGEN_QUERY_COUNT
+  max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
+  mlperf_conf: CM_MLPERF_CONF
+  mode: CM_MLPERF_LOADGEN_MODE
+  output_dir: CM_MLPERF_OUTPUT_DIR
+  scenario: CM_MLPERF_LOADGEN_SCENARIO
+  user_conf: CM_MLPERF_USER_CONF
+  skip_preprocess: CM_SKIP_PREPROCESS_DATASET
+  skip_preprocessing: CM_SKIP_PREPROCESS_DATASET
+  target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
+  offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+  server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
+  singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+  multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+  performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+  rerun: CM_RERUN
+  results_repo: CM_MLPERF_INFERENCE_RESULTS_REPO
+
+new_state_keys:
+  - mlperf-inference-implementation
+  - CM_SUT_*
+
+# Env keys which are exposed to higher level scripts
+new_env_keys:
+  - CM_MLPERF_*
+  - CM_DATASET_*
+  - CM_HW_NAME
+  - CM_ML_MODEL_*
+  - CM_MAX_EXAMPLES
+  - CM_IMAGENET_ACCURACY_DTYPE
+  - CM_SQUAD_ACCURACY_DTYPE
+
+
+# Dependencies on other CM scripts
+
+deps:
+
+  # Detect host OS features
+  - tags: detect,os
+
+  # Detect host CPU features
+  - tags: detect,cpu
+
+  # Install system dependencies on a given host
+  - tags: get,sys-utils-cm
+
+
+  ########################################################################
+  # Install MLPerf inference dependencies
+
+  # Download MLPerf inference source
+  - tags: get,mlcommons,inference,src
+    names:
+    - inference-src
+
+  # Download MLPerf inference loadgen
+  - tags: get,mlcommons,inference,loadgen
+    names:
+    - inference-loadgen
+
+  # Creates user conf for given SUT
+  - tags: generate,user-conf,mlperf,inference
+    names:
+    - user-conf-generator
+
+  # Get MLPerf logging library
+  - tags: get,generic-python-lib,_mlperf_logging
+    names:
+    - mlperf-logging
+
+  - tags: get,git,repo
+    names:
+    - inference-results
+    - inference-code
+    update_tags_from_env_with_prefix:
+      _repo.: CM_MLPERF_INFERENCE_RESULTS_REPO
+    env:
+      CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO
+    extra_cache_tags: inference-implementation,mlperf
+
+# Post dependencies to run this app including for power measurement
+post_deps:
+
+  - names:
+    - runner
+    - mlperf-runner
+    skip_if_env:
+      CM_MLPERF_SKIP_RUN:
+      - 'yes'
+      - yes
+    tags: benchmark-mlperf
+
+  - tags: save,mlperf,inference,state
+    names:
+    - save-mlperf-inference-state
+
+# Variations to customize dependencies
+variations:
+  # Target devices
+  cpu:
+    group: device
+    default: true
+    env:
+      CM_MLPERF_DEVICE: cpu
+  cuda:
+    group: device
+    env:
+      CM_MLPERF_DEVICE: gpu
+      CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+
+  pytorch:
+    group: backend
+    default: true
+    env:
+      CM_MLPERF_BACKEND: pytorch
+
+  pytorch,cuda:
+    deps:
+    - tags: get,generic-python-lib,_torch_cuda
+
+  pytorch,cpu:
+    deps:
+    - tags: get,generic-python-lib,_torch
+
+  bs.#:
+    group: batch-size
+
+
+  # Reference MLPerf models
+  resnet50:
+    group: model
+    default: true
+    env:
+      CM_MODEL: resnet50
+
+  retinanet:
+    group: model
+    base:
+    - bs.1
+    env:
+      CM_MODEL: retinanet
+
+  bert_:
+    {}
+
+  bert-99:
+    group: model
+    base:
+    - bert_
+    env:
+      CM_MODEL: bert-99
+      CM_SQUAD_ACCURACY_DTYPE: float32
+
+  bert-99.9:
+    group: model
+    base:
+    - bert_
+    env:
+      CM_MODEL: bert-99.9
+
+  gptj_:
+    deps:
+    - tags: get,ml-model,gptj
+      names:
+      - gptj-model
+    - tags: get,dataset,cnndm,_validation
+
+  gptj-99:
+    group: model
+    base:
+    - gptj_
+    env:
+      CM_MODEL: gptj-99
+      CM_SQUAD_ACCURACY_DTYPE: float32
+
+  gptj-99.9:
+    group: model
+    base:
+    - gptj_
+    env:
+      CM_MODEL: gptj-99.9
+
+  llama2-70b_:
+    {}
+
+  llama2-70b-99:
+    group: model
+    base:
+    - llama2-70b_
+    env:
+      CM_MODEL: llama2-70b-99
+
+  llama2-70b-99.9:
+    group: model
+    base:
+    - llama2-70b_
+    env:
+      CM_MODEL: llama2-70b-99.9
+
+  singlestream:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+
+  singlestream,resnet50:
+    default_variations:
+      batch-size: bs.1
+
+  singlestream,retinanet:
+    default_variations:
+      batch-size: bs.1
+
+  multistream:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+
+  offline:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: Offline
+
+  server:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: Server
+
+  uint8:
+    group: precision
+  fp16:
+    group: precision
+  fp32:
+    group: precision
+
+docker:
+  docker_real_run: False
diff --git a/script/app-mlperf-inference-dummy/customize.py b/script/app-mlperf-inference-dummy/customize.py
new file mode 100644
index 0000000000..a462e26d68
--- /dev/null
+++ b/script/app-mlperf-inference-dummy/customize.py
@@ -0,0 +1,60 @@
+from cmind import utils
+import os
+import shutil
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    if os_info['platform'] == 'windows':
+        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+
+    env = i['env']
+
+    if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+        return {'return':0}
+
+    if 'CM_MODEL' not in env:
+        return {'return': 1, 'error': 'Please select a variation specifying the model to run'}
+    if 'CM_MLPERF_BACKEND' not in env:
+        return {'return': 1, 'error': 'Please select a variation specifying the backend'}
+    if 'CM_MLPERF_DEVICE' not in env:
+        return {'return': 1, 'error': 'Please select a variation specifying the device to run on'}
+
+    r = get_run_cmd(env['CM_MODEL'], i)
+    if r['return'] > 0:
+        return r
+    run_cmd = r['run_cmd']
+    run_dir = r['run_dir']
+    print(run_cmd)
+    print(run_dir)
+    # This is a dummy harness: print the assembled command and stop with an explicit error
+    return {'return': 1, 'error': 'app-mlperf-inference-dummy only prints the run command and does not execute it'}
+
+def get_run_cmd(model, i):
+    env = i['env']
+    if "gptj" in model:
+        scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
+        device = env['CM_MLPERF_DEVICE']
+        mode = env['CM_MLPERF_LOADGEN_MODE']
+        outdir = env['CM_MLPERF_OUTPUT_DIR']
+        mlperf_conf_path = env['CM_MLPERF_CONF']
+        user_conf_path = env['CM_MLPERF_USER_CONF']
+        api_server = env.get('CM_MLPERF_INFERENCE_API_SERVER', 'localhost')
+        model_path = env['GPTJ_CHECKPOINT_PATH']
+        dataset_path = env['CM_DATASET_CNNDM_EVAL_PATH']
+        precision = env['CM_MLPERF_MODEL_PRECISION']
+        if mode == "accuracy":
+            accuracy_string = " --accuracy "
+        else:
+            accuracy_string = ""
+
+        run_cmd = f"python3 -u main.py --scenario {scenario} --model-path {model_path} --api-server {api_server} --api-model-name gpt-j-cnn --mlperf-conf {mlperf_conf_path} {accuracy_string} --vllm --user-conf {user_conf_path} --dataset-path {dataset_path} --output-log-dir {outdir} --dtype float32 --device {device} "
+        submitter = "CTuning"
+        run_dir = os.path.join(env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "open", submitter, "code", "gptj-99")
+
+        return {'return': 0, 'run_cmd': run_cmd, 'run_dir': run_dir}
+
+    return {'return': 1, 'error': f'Run command generation for model {model} is not implemented yet'}
+
+def postprocess(i):
+
+    env = i['env']
+
+    return {'return':0}
diff --git a/script/app-mlperf-inference-dummy/run.sh b/script/app-mlperf-inference-dummy/run.sh
new file mode 100644
index 0000000000..ddcd0b5504
--- /dev/null
+++ b/script/app-mlperf-inference-dummy/run.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then
+  cd ${CM_RUN_DIR}
+  cmd=${CM_RUN_CMD}
+  echo "${cmd}"
+  eval "${cmd}"
+  test $? -eq 0 || exit $?
+fi
diff --git a/script/app-mlperf-inference-intel/README.md b/script/app-mlperf-inference-intel/README.md
new file mode 100644
index 0000000000..09ad18c2c7
--- /dev/null
+++ b/script/app-mlperf-inference-intel/README.md
@@ -0,0 +1,613 @@
+Automatically generated README for this automation recipe: **app-mlperf-inference-intel**
+
+Category: **Modular MLPerf benchmarks**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-mlperf-inference-intel,c05a90433bb04cc1) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-intel)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see in above meta description): *reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel,intel-harness,intel*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel,intel-harness,intel`
+
+`cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel,intel-harness,intel[,variations] [--input_flags]`
+
+*or*
+
+`cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel"`
+
+`cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel,intel-harness,intel',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
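+
+Dependency resolution can be customized from the same API via `adr`. A sketch with an example dependency name taken from the dependency lists below (the pinned version is an illustrative value):
+
+```python
+import cmind
+
+# Pin the version used by the 'python' dependency (illustrative value)
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel,intel-harness,intel',
+                  'adr': {'python': {'version': '3.9.6'}},
+                  'out': 'con'})
+```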
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel,intel-harness,intel"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel,intel-harness,intel) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *Internal group (variations should not be selected manually)* +
+ Click here to expand this section. + + * `_bert_` + - Environment variables: + - *CM_BENCHMARK*: `STANDALONE_BERT` + - *dataset_squad_tokenized_max_seq_length*: `384` + - *loadgen_buffer_size*: `10833` + - *loadgen_dataset_size*: `10833` + - Workflow: + * `_build-harness,bert_` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-sys-util,_rsync + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,dataset,original,squad + * CM names: `--adr.['squad-original']...` + - CM script: [get-dataset-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-squad) + * get,ml-model,bert-large,_pytorch,_int8 + * CM names: `--adr.['bert-large', 'ml-model']...` + - CM script: [get-ml-model-bert-large-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-bert-large-squad) + * get,generic-python-lib,_package.tokenization + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_calibration,gptj_` + - Workflow: + * `_gptj_` + - Environment variables: + - *CM_BENCHMARK*: `STANDALONE_GPTJ` + - Workflow: + * `_int4,gptj_` + - Environment variables: + - *INTEL_GPTJ_INT4*: `yes` + - Workflow: + * `_int8,gptj_` + - Environment variables: + - *INTEL_GPTJ_INT4*: `no` + - Workflow: + +
+ + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_bert_,network-client` + - Environment variables: + - *CM_BENCHMARK*: `NETWORK_BERT_CLIENT` + - Workflow: + * `_bert_,network-server` + - Environment variables: + - *CM_BENCHMARK*: `NETWORK_BERT_SERVER` + - Workflow: + * `_bert_,pytorch` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,conda,_name.bert-pt + - CM script: [get-conda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-conda) + * install,llvm,src,_tag.llvmorg-15.0.7,_runtimes.libcxx:libcxxabi:openmp,_clang,_release,_for-intel-mlperf-inference-v3.1-bert + - CM script: [install-llvm-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-llvm-src) + * get,generic-sys-util,_libffi7 + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,conda-package,_package.python + * CM names: `--adr.['conda-package', 'python']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.ncurses,_source.conda-forge + * CM names: `--adr.['conda-package', 'ncurses']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic-sys-util,_numactl + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,conda-package,_package.jemalloc,_source.conda-forge + * CM names: `--adr.['conda-package', 'jemalloc']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,pytorch,from.src,_for-intel-mlperf-inference-v3.1-bert + - CM script: [install-pytorch-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pytorch-from-src) + * install,onednn,from.src,_for-intel-mlperf-inference-v3.1-bert + - CM script: [install-onednn-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-onednn-from-src) + * install,transformers,from.src,_for-intel-mlperf-inference-v3.1-bert + - CM script: [install-transformers-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-transformers-from-src) + * `_bs.#` + - Environment variables: + - *ML_MLPERF_MODEL_BATCH_SIZE*: `#` + - Workflow: + * `_gptj_,build-harness` + - Workflow: + * `_gptj_,pytorch` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,conda,_name.gptj-pt + - CM script: [get-conda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-conda) + * get,python,_conda.gptj-pt + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * install,llvm,src,_tag.llvmorg-16.0.6,_clang,_release,_for-intel-mlperf-inference-v3.1-gptj + - CM script: [install-llvm-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-llvm-src) + * get,generic,conda-package,_package.ncurses,_source.conda-forge + * CM names: `--adr.['conda-package', 'ncurses']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic-sys-util,_numactl + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,conda-package,_package.jemalloc,_source.conda-forge + * CM names: `--adr.['conda-package', 'jemalloc']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * install,ipex,from.src,_for-intel-mlperf-inference-v3.1-gptj + - CM script: [install-ipex-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-ipex-from-src) + * get,generic,conda-package,_package.ninja + * `if (INTEL_GPTJ_INT4 == yes)` + * CM names: `--adr.['conda-package', 'ninja']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * install,tpp-pex,from.src,_for-intel-mlperf-inference-v3.1-gptj + * `if (INTEL_GPTJ_INT4 == yes)` + - CM script: [install-tpp-pytorch-extension](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-tpp-pytorch-extension) + * get,generic-python-lib,_package.transformers + * CM names: `--adr.['pip-package', 'transformers']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,mlcommons,inference,src + * CM names: `--adr.['inference-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + * get,mlcommons,inference,loadgen,_custom-python + * CM names: `--adr.['inference-loadgen']...` + - CM script: [get-mlperf-inference-loadgen](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-loadgen) + * get,ml-model,large-language-model,gptj + * CM names: `--adr.['ml-model', 'gptj-model', 'gpt-j-model']...` + - CM script: [get-ml-model-gptj](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-gptj) + * get,generic-python-lib,_package.datasets + * CM names: `--adr.['pip-package', 'datasets']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.accelerate + * CM names: `--adr.['pip-package', 'accelerate']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_custom-python,_package.torch,_url.git+https://github.com/pytorch/pytorch.git@927dc662386af052018212c7d01309a506fc94cd + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_int4,gptj_,build-harness` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * reproduce,mlperf,inference,intel,harness,_calibration + * CM names: `--adr.['calibration']...` + - CM script: [app-mlperf-inference-intel](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-intel) + * get,generic-python-lib,_package.optimum + * CM names: `--adr.['pip-package', 'optimum']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_resnet50,uint8` + - Environment variables: + - *CM_IMAGENET_ACCURACY_DTYPE*: `int8` + - Workflow: + * `_sapphire-rapids.112c,gptj-99,offline,int4` + - Environment variables: + - *NUM_PROC*: `4` + - *KMP_BLOCKTIME*: `1` + - *WORKERS_PER_PROC*: `3` + - Workflow: + * `_sapphire-rapids.112c,gptj-99,offline,int8` + - Environment variables: + - *KMP_BLOCKTIME*: `1` + - *WORKERS_PER_PROC*: `2` + - Workflow: + * `_sapphire-rapids.112c,gptj-99,server,int4` + - Environment variables: + - *KMP_BLOCKTIME*: `1` + - *WORKERS_PER_PROC*: `4` + - Workflow: + * `_sapphire-rapids.112c,gptj-99,server,int8` + - Environment variables: + - *KMP_BLOCKTIME*: `1` + - *WORKERS_PER_PROC*: `2` + - Workflow: + * `_sapphire-rapids.24c,bert-99` + - Environment variables: + - *WORKERS_PER_PROC*: `1` + - Workflow: + * `_sapphire-rapids.24c,gptj-99,offline,int4` + - Environment variables: + - *KMP_BLOCKTIME*: `10` + - *WORKERS_PER_PROC*: `1` + - Workflow: + * `_sapphire-rapids.24c,gptj-99,offline,int8` + - Environment variables: + - *KMP_BLOCKTIME*: `10` + - *WORKERS_PER_PROC*: `1` + - Workflow: + +
+ + + * Group "**device**" +
+ Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `cpu` + - Workflow: + +
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * **`_pytorch`** (default) + - Environment variables: + - *CM_MLPERF_BACKEND*: `pytorch` + - *CM_MLPERF_BACKEND_LIB_NAMESPEC*: `pytorch` + - Workflow: + +
+ + + * Group "**loadgen-batchsize**" +
+ Click here to expand this section. + + * `_batch_size.#` + - Environment variables: + - *CM_MLPERF_LOADGEN_BATCH_SIZE*: `#` + - Workflow: + +
+ + + * Group "**loadgen-scenario**" +
+ Click here to expand this section. + + * `_multistream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `MultiStream` + - Workflow: + * `_offline` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Offline` + - Workflow: + * `_server` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Server` + - Workflow: + * `_singlestream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `SingleStream` + - Workflow: + +
+ + + * Group "**model**" +
+ Click here to expand this section. + + * `_bert-99` + - Environment variables: + - *CM_MODEL*: `bert-99` + - *CM_SQUAD_ACCURACY_DTYPE*: `float32` + - *CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx` + - Workflow: + * `_bert-99.9` + - Environment variables: + - *CM_MODEL*: `bert-99.9` + - *CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/3733910/files/model.onnx` + - Workflow: + * `_gptj-99` + - Environment variables: + - *CM_MODEL*: `gptj-99` + - *CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/3733910/files/model.onnx` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int8` + - Workflow: + * `_gptj-99.9` + - Environment variables: + - *CM_MODEL*: `gptj-99.9` + - *CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/3733910/files/model.onnx` + - Workflow: + * **`_resnet50`** (default) + - Environment variables: + - *CM_MODEL*: `resnet50` + - *dataset_imagenet_preprocessed_input_square_side*: `224` + - *ml_model_has_background_class*: `YES` + - *ml_model_image_height*: `224` + - *loadgen_buffer_size*: `1024` + - *loadgen_dataset_size*: `50000` + - *CM_BENCHMARK*: `STANDALONE_CLASSIFICATION` + - Workflow: + * `_retinanet` + - Environment variables: + - *CM_MODEL*: `retinanet` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth` + - *dataset_imagenet_preprocessed_input_square_side*: `224` + - *ml_model_image_height*: `800` + - *ml_model_image_width*: `800` + - *loadgen_buffer_size*: `64` + - *loadgen_dataset_size*: `24576` + - *CM_BENCHMARK*: `STANDALONE_OBJECT_DETECTION` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
+ + + * Group "**network-mode**" +
+ Click here to expand this section. + + * `_network-server` + - Environment variables: + - *CM_MLPERF_NETWORK_RUN_MODE*: `network-server` + - Workflow: + * **`_standalone`** (default) + - Environment variables: + - *CM_MLPERF_NETWORK_RUN_MODE*: `standalone` + - Workflow: + +
+ + + * Group "**network-run-mode**" +
+ Click here to expand this section. + + * `_network-client` + - Environment variables: + - *CM_MLPERF_NETWORK_RUN_MODE*: `network-client` + - Workflow: + +
+ + + * Group "**power-mode**" +
+ Click here to expand this section. + + * `_maxn` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_MAXN*: `True` + - Workflow: + * `_maxq` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_MAXQ*: `True` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * `_fp32` + - Environment variables: + - *CM_IMAGENET_ACCURACY_DTYPE*: `float32` + - Workflow: + * `_int4` + - Workflow: + * `_uint8` + - Workflow: + +
+ + + * Group "**run-mode**" +
+ Click here to expand this section. + + * `_build-harness` + - Environment variables: + - *CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE*: `build_harness` + - Workflow: + * `_calibration` + - Environment variables: + - *CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE*: `calibration` + - Workflow: + * **`_run-harness`** (default) + - Environment variables: + - *CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE*: `run_harness` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * reproduce,mlperf,inference,intel,harness,_build-harness + * CM names: `--adr.['build-harness']...` + - CM script: [app-mlperf-inference-intel](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-intel) + * get,mlcommons,inference,src + * CM names: `--adr.['inference-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + * generate,user-conf,mlperf,inference + * CM names: `--adr.['user-conf-generator']...` + - CM script: [generate-mlperf-inference-user-conf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/generate-mlperf-inference-user-conf) + +
+ + + * Group "**sut**" +
+ Click here to expand this section. + + * `_sapphire-rapids.112c` + - Environment variables: + - *WARMUP*: ` --warmup` + - Workflow: + * `_sapphire-rapids.24c` + - Workflow: + +
+ + +#### Default variations + +`_cpu,_pytorch,_resnet50,_run-harness,_standalone` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+* `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+* `--mlperf_conf=value` → `CM_MLPERF_CONF=value`
+* `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+* `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+* `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+* `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value`
+* `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value`
+* `--rerun=value` → `CM_RERUN=value`
+* `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+* `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+* `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+* `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value`
+* `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value`
+* `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+* `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+* `--user_conf=value` → `CM_MLPERF_USER_CONF=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "count":...})
+```
+
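+For example, a more complete call might look as follows (a sketch only: the flag values are illustrative, and the tag string matches the script tags shown in the "Script output" section below):
+
+```python
+import cmind as cm
+
+# Illustrative values only: each input key below maps to the environment
+# variable listed above, e.g. "count" -> CM_MLPERF_LOADGEN_QUERY_COUNT.
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel',
+               'count': 100,
+               'scenario': 'Offline',
+               'mode': 'performance',
+               'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```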
+ +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+* CM_BATCH_COUNT: `1`
+* CM_BATCH_SIZE: `1`
+* CM_FAST_COMPILATION: `yes`
+* CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+* CM_MLPERF_LOADGEN_MODE: `performance`
+* CM_SKIP_PREPROCESS_DATASET: `no`
+* CM_SKIP_MODEL_DOWNLOAD: `no`
+* CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `intel`
+* CM_MLPERF_SKIP_RUN: `no`
+* verbosity: `1`
+* loadgen_trigger_cold_run: `0`
+
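+As an illustration, the same defaults can be overridden programmatically through the `env` dictionary (a sketch; the key names are taken from the list above and the values are illustrative):
+
+```python
+import cmind as cm
+
+# Equivalent to passing --env.CM_MLPERF_LOADGEN_MODE=accuracy on the command
+# line; both keys below appear in the default-environment list above.
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel',
+               'env': {'CM_MLPERF_LOADGEN_MODE': 'accuracy',
+                       'CM_MLPERF_LOADGEN_SCENARIO': 'Server'}})
+if r['return'] > 0:
+    print(r['error'])
+```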
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-intel/_cm.yaml)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * detect,cpu
+       - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu)
+     * get,sys-utils-cm
+       - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm)
+     * get,generic-python-lib,_mlperf_logging
+       * CM names: `--adr.['mlperf-logging']...`
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,ml-model,resnet50,_fp32,_onnx,_from-tf
+       * `if (CM_MODEL == resnet50)`
+       * CM names: `--adr.['resnet50-model', 'ml-model']...`
+       - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50)
+     * compile,intel,model,_resnet50
+       * `if (CM_MODEL == resnet50)`
+       * CM names: `--adr.['resnet50-compiler']...`
+       - *Warning: no scripts found*
+     * get,dataset,imagenet,preprocessed,_for.resnet50,_NHWC,_full
+       * `if (CM_MODEL == resnet50)`
+       * CM names: `--adr.['imagenet-preprocessed', 'dataset-preprocessed']...`
+       - CM script: [get-preprocessed-dataset-imagenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-imagenet)
+     * compile,intel,model,_retinanet
+       * `if (CM_MODEL == retinanet)`
+       * CM names: `--adr.['retinanet-compiler']...`
+       - *Warning: no scripts found*
+     * get,dataset,preprocessed,openimages,_for.retinanet.onnx,_NCHW,_validation,_custom-annotations
+       * `if (CM_MODEL == retinanet)`
+       * CM names: `--adr.['openimages-preprocessed', 'dataset-preprocessed']...`
+       - CM script: [get-preprocessed-dataset-openimages](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-openimages)
+     * get,mlperf,inference,results,_ctuning
+       * CM names: `--adr.['inference-results']...`
+       - CM script: [get-mlperf-inference-results](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-results)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-intel/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-intel/_cm.yaml)
+  1. ***Run native script if exists***
+     * [run_bert_harness.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-intel/run_bert_harness.sh)
+     * [run_gptj_harness.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-intel/run_gptj_harness.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-intel/_cm.yaml)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-intel/customize.py)***
***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-intel/_cm.yaml)*** + * benchmark-mlperf + * `if (CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE == run_harness) AND (CM_MLPERF_SKIP_RUN not in ['yes', True])` + * CM names: `--adr.['runner', 'mlperf-runner']...` + - CM script: [benchmark-program-mlperf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-program-mlperf) + * save,mlperf,inference,state + * CM names: `--adr.['save-mlperf-inference-state']...` + - CM script: [save-mlperf-inference-implementation-state](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/save-mlperf-inference-implementation-state) + +___ +### Script output +`cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/app-mlperf-inference-intel/_cm.yaml b/script/app-mlperf-inference-intel/_cm.yaml new file mode 100644 index 0000000000..b33c0e8b50 --- /dev/null +++ b/script/app-mlperf-inference-intel/_cm.yaml @@ -0,0 +1,600 @@ +# Identification of this CM script +alias: app-mlperf-inference-intel +uid: c05a90433bb04cc1 +cache: false +can_force_cache: true + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf benchmarks" + + +# User-friendly tags to find this CM script +tags: + - reproduce + - mlcommons + - mlperf + - inference + - harness + - intel-harness + - intel + - intel-harness + - intel + +# Default environment +default_env: + CM_BATCH_COUNT: '1' + CM_BATCH_SIZE: '1' + CM_FAST_COMPILATION: 'yes' + CM_MLPERF_LOADGEN_SCENARIO: Offline + CM_MLPERF_LOADGEN_MODE: performance + CM_SKIP_PREPROCESS_DATASET: 'no' + CM_SKIP_MODEL_DOWNLOAD: 'no' + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: intel + CM_MLPERF_SKIP_RUN: 'no' + verbosity: 1 + loadgen_trigger_cold_run: 0 + +env: + CM_CALL_MLPERF_RUNNER: 'no' + CUDA_VISIBLE_DEVICES: '' + USE_CUDA: '0' + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: CM_MLPERF_CONF + mode: CM_MLPERF_LOADGEN_MODE + output_dir: CM_MLPERF_OUTPUT_DIR + performance_sample_count: CM_MLPERF_PERFORMANCE_SAMPLE_COUNT + scenario: CM_MLPERF_LOADGEN_SCENARIO + user_conf: CM_MLPERF_USER_CONF + skip_preprocess: CM_SKIP_PREPROCESS_DATASET + skip_preprocessing: CM_SKIP_PREPROCESS_DATASET + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + rerun: CM_RERUN + +new_state_keys: + - mlperf-inference-implementation + - CM_SUT_* + + + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + + # Get MLPerf logging library + - tags: get,generic-python-lib,_mlperf_logging + names: + - mlperf-logging + + + ######################################################################## + # Install ResNet50 model (ONNX) and ImageNet + + - enable_if_env: + 
+      CM_MODEL:
+      - resnet50
+    names:
+    - resnet50-model
+    - ml-model
+    tags: get,ml-model,resnet50,_fp32,_onnx,_from-tf
+
+  - enable_if_env:
+      CM_MODEL:
+      - resnet50
+    tags: compile,intel,model,_resnet50
+    names:
+    - resnet50-compiler
+
+  - enable_if_env:
+      CM_MODEL:
+      - resnet50
+    names:
+    - imagenet-preprocessed
+    - dataset-preprocessed
+    tags: get,dataset,imagenet,preprocessed,_for.resnet50,_NHWC,_full
+
+
+
+  ########################################################################
+  # Install OpenImages
+
+  - enable_if_env:
+      CM_MODEL:
+      - retinanet
+    tags: compile,intel,model,_retinanet
+    names:
+    - retinanet-compiler
+
+  - enable_if_env:
+      CM_MODEL:
+      - retinanet
+    names:
+    - openimages-preprocessed
+    - dataset-preprocessed
+    tags: get,dataset,preprocessed,openimages,_for.retinanet.onnx,_NCHW,_validation,_custom-annotations
+
+
+
+  ########################################################################
+  # Install MLPerf inference dependencies
+
+  - tags: get,mlperf,inference,results,_ctuning
+    names:
+    - inference-results
+    version: v3.1
+
+
+# Post dependencies to run this app including for power measurement
+post_deps:
+
+  - names:
+    - runner
+    - mlperf-runner
+    skip_if_env:
+      CM_MLPERF_SKIP_RUN:
+      - 'yes'
+      - yes
+    enable_if_env:
+      CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE:
+      - run_harness
+    tags: benchmark-mlperf
+
+  - tags: save,mlperf,inference,state
+    names:
+    - save-mlperf-inference-state
+
+# Variations to customize dependencies
+variations:
+  # Target devices
+  cpu:
+    group: device
+    default: true
+    env:
+      CM_MLPERF_DEVICE: cpu
+
+  # ML engine
+  pytorch:
+    group: framework
+    default: true
+    env:
+      CM_MLPERF_BACKEND: pytorch
+      CM_MLPERF_BACKEND_LIB_NAMESPEC: pytorch
+
+  bs.#:
+    env:
+      ML_MLPERF_MODEL_BATCH_SIZE: "#"
+
+  # Reference MLPerf models
+  resnet50:
+    group: model
+    default: true
+    env:
+      CM_MODEL: resnet50
+      dataset_imagenet_preprocessed_input_square_side: 224
+      ml_model_has_background_class: "YES"
+      ml_model_image_height: 224
+      loadgen_buffer_size: 1024
+      loadgen_dataset_size: 50000
+      CM_BENCHMARK: STANDALONE_CLASSIFICATION
+
+  resnet50,uint8:
+    env:
+      CM_IMAGENET_ACCURACY_DTYPE: int8
+
+  bert-99:
+    deps:
+    - tags: compile,intel,model,_bert-99
+      names:
+      - bert-99-compiler
+    env:
+      CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8
+      CM_ML_MODEL_INPUTS_DATA_TYPE: int8
+
+  retinanet:
+    group: model
+    env:
+      CM_MODEL: retinanet
+      CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth"
+      dataset_imagenet_preprocessed_input_square_side: 224
+      ml_model_image_height: 800
+      ml_model_image_width: 800
+      loadgen_buffer_size: 64
+      loadgen_dataset_size: 24576
+      CM_BENCHMARK: STANDALONE_OBJECT_DETECTION
+
+    deps:
+    - tags: get,generic-python-lib,_numpy
+
+
+  bert_:
+    env:
+      CM_BENCHMARK: STANDALONE_BERT
+      dataset_squad_tokenized_max_seq_length: 384
+      loadgen_buffer_size: 10833
+      loadgen_dataset_size: 10833
+
+  bert_,pytorch:
+    deps:
+    - tags: get,conda,_name.bert-pt
+    - tags: install,llvm,src,_tag.llvmorg-15.0.7,_runtimes.libcxx:libcxxabi:openmp,_clang,_release,_for-intel-mlperf-inference-v3.1-bert
+    - tags: get,generic-sys-util,_libffi7
+    - tags: get,generic,conda-package,_package.python
+      names:
+      - conda-package
+      - python
+      version: "3.8"
+    - names:
+      - conda-package
+      - ncurses
+      tags: get,generic,conda-package,_package.ncurses,_source.conda-forge
+
+    - tags: get,generic-sys-util,_numactl
+    - tags: get,generic,conda-package,_package.jemalloc,_source.conda-forge
+      names:
+      - conda-package
+      - jemalloc
+    - tags: get,pytorch,from.src,_for-intel-mlperf-inference-v3.1-bert
+    - tags: install,onednn,from.src,_for-intel-mlperf-inference-v3.1-bert
+    - tags: install,transformers,from.src,_for-intel-mlperf-inference-v3.1-bert
+
+  gptj_:
+    env:
+      CM_BENCHMARK: STANDALONE_GPTJ
+
+  gptj_,build-harness:
+    docker:
+      run: false
+
+  int4,gptj_,build-harness:
+    deps:
+    - tags: reproduce,mlperf,inference,intel,harness,_calibration
+      inherit_variation_tags: true
+      names:
+      - calibration
+      skip_inherit_variation_groups:
+      - run-mode
+      - device-info
+      - sut
+      - loadgen-batchsize
+      force_cache: true
+    - tags: get,generic-python-lib,_package.optimum
+      names:
+      - pip-package
+      - optimum
+
+  gptj_,pytorch:
+    adr:
+      conda-package:
+        tags: _name.gptj-pt
+    deps:
+    - tags: get,conda,_name.gptj-pt
+    - tags: get,python,_conda.gptj-pt
+      adr:
+        conda-python:
+          version: "3.9"
+    - tags: install,llvm,src,_tag.llvmorg-16.0.6,_clang,_release,_for-intel-mlperf-inference-v3.1-gptj
+    - names:
+      - conda-package
+      - ncurses
+      tags: get,generic,conda-package,_package.ncurses,_source.conda-forge
+    - tags: get,generic-sys-util,_numactl
+    - tags: get,generic,conda-package,_package.jemalloc,_source.conda-forge
+      names:
+      - conda-package
+      - jemalloc
+    - tags: install,ipex,from.src,_for-intel-mlperf-inference-v3.1-gptj
+    - tags: get,generic,conda-package,_package.ninja
+      names:
+      - conda-package
+      - ninja
+      enable_if_env:
+        INTEL_GPTJ_INT4:
+        - 'yes'
+    - tags: install,tpp-pex,from.src,_for-intel-mlperf-inference-v3.1-gptj
+      enable_if_env:
+        INTEL_GPTJ_INT4:
+        - 'yes'
+    - tags: get,generic-python-lib,_package.transformers
+      names:
+      - pip-package
+      - transformers
+      version: "4.28.1"
+    - tags: get,mlcommons,inference,src
+      names:
+      - inference-src
+    - tags: get,mlcommons,inference,loadgen,_custom-python
+      names:
+      - inference-loadgen
+      env:
+        CM_PYTHON_BIN_WITH_PATH: "<<<CM_CONDA_BIN_PATH>>>/python3"
+    - tags: get,ml-model,large-language-model,gptj
+      names:
+      - ml-model
+      - gptj-model
+      - gpt-j-model
+    - tags: get,generic-python-lib,_package.datasets
+      names:
+      - pip-package
+      - datasets
+    - tags: get,generic-python-lib,_package.accelerate
+      names:
+      - pip-package
+      - accelerate
+    - tags: get,generic-python-lib,_custom-python,_package.torch,_url.git+https://github.com/pytorch/pytorch.git@927dc662386af052018212c7d01309a506fc94cd
+      env:
+        CM_PYTHON_BIN_WITH_PATH: "<<<CM_CONDA_BIN_PATH>>>/python3"
+        "+ CXXFLAGS":
+        - "-Wno-nonnull"
+        - "-Wno-maybe-uninitialized"
+        - "-Wno-uninitialized"
+        - "-Wno-free-nonheap-object"
+
+  gptj-99:
+    group: model
+    base:
+    - gptj_
+    env:
+      CM_MODEL: gptj-99
+      CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx"
+      CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8
+      CM_ML_MODEL_INPUTS_DATA_TYPE: int8
+
+  gptj-99.9:
+    group: model
+    base:
+    - gptj_
+    env:
+      CM_MODEL: gptj-99.9
+      CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx"
+
+  standalone:
+    group: network-mode
+    default: true
+    env:
+      CM_MLPERF_NETWORK_RUN_MODE: standalone
+
+  network-server:
+    group: network-mode
+    env:
+      CM_MLPERF_NETWORK_RUN_MODE: network-server
+
+  network-client:
+    group: network-run-mode
+    env:
+      CM_MLPERF_NETWORK_RUN_MODE: network-client
+
+  bert_,network-server:
+    env:
+      CM_BENCHMARK: NETWORK_BERT_SERVER
+
+  bert_,network-client:
+    env:
+      CM_BENCHMARK: NETWORK_BERT_CLIENT
+
+  bert-99:
+    group: model
+    base:
+    - bert_
+    env:
+      CM_MODEL: bert-99
+      CM_SQUAD_ACCURACY_DTYPE: float32
+      CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx"
+
bert-99.9: + group: model + base: + - bert_ + env: + CM_MODEL: bert-99.9 + CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" + + batch_size.#: + group: loadgen-batchsize + env: + CM_MLPERF_LOADGEN_BATCH_SIZE: "#" + + + build-harness: + group: run-mode + env: + CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: build_harness + new_env_keys: + - CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH + - CM_ML_MODEL_* + - DATA_PATH + + calibration: + group: run-mode + env: + CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: calibration + new_env_keys: + - CM_ML_MODEL_* + - INT4_CALIBRATION_DIR + + calibration,gptj_: + deps: [] + + build-harness,bert_: + deps: + - tags: get,generic-sys-util,_rsync + - tags: get,dataset,original,squad + names: + - squad-original + - tags: get,ml-model,bert-large,_pytorch,_int8 + names: + - bert-large + - ml-model + - tags: get,generic-python-lib,_package.tokenization + + + run-harness: + group: run-mode + default: true + deps: + - tags: reproduce,mlperf,inference,intel,harness,_build-harness + inherit_variation_tags: true + names: + - build-harness + skip_inherit_variation_groups: + - run-mode + - device-info + - sut + - loadgen-batchsize + force_cache: true + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + # Creates user conf for given SUT + - tags: generate,user-conf,mlperf,inference + names: + - user-conf-generator + + env: + CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: run_harness + + # Env keys which are exposed to higher level scripts + new_env_keys: + - CM_MLPERF_* + - CM_DATASET_* + - CM_HW_NAME + - CM_ML_MODEL_* + - CM_MAX_EXAMPLES + - CM_IMAGENET_ACCURACY_DTYPE + - CM_SQUAD_ACCURACY_DTYPE + + + + maxq: + group: power-mode + env: + CM_MLPERF_NVIDIA_HARNESS_MAXQ: yes + + maxn: + group: power-mode + env: + CM_MLPERF_NVIDIA_HARNESS_MAXN: yes + + singlestream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: SingleStream + + multistream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: MultiStream + offline: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Offline + server: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Server + + int4: + group: precision + + uint8: + group: precision + adr: + dataset-preprocessed: + tags: _uint8,_rgb8 + + int4,gptj_: + env: + INTEL_GPTJ_INT4: 'yes' + + int8,gptj_: + env: + INTEL_GPTJ_INT4: 'no' + + fp32: + group: precision + adr: + dataset-preprocessed: + tags: _float32,_rgb32 + env: + CM_IMAGENET_ACCURACY_DTYPE: float32 + + sapphire-rapids.112c: + group: sut + env: + WARMUP: " --warmup" + + sapphire-rapids.24c: + group: sut + + sapphire-rapids.24c,gptj-99,offline,int8: + env: + KMP_BLOCKTIME: 10 + WORKERS_PER_PROC: 1 + default_env: + CM_MLPERF_LOADGEN_BATCH_SIZE: 8 + + sapphire-rapids.24c,gptj-99,offline,int4: + env: + KMP_BLOCKTIME: 10 + WORKERS_PER_PROC: 1 + default_env: + CM_MLPERF_LOADGEN_BATCH_SIZE: 8 + + sapphire-rapids.112c,gptj-99,offline,int8: + env: + KMP_BLOCKTIME: 1 + WORKERS_PER_PROC: 2 + default_env: + CM_MLPERF_LOADGEN_BATCH_SIZE: 14 + + sapphire-rapids.112c,gptj-99,offline,int4: + env: + NUM_PROC: 4 + KMP_BLOCKTIME: 1 + WORKERS_PER_PROC: 3 + default_env: + CM_MLPERF_LOADGEN_BATCH_SIZE: 8 + + sapphire-rapids.112c,gptj-99,server,int8: + env: + KMP_BLOCKTIME: 1 + WORKERS_PER_PROC: 2 + default_env: + CM_MLPERF_LOADGEN_BATCH_SIZE: 1 + + sapphire-rapids.112c,gptj-99,server,int4: + env: + KMP_BLOCKTIME: 1 + WORKERS_PER_PROC: 4 + default_env: + CM_MLPERF_LOADGEN_BATCH_SIZE: 1 + + 
sapphire-rapids.24c,bert-99: + env: + WORKERS_PER_PROC: 1 + +docker: + docker_real_run: False diff --git a/script/app-mlperf-inference-intel/build_bert_harness.sh b/script/app-mlperf-inference-intel/build_bert_harness.sh new file mode 100644 index 0000000000..4a2b957a91 --- /dev/null +++ b/script/app-mlperf-inference-intel/build_bert_harness.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH +echo $PWD + +if [ ! -d harness ]; then + mkdir -p harness +fi + +rm -rf ${CM_CONDA_LIB_PATH}/cmake/mkl/* + +rsync -avz --exclude=".git" ${CM_HARNESS_CODE_ROOT}/ harness/ +pushd harness +rsync -avz --exclude=".git" ${CM_MLPERF_INFERENCE_SOURCE}/ inference/ +test $? -eq 0 || exit $? +pushd mlperf_plugins +rm -rf onednn +rsync -avz --exclude=".git" ${CM_ONEDNN_INSTALLED_PATH}/ onednn/ +test $? -eq 0 || exit $? +popd + +mkdir build +pushd build +cmake -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang -DBUILD_TPPS_INTREE=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="$(dirname $(python3 -c 'import torch; print(torch.__file__)'));../cmake/Modules" -GNinja -DUSERCP=ON .. +test $? -eq 0 || exit $? +ninja +test $? -eq 0 || exit $? +popd +test $? -eq 0 || exit $? + +mkdir -p bert/dataset +cd bert +ln -sf ${CM_DATASET_SQUAD_VAL_PATH} dataset/dev-v1.1.json +test $? -eq 0 || exit $? +if [ ! -d model ]; then + git clone https://huggingface.co/bert-large-uncased model + cd model + rm pytorch_model.bin + ln -sf ${CM_ML_MODEL_FILE_WITH_PATH} pytorch_model.bin + test $? -eq 0 || exit $? + cd .. +fi + +cd .. +pip install boto3 tokenization +test $? -eq 0 || exit $? +bash convert.sh +test $? -eq 0 || exit $? +popd + + diff --git a/script/app-mlperf-inference-intel/build_gptj_harness.sh b/script/app-mlperf-inference-intel/build_gptj_harness.sh new file mode 100644 index 0000000000..149dcf6608 --- /dev/null +++ b/script/app-mlperf-inference-intel/build_gptj_harness.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH +echo $PWD + +if [ ! -d harness ]; then + mkdir -p harness +fi + +echo ${CM_HARNESS_CODE_ROOT} +cd ${CM_HARNESS_CODE_ROOT} +cd utils +python -m pip install . +test $? -eq 0 || exit $? +cd ../ + + +mkdir -p data +export WORKLOAD_DATA=$(pwd)/data +mkdir -p ${WORKLOAD_DATA}/model + +export INT8_MODEL_DIR=${WORKLOAD_DATA}/gpt-j-int8-model +export INT4_MODEL_DIR=${WORKLOAD_DATA}/gpt-j-int4-model + +python download-calibration-dataset.py --calibration-list-file calibration-list.txt --output-dir ${WORKLOAD_DATA}/calibration-data + +python download-dataset.py --split validation --output-dir ${WORKLOAD_DATA}/validation-data +if [[ -f ${INT8_MODEL_DIR}/best_model.pt ]]; then + exit 0 +fi + +export CALIBRATION_DATA_JSON=${WORKLOAD_DATA}/calibration-data/cnn_dailymail_calibration.json +export VALIDATION_DATA_JSON=${WORKLOAD_DATA}/validation-data/cnn_dailymail_validation.json +#export INT4_CALIBRATION_DIR=${WORKLOAD_DATA}/quantized-int4-model +#sudo -E bash run_quantization.sh +#bash run_quantization.sh +echo "${RUN_QUANTIZATION_CMD}" +eval "${RUN_QUANTIZATION_CMD}" +test $? -eq 0 || exit $? 
diff --git a/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh b/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh new file mode 100644 index 0000000000..75a0774d50 --- /dev/null +++ b/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +cd ${CM_MLPERF_INFERENCE_INTEL_CALIBRATION_PATH} +CUR_DIR=$(pwd) +export WORKLOAD_DATA=${CUR_DIR}/data +mkdir -p ${WORKLOAD_DATA} + +python download-calibration-dataset.py --calibration-list-file calibration-list.txt --output-dir ${WORKLOAD_DATA}/calibration-data +test $? -eq 0 || exit $? + +export CALIBRATION_DATA_JSON=${WORKLOAD_DATA}/calibration-data/cnn_dailymail_calibration.json + +export CHECKPOINT_DIR=${WORKLOAD_DATA}/gpt-j-checkpoint +cmd="ln -s ${GPTJ_CHECKPOINT_PATH} ${CHECKPOINT_DIR}" +echo $cmd +eval $cmd + +export QUANTIZED_MODEL_DIR=${WORKLOAD_DATA}/quantized-int4-model +mkdir -p ${QUANTIZED_MODEL_DIR} + +wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh +CONDA_INSTALL_PATH=`pwd`/miniconda3 +rm -rf ${CONDA_INSTALL_PATH} +bash miniconda.sh -b -p ${CONDA_INSTALL_PATH} +export CONDA_PREFIX=${CONDA_INSTALL_PATH} + +export PATH=${CONDA_INSTALL_PATH}/bin:$PATH +conda install -y python=3.9.0 numpy=1.23.5 +python -m pip install transformers==4.21.2 +python -m pip install texttable +python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu +python -m pip install datasets +bash run_calibration_int4.sh +test $? -eq 0 || exit $? +#exit 1 diff --git a/script/app-mlperf-inference-intel/customize.py b/script/app-mlperf-inference-intel/customize.py new file mode 100644 index 0000000000..18a03d45d7 --- /dev/null +++ b/script/app-mlperf-inference-intel/customize.py @@ -0,0 +1,132 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + env = i['env'] + + if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + return {'return':0} + + import json + if 'CM_MODEL' not in env: + return {'return': 1, 'error': 'Please select a variation specifying the model to run'} + if 'CM_MLPERF_BACKEND' not in env: + return {'return': 1, 'error': 'Please select a variation specifying the backend'} + if 'CM_MLPERF_DEVICE' not in env: + return {'return': 1, 'error': 'Please select a variation specifying the device to run on'} + + ml_model = env['CM_MODEL'] + master_model = ml_model.replace("-99", "").replace("-99.9","") + master_model = master_model.replace("gptj", "gpt-j") + + backend = env['CM_MLPERF_BACKEND'] + device = env['CM_MLPERF_DEVICE'] + harness_root = os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'code', ml_model, backend+"-"+device) + + env['CM_HARNESS_CODE_ROOT'] = harness_root + + if env.get('CM_MODEL') == "resnet50": + pass + + elif "bert" in env.get('CM_MODEL'): + pass + elif "retinanet" in env.get('CM_MODEL'): + pass + elif "gptj" in env.get('CM_MODEL'): + env['CHECKPOINT_DIR'] = env['GPTJ_CHECKPOINT_PATH'] + + script_path = i['run_script_input']['path'] + if env['CM_MODEL'] == "retinanet": + env['CM_DATASET_LIST'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + + + + if 'CM_MLPERF_CONF' not in env: + env['CM_MLPERF_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'CM_MLPERF_USER_CONF' not in env: + env['CM_MLPERF_USER_CONF'] = 
os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + + + loadgen_mode = env['CM_MLPERF_LOADGEN_MODE'] + env['CONDA_PREFIX'] = env['CM_CONDA_PREFIX'] + + if env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "calibration": + calibration_root = os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'calibration', master_model, backend+"-"+device) + + if "gpt" in env['CM_MODEL']: + i['run_script_input']['script_name'] = "calibrate_gptj_int4_model" + calibration_path = os.path.join(calibration_root, "INT4") + env['CM_MLPERF_INFERENCE_INTEL_CALIBRATION_PATH'] = calibration_path + env['INT4_CALIBRATION_DIR'] = os.path.join(calibration_path, "data", "quantized-int4-model") + + + elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "build_harness": + print(f"Harness Root: {harness_root}") + if "bert" in env['CM_MODEL']: + i['run_script_input']['script_name'] = "build_bert_harness" + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(os.getcwd(), "harness", "build", "bert_inference") + env['DATA_PATH'] = os.path.join(os.getcwd(), "harness", "bert") + elif "gpt" in env['CM_MODEL']: + i['run_script_input']['script_name'] = "build_gptj_harness" + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(os.getcwd(), "harness", "build", "gptj_inference") + env['DATA_PATH'] = os.path.join(os.getcwd(), "harness", "gptj") + env['MLPERF_INFERENCE_ROOT'] = env['CM_MLPERF_INFERENCE_SOURCE'] + if env.get('INTEL_GPTJ_INT4', '') == 'yes': + model_precision = "int4" + env['RUN_QUANTIZATION_CMD'] = "bash run_quantization_int4.sh" + else: + model_precision = "int8" + env['RUN_QUANTIZATION_CMD'] = "bash run_quantization.sh" + final_model_path = os.path.join(harness_root, "data", f"gpt-j-{model_precision}-model", "best_model.pt") + model_dir_name = f"{model_precision.upper()}_MODEL_DIR" + env[model_dir_name] = os.path.dirname(final_model_path) + if not os.path.exists(env[model_dir_name]): + os.makedirs(env[model_dir_name]) + env['CM_ML_MODEL_PATH'] = env[model_dir_name] + if env.get('CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH', '') != '' and env.get('INT8_MODEL_DIR', '') != '': + shutil.copy(env['CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH'], env[model_dir_name]) + if env.get('CM_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH', '') != '' and env.get('INT4_MODEL_DIR', '') != '': + shutil.copy(env['CM_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH'], env[model_dir_name]) + + elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "run_harness": + print(f"Harness Root: {harness_root}") + if env.get('CM_MLPERF_LOADGEN_MODE', '') == "compliance": + audit_path = env['CM_MLPERF_INFERENCE_AUDIT_PATH'] + shutil.copy(audit_path, env['CM_RUN_DIR']) + + if 'bert' in env['CM_MODEL']: + env['MODEL_PATH'] = os.path.dirname(os.path.dirname(env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) + env['DATASET_PATH'] = os.path.dirname(os.path.dirname(env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) + env['CM_RUN_DIR'] = i['run_script_input']['path'] + env['CM_RUN_CMD'] = "bash run_bert_harness.sh " + ("--accuracy" if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy" else "") + elif "gptj" in env['CM_MODEL']: + if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy": + env['LOADGEN_MODE'] = 'Accuracy' + else: + env['LOADGEN_MODE'] = 'Performance' + if env.get('INTEL_GPTJ_INT4', '') == 'yes': + model_precision = "int4" + env['INT4_MODEL_DIR'] = env['CM_ML_MODEL_PATH'] + env['QUANTIZED_MODEL'] = os.path.join(env['INT4_MODEL_DIR'], "best_int4_model.pt") + env['PRECISION'] = 
"int4_bf16_mixed" + else: + env['INT8_MODEL_DIR'] = env['CM_ML_MODEL_PATH'] + env['QUANTIZED_MODEL'] = os.path.join(env["INT8_MODEL_DIR"], "best_model.pt") + env['PRECISION'] = "int8" + env['CM_RUN_DIR'] = i['run_script_input']['path'] + env['CM_RUN_CMD'] = "bash run_gptj_harness.sh " + + return {'return':0} + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return':0} diff --git a/script/app-mlperf-inference-intel/run_bert_harness.sh b/script/app-mlperf-inference-intel/run_bert_harness.sh new file mode 100644 index 0000000000..2875fac724 --- /dev/null +++ b/script/app-mlperf-inference-intel/run_bert_harness.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +THREADS_PER_INSTANCE=$(((4 * ${CM_HOST_CPU_THREADS_PER_CORE}) / ${CM_HOST_CPU_SOCKETS})) + +export LD_PRELOAD=${CONDA_PREFIX}/lib/libjemalloc.so +export MALLOC_CONF="oversize_threshold:1,background_thread:true,percpu_arena:percpu,metadata_thp:always,dirty_decay_ms:9000000000,muzzy_decay_ms:9000000000"; + +accuracy=$1 + +number_threads=`nproc --all` +export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` +num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }') +num_instance=$(($number_cores / $THREADS_PER_INSTANCE)) + +sut_dir=${MODEL_PATH} +executable=${CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH} +mode=${CM_MLPERF_LOADGEN_SCENARIO} +OUTDIR="${CM_MLPERF_OUTPUT_DIR}" + +#python ../../user_config.py +USER_CONF="${CM_MLPERF_USER_CONF}" + +CONFIG="-n ${num_numa} -i ${num_instance} -j ${THREADS_PER_INSTANCE} --test_scenario=${mode} --model_file=${sut_dir}/bert.pt --sample_file=${sut_dir}/squad.pt --mlperf_config=${CM_MLPERF_CONF} --user_config=${USER_CONF} -o ${OUTDIR} -w 1300 --warmup ${accuracy}" + +${executable} ${CONFIG} diff --git a/script/app-mlperf-inference-intel/run_gptj_harness.sh b/script/app-mlperf-inference-intel/run_gptj_harness.sh new file mode 100644 index 0000000000..f006f673bd --- /dev/null +++ b/script/app-mlperf-inference-intel/run_gptj_harness.sh @@ -0,0 +1,50 @@ +#!/bin/bash +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +export KMP_BLOCKTIME=${KMP_BLOCKTIME} +export KMP_AFFINITY=granularity=fine,compact,1,0 +export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libiomp5.so +export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so + +export num_physical_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` +num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }') + +NUM_PROC=${NUM_PROC:-num_numa} +CPUS_PER_PROC=$((num_physical_cores/num_numa)) +WORKERS_PER_PROC=${WORKERS_PER_PROC} +TOTAL_SAMPLE_COUNT=13368 +BATCH_SIZE=${CM_MLPERF_LOADGEN_BATCH_SIZE} +TIMESTAMP=$(date +%m-%d-%H-%M) +HOSTNAME=$(hostname) +#OUTPUT_DIR=offline-output-${HOSTNAME}-batch-${BATCH_SIZE}-procs-${NUM_PROC}-ins-per-proc-${WORKERS_PER_PROC}-${TIMESTAMP} + +export WORKLOAD_DATA=${CM_HARNESS_CODE_ROOT}/data +export VALIDATION_DATA_JSON=${WORKLOAD_DATA}/validation-data/cnn_dailymail_validation.json + +cd ${CM_HARNESS_CODE_ROOT} +OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}" + +USER_CONF="${CM_MLPERF_USER_CONF}" + + +cmd="python runner.py --workload-name gptj \ + --scenario ${${CM_MLPERF_LOADGEN_SCENARIO}} \ + --mode ${LOADGEN_MODE} \ + --num-proc ${NUM_PROC} \ + --cpus-per-proc ${CPUS_PER_PROC} \ + --model-checkpoint-path ${CHECKPOINT_DIR} \ + ${WARMUP} \ + --dataset-path ${VALIDATION_DATA_JSON} \ + --batch-size ${BATCH_SIZE} \ + --mlperf-conf ${CM_MLPERF_CONF} \ + --user-conf ${CM_MLPERF_USER_CONF} \ + --precision ${PRECISION} \ + --pad-inputs \ + --quantized-model ${QUANTIZED_MODEL} \ + 
+	--workers-per-proc ${WORKERS_PER_PROC} \
+	--total-sample-count ${TOTAL_SAMPLE_COUNT} \
+	--output-dir ${OUTPUT_DIR} \
+	2>&1 | tee ${OUTPUT_DIR}.log"
+
+echo "$cmd"
+eval "$cmd"
diff --git a/script/app-mlperf-inference-mlcommons-cpp/CONTRIBUTING.md b/script/app-mlperf-inference-mlcommons-cpp/CONTRIBUTING.md
new file mode 100644
index 0000000000..16850be95b
--- /dev/null
+++ b/script/app-mlperf-inference-mlcommons-cpp/CONTRIBUTING.md
@@ -0,0 +1,29 @@
+## Contributing to the MLCommons
+
+The best way to contribute to the MLCommons is to get involved with one of our many project communities.
+You can find more information about getting involved with MLCommons [here](https://mlcommons.org/en/get-involved/#getting-started).
+
+Generally we encourage people to become an MLCommons member if they wish to contribute to MLCommons projects,
+but outside pull requests are very welcome too.
+
+Regardless of whether you are a member, your organization needs to sign the MLCommons CLA.
+Please fill out this [CLA sign-up form](https://forms.gle/Ew1KkBVpyeJDuRw67) to get started.
+
+MLCommons project work is tracked with issue trackers and pull requests.
+Modify the project in your own fork and issue a pull request once you want other developers
+to take a look at what you have done and discuss the proposed changes.
+Ensure that cla-bot and other checks pass for your pull requests.
+
+## Contributing to this project
+
+Please join our [Discord server](https://discord.gg/JjWNWXKxwT)
+to learn how to use the CK technology v3 (including the MLCommons CM automation language, CK playground
+and Modular Inference Library) or participate in collaborative developments.
+
+Thank you for your support and looking forward to collaborating with you!
+
+## Core contributors
+
+* [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189)
+* [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh)
+* [Grigori Fursin](https://cKnowledge.org/gfursin)
diff --git a/script/app-mlperf-inference-mlcommons-cpp/README-extra.md b/script/app-mlperf-inference-mlcommons-cpp/README-extra.md
new file mode 100644
index 0000000000..b344ea7ad7
--- /dev/null
+++ b/script/app-mlperf-inference-mlcommons-cpp/README-extra.md
@@ -0,0 +1,83 @@
+# About
+
+The MLCommons C++ Modular Inference Library (MIL) is a community project to provide
+a simple and extensible C++ harness to connect diverse ML models, frameworks, data sets and hardware
+backends to the [MLPerf loadgen](https://github.com/mlcommons/inference/tree/master/loadgen)
+and run it using the [MLCommons CM automation language](https://github.com/mlcommons/ck/tree/master/cm).
+
+It is intended to help new submitters add new hardware backends to MLPerf,
+optimize their MLPerf results using low-level knobs,
+and automate their submission using the MLCommons CM automation language.
+
+MIL is maintained and extended by the [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+based on user feedback to make it easier to run, optimize and reproduce MLPerf inference benchmarks
+across diverse platforms with continuously changing software and hardware.
+
+MIL was originally developed by [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189).
+
+[![License](https://img.shields.io/badge/License-Apache%202.0-green)](https://github.com/mlcommons/ck/tree/master/cm)
+[![CM repository](https://img.shields.io/badge/Collective%20Mind-compatible-blue)](https://github.com/mlcommons/ck)
+
+© 2021-2023 [MLCommons](https://mlcommons.org)
+
+## About
+
+This is a modularized C++ implementation of an MLPerf Inference SUT. Each file corresponds to a different class that can be changed independently of other ones:
+1. `Backend` runs the actual inference using a framework (ONNX Runtime, TF Lite, etc.)
+2. `Device` manages devices and memory (CPU, GPU, etc.)
+3. `Model` is a struct representing a model file (ResNet50, etc.)
+4. `SampleLibrary` is a dataset loader (ImageNet, COCO, etc.)
+5. `System` is the SUT interface to LoadGen which manages how input queries are issued
+
+Data flow:
+* Init
+  1. All classes are initialized, e.g. `Backend` is initialized with selected `Model` and `Device`
+* Loading samples to memory
+  1. LoadGen calls `SampleLibrary->LoadSamplesFromRam()`
+  2. `SampleLibrary` reads sample (e.g. from .npy file) and calls `Backend->LoadSampleFromRam()`
+  3. `Backend` stores samples contiguously into each device memory, e.g. by `Device->Write()`
+* Running the model
+  1. LoadGen calls `System->IssueQuery()`
+  2. `System` gathers a batch of samples, selects a device concurrency (e.g. the 3rd CPU core) and calls `Backend->IssueBatch()`
+  3. `Backend` retrieves pointers to input data in device memory, and calls `RunInference()` implemented by a derived class, e.g. `OnnxRuntimeBackend->RunInference()`
+  4. In this example, `OnnxRuntimeBackend->RunInference()` calls the ONNX Runtime API with the retrieved pointers as input, packs the raw ONNX Runtime output to LoadGen format via `Model->PostProcess()`, and sends the response to LoadGen
+  5. LoadGen records the latency from steps 1 to 4
+
+See comments in code for each class for details.
+
+## Examples
+
+### ResNet50, ONNX Runtime, CPU, Accuracy
+```sh
+cm run script "cpp mlperf _resnet50 _onnxruntime _cpu" \
+    --output_dir=<output_dir> \
+    --count=500 \
+    --max_batchsize=32 \
+    --mode=accuracy
+
+python \
+    /PATH/TO/inference/vision/classification_and_detection/tools/accuracy-imagenet.py \
+    --mlperf-accuracy-file=<output_dir>/mlperf_log_accuracy.json \
+    --imagenet-val-file `cm find cache --tags=imagenet-aux`/data/val.txt \
+    --dtype int64
+```
+
+### RetinaNet, ONNX Runtime, GPU, Accuracy
+
+Install dataset:
+```sh
+cm run script --tags=get,preprocessed,openimages,_500,_NCHW
+```
+
+Run benchmark:
+```sh
+cm run script "cpp mlperf _retinanet _onnxruntime _cuda" \
+    --output_dir=<output_dir> \
+    --count=500 \
+    --max_batchsize=1 \
+    --mode=accuracy
+
+python /PATH/TO/inference/vision/classification_and_detection/tools/accuracy-openimages.py \
+    --mlperf-accuracy-file <output_dir>/mlperf_log_accuracy.json \
+    --openimages-dir `cm find cache --tags=openimages,original`/install
+```
diff --git a/script/app-mlperf-inference-mlcommons-cpp/README.md b/script/app-mlperf-inference-mlcommons-cpp/README.md
new file mode 100644
index 0000000000..b3ef641462
--- /dev/null
+++ b/script/app-mlperf-inference-mlcommons-cpp/README.md
@@ -0,0 +1,329 @@
+Automatically generated README for this automation recipe: **app-mlperf-inference-mlcommons-cpp**
+
+Category: **Modular MLPerf inference benchmark pipeline**
+
+License: **Apache 2.0**
+
+Developers: [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-mlperf-inference-mlcommons-cpp,bf62405e6c7a44bf) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-mlcommons-cpp)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see in above meta description): *app,mlcommons,mlperf,inference,cpp*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "app mlcommons mlperf inference cpp" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=app,mlcommons,mlperf,inference,cpp`
+
+`cm run script --tags=app,mlcommons,mlperf,inference,cpp[,variations] [--input_flags]`
+
+*or*
+
+`cmr "app mlcommons mlperf inference cpp"`
+
+`cmr "app mlcommons mlperf inference cpp [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'app,mlcommons,mlperf,inference,cpp',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="app,mlcommons,mlperf,inference,cpp"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=app,mlcommons,mlperf,inference,cpp) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "app mlcommons mlperf inference cpp[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_multistream,resnet50` + - Workflow: + * `_multistream,retinanet` + - Workflow: + * `_offline,resnet50` + - Workflow: + * `_resnet50,multistream` + - Workflow: + * `_resnet50,offline` + - Workflow: + * `_resnet50,server` + - Workflow: + +
+ + + * Group "**batch-size**" +
+ Click here to expand this section. + + * `_batch-size.#` + - Environment variables: + - *CM_MLPERF_LOADGEN_MAX_BATCHSIZE*: `#` + - Workflow: + +
+ + + * Group "**device**" +
+ Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `cpu` + - Workflow: + * `_cuda` + - Environment variables: + - *CM_MLPERF_DEVICE*: `gpu` + - *CM_MLPERF_DEVICE_LIB_NAMESPEC*: `cudart` + - Workflow: + +
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * **`_onnxruntime`** (default) + - Environment variables: + - *CM_MLPERF_BACKEND*: `onnxruntime` + - *CM_MLPERF_BACKEND_LIB_NAMESPEC*: `onnxruntime` + - Workflow: + * `_pytorch` + - Environment variables: + - *CM_MLPERF_BACKEND*: `pytorch` + - Workflow: + * `_tf` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tf` + - Workflow: + * `_tflite` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tflite` + - Workflow: + * `_tvm-onnx` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tvm-onnx` + - Workflow: + +
+ + + * Group "**loadgen-scenario**" +
+ Click here to expand this section. + + * `_multistream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `MultiStream` + - Workflow: + * **`_offline`** (default) + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Offline` + - Workflow: + * `_server` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Server` + - Workflow: + * `_singlestream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `SingleStream` + - *CM_MLPERF_LOADGEN_MAX_BATCHSIZE*: `1` + - Workflow: + +
+ + + * Group "**model**" +
+ Click here to expand this section. + + * **`_resnet50`** (default) + - Environment variables: + - *CM_MODEL*: `resnet50` + - Workflow: + * `_retinanet` + - Environment variables: + - *CM_MODEL*: `retinanet` + - Workflow: + +
+ + +#### Default variations + +`_cpu,_offline,_onnxruntime,_resnet50` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+* `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+* `--mlperf_conf=value` → `CM_MLPERF_CONF=value`
+* `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+* `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value`
+* `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value`
+* `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+* `--user_conf=value` → `CM_MLPERF_USER_CONF=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "count":...})
+```
+
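+For example, the accuracy run shown in the [notes](README-extra.md) could be invoked from Python roughly as follows (a sketch: the values mirror the documented CLI example and are illustrative):
+
+```python
+import cmind as cm
+
+# Mirrors: cm run script "cpp mlperf _resnet50 _onnxruntime _cpu" \
+#            --count=500 --max_batchsize=32 --mode=accuracy
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'app,mlcommons,mlperf,inference,cpp,_resnet50,_onnxruntime,_cpu',
+               'count': 500,
+               'max_batchsize': 32,
+               'mode': 'accuracy',
+               'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```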
+ +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+* CM_BATCH_COUNT: `1`
+* CM_BATCH_SIZE: `1`
+* CM_FAST_COMPILATION: `yes`
+* CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `cpp`
+
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-mlcommons-cpp/_cm.yaml)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * detect,cpu
+       - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu)
+     * get,sys-utils-cm
+       - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm)
+     * get,cuda,_cudnn
+       * `if (CM_MLPERF_DEVICE == gpu)`
+       - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda)
+     * get,loadgen
+       * CM names: `--adr.['loadgen']...`
+       - CM script: [get-mlperf-inference-loadgen](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-loadgen)
+     * get,mlcommons,inference,src
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,lib,onnxruntime,lang-cpp,_cpu
+       * `if (CM_MLPERF_BACKEND == onnxruntime AND CM_MLPERF_DEVICE == cpu)`
+       - CM script: [get-onnxruntime-prebuilt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-onnxruntime-prebuilt)
+     * get,lib,onnxruntime,lang-cpp,_cuda
+       * `if (CM_MLPERF_BACKEND == onnxruntime AND CM_MLPERF_DEVICE == gpu)`
+       - CM script: [get-onnxruntime-prebuilt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-onnxruntime-prebuilt)
+     * get,dataset,preprocessed,imagenet,_NCHW
+       * `if (CM_MODEL == resnet50)`
+       * CM names: `--adr.['imagenet-preprocessed']...`
+       - CM script: [get-preprocessed-dataset-imagenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-imagenet)
+     * get,ml-model,raw,resnet50,_onnx
+       * `if (CM_MODEL == resnet50)`
+       - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50)
+     * get,dataset,preprocessed,openimages,_validation,_NCHW
+       * `if (CM_MODEL == retinanet)`
+       * CM names: `--adr.['openimages-preprocessed']...`
+       - CM script: [get-preprocessed-dataset-openimages](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-openimages)
+     * get,ml-model,retinanet,_onnx,_fp32
+       * `if (CM_MODEL == retinanet)`
+       - CM script: [get-ml-model-retinanet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet)
+     * generate,user-conf,mlperf,inference
+       * CM names: `--adr.['user-conf-generator']...`
+       - CM script: [generate-mlperf-inference-user-conf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/generate-mlperf-inference-user-conf)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-mlcommons-cpp/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-mlcommons-cpp/_cm.yaml)
+  1. ***Run native script if exists***
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-mlcommons-cpp/_cm.yaml)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-mlcommons-cpp/customize.py)***
+  1. 
***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-mlcommons-cpp/_cm.yaml)*** + * compile,cpp-program + * `if (CM_MLPERF_SKIP_RUN != yes)` + * CM names: `--adr.['compile-program']...` + - CM script: [compile-program](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/compile-program) + * benchmark-mlperf + * `if (CM_MLPERF_SKIP_RUN != yes)` + * CM names: `--adr.['mlperf-runner']...` + - CM script: [benchmark-program-mlperf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-program-mlperf) + * save,mlperf,inference,state + * CM names: `--adr.['save-mlperf-inference-state']...` + - CM script: [save-mlperf-inference-implementation-state](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/save-mlperf-inference-implementation-state) + +___ +### Script output +`cmr "app mlcommons mlperf inference cpp [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +* `CM_HW_NAME` +* `CM_MLPERF_*` +* `CM_ML_MODEL_*` +#### New environment keys auto-detected from customize + +* `CM_DATASET_LIST` +* `CM_MLPERF_CONF` +* `CM_MLPERF_DEVICE` +* `CM_MLPERF_USER_CONF` \ No newline at end of file diff --git a/script/app-mlperf-inference-mlcommons-cpp/_cm.yaml b/script/app-mlperf-inference-mlcommons-cpp/_cm.yaml new file mode 100644 index 0000000000..e13bab985a --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-cpp/_cm.yaml @@ -0,0 +1,260 @@ +# Identification of this CM script +alias: app-mlperf-inference-mlcommons-cpp +uid: bf62405e6c7a44bf + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf inference benchmark pipeline" + +developers: "[Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)" + +# User-friendly tags to find this CM script +tags: + - app + - mlcommons + - mlperf + - inference + - cpp + + + +# Default environment +default_env: + CM_BATCH_COUNT: '1' + CM_BATCH_SIZE: '1' + CM_FAST_COMPILATION: "yes" + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: cpp + + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: CM_MLPERF_CONF + mode: CM_MLPERF_LOADGEN_MODE + output_dir: CM_MLPERF_OUTPUT_DIR + performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + scenario: CM_MLPERF_LOADGEN_SCENARIO + user_conf: CM_MLPERF_USER_CONF + +new_env_keys: + - CM_MLPERF_* + - CM_DATASET_* + - CM_ML_MODEL_* + - CM_HW_NAME + +new_state_keys: + - mlperf-inference-implementation + - CM_SUT_* + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect CUDA if required + - tags: get,cuda,_cudnn + enable_if_env: + CM_MLPERF_DEVICE: + - gpu + + ######################################################################## + # Install MLPerf inference dependencies + + # Install MLPerf loadgen + - tags: get,loadgen + names: + - loadgen + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + ######################################################################## + # Install ML engines via CM + - enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + CM_MLPERF_DEVICE: + - cpu + tags: 
get,lib,onnxruntime,lang-cpp,_cpu + + - enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + CM_MLPERF_DEVICE: + - gpu + tags: get,lib,onnxruntime,lang-cpp,_cuda + + + ######################################################################## + # Install ResNet50 model (ONNX) and ImageNet + + - enable_if_env: + CM_MODEL: + - resnet50 + names: + - imagenet-preprocessed + tags: get,dataset,preprocessed,imagenet,_NCHW + + - enable_if_env: + CM_MODEL: + - resnet50 + tags: get,ml-model,raw,resnet50,_onnx + + + ######################################################################## + # Install RetinaNet model (ONNX) and OpenImages + + - enable_if_env: + CM_MODEL: + - retinanet + names: + - openimages-preprocessed + tags: get,dataset,preprocessed,openimages,_validation,_NCHW + + - enable_if_env: + CM_MODEL: + - retinanet + tags: get,ml-model,retinanet,_onnx,_fp32 + + # Creates user conf for given SUT + - tags: generate,user-conf,mlperf,inference + names: + - user-conf-generator + + +# Post dependencies to compile and run this app +post_deps: + + - names: + - compile-program + tags: compile,cpp-program + skip_if_env: + CM_MLPERF_SKIP_RUN: + - "yes" + + - names: + - mlperf-runner + tags: benchmark-mlperf + skip_if_env: + CM_MLPERF_SKIP_RUN: + - "yes" + + - tags: save,mlperf,inference,state + names: + - save-mlperf-inference-state + +# Variations to customize dependencies +variations: + # Target devices + cpu: + group: device + default: true + env: + CM_MLPERF_DEVICE: cpu + cuda: + group: device + env: + CM_MLPERF_DEVICE: gpu + CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + + # ML engine + onnxruntime: + group: framework + default: true + env: + CM_MLPERF_BACKEND: onnxruntime + CM_MLPERF_BACKEND_LIB_NAMESPEC: onnxruntime + + pytorch: + group: framework + env: + CM_MLPERF_BACKEND: pytorch + + tf: + group: framework + env: + CM_MLPERF_BACKEND: tf + + tflite: + group: framework + env: + CM_MLPERF_BACKEND: tflite + + tvm-onnx: + group: framework + env: + CM_MLPERF_BACKEND: tvm-onnx + + # Reference MLPerf models + resnet50: + group: model + default: true + env: + CM_MODEL: resnet50 + + retinanet: + group: model + default_env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 1 + env: + CM_MODEL: retinanet + + resnet50,offline: + default_env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 32 + + resnet50,server: + default_env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 32 + + resnet50,multistream: + default_env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 8 + + batch-size.#: + group: batch-size + env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: "#" + + offline: + group: loadgen-scenario + default: true + env: + CM_MLPERF_LOADGEN_SCENARIO: Offline + multistream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: MultiStream + singlestream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: SingleStream + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 1 + server: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Server + + multistream,resnet50: + default_variations: + batch-size: batch-size.8 + + offline,resnet50: + default_variations: + batch-size: batch-size.32 + + multistream,retinanet: + default_variations: + batch-size: batch-size.1 diff --git a/script/app-mlperf-inference-mlcommons-cpp/customize.py b/script/app-mlperf-inference-mlcommons-cpp/customize.py new file mode 100644 index 0000000000..ebe8cf7d97 --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-cpp/customize.py @@ -0,0 +1,98 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + + automation = i['automation'] + + meta = 
i['meta']
+
+    if os_info['platform'] == 'windows':
+        print ('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
+        print ('WARNING: this script was not thoroughly tested on Windows and compilation may fail - please help us test and improve it!')
+        print ('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
+#        # Currently supports only LLVM on Windows
+#        print ('# Forcing LLVM on Windows')
+#        r = automation.update_deps({'deps':meta['post_deps'], 'update_deps':{'compile-program': {'adr':{'compiler':{'tags':'llvm'}}}}})
+#        if r['return']>0: return r
+
+    env = i['env']
+
+    if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+        return {'return':0}
+
+    if 'CM_MODEL' not in env:
+        return {'return': 1, 'error': 'Please select a variation specifying the model to run'}
+    if 'CM_MLPERF_BACKEND' not in env:
+        return {'return': 1, 'error': 'Please select a variation specifying the backend'}
+    if 'CM_MLPERF_DEVICE' not in env:
+        return {'return': 1, 'error': 'Please select a variation specifying the device to run on'}
+
+    source_files = []
+    script_path = i['run_script_input']['path']
+    if env['CM_MODEL'] == "retinanet":
+        env['CM_DATASET_LIST'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH']
+    env['CM_SOURCE_FOLDER_PATH'] = os.path.join(script_path, "src")
+
+    for file in os.listdir(env['CM_SOURCE_FOLDER_PATH']):
+        if file.endswith(".c") or file.endswith(".cpp"):
+            source_files.append(file)
+
+    env['CM_CXX_SOURCE_FILES'] = ";".join(source_files)
+
+    # initialize all list-valued path variables before appending to them
+    for key in ['+CPLUS_INCLUDE_PATH', '+C_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']:
+        if key not in env:
+            env[key] = []
+
+    env['+CPLUS_INCLUDE_PATH'].append(os.path.join(script_path, "inc"))
+    env['+C_INCLUDE_PATH'].append(os.path.join(script_path, "inc"))
+
+    if env['CM_MLPERF_DEVICE'] == 'gpu':
+        env['+C_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
+        env['+CPLUS_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
+        env['+LD_LIBRARY_PATH'].append(env['CM_CUDA_PATH_LIB'])
+        env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['CM_CUDA_PATH_LIB'])
+
+    if '+ CXXFLAGS' not in env:
+        env['+ CXXFLAGS'] = []
+    env['+ CXXFLAGS'].append("-std=c++14")
+
+    # add preprocessor flag like "#define CM_MODEL_RESNET50"
+    env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper())
+    # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME"
+    env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' + env['CM_MLPERF_BACKEND'].upper())
+    # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU"
+    env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' + env['CM_MLPERF_DEVICE'].upper())
+
+    if '+ LDCXXFLAGS' not in env:
+        env['+ LDCXXFLAGS'] = [ ]
+
+    env['+ LDCXXFLAGS'] += [
+        "-lmlperf_loadgen",
+        "-lpthread"
+    ]
+    # e.g. -lonnxruntime
+    if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env:
+        env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_BACKEND_LIB_NAMESPEC'])
+    # e.g. 
-lcudart + if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env: + env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC']) + + env['CM_LINKER_LANG'] = 'CXX' + env['CM_RUN_DIR'] = os.getcwd() + + if 'CM_MLPERF_CONF' not in env: + env['CM_MLPERF_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'CM_MLPERF_USER_CONF' not in env: + env['CM_MLPERF_USER_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + + return {'return':0} + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return':0} diff --git a/script/app-mlperf-inference-mlcommons-cpp/dockerfiles/ubuntu_22.04.Dockerfile b/script/app-mlperf-inference-mlcommons-cpp/dockerfiles/ubuntu_22.04.Dockerfile new file mode 100644 index 0000000000..7f9b8c493c --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-cpp/dockerfiles/ubuntu_22.04.Dockerfile @@ -0,0 +1,38 @@ +FROM ubuntu:22.04 + +# Maintained by the MLCommons taskforce on automation and reproducibility +LABEL github="https://github.com/mlcommons/ck" +LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" + +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests giturlparse + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ="US/Pacific" +ENV PATH="${PATH}:/home/cmuser/.local/bin" +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo ctuning@mlcommons-ck + +# Install all system dependencies +RUN cm run script --tags=get,sys-utils-cm --quiet + +# Run commands +RUN cm run script --tags=app,mlperf,inference,_intel-original,_gptj-99 --quiet --fake_run --env.CM_RUN_STATE_DOCKER=True diff --git a/script/app-mlperf-inference-mlcommons-cpp/inc/backend.h b/script/app-mlperf-inference-mlcommons-cpp/inc/backend.h new file mode 100644 index 0000000000..ccfdd25ea6 --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-cpp/inc/backend.h @@ -0,0 +1,304 @@ +#ifndef BACKEND_H_ +#define BACKEND_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "loadgen.h" +#include "query_sample.h" + +#include "device.h" +#include "model.h" + +/** + * The Backend base class manages how samples are stored in memory, + * receives queries from SUT and issues them to derived classes via RunInference. + * + * For memory storage, on calls to LoadSampleFromRam() from the QSL, loaded samples are + * stored contiguously into each device memory. The Backend class retains the + * location of every sample in device memory. + * + * When SUT issues a batch to run on a device concurrency, the backend class retrieves + * the location in memory of this batch, and passes this to RunInference implemented by + * derived classes (e.g. OnnxRuntimeBackend). 
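+ *
+ * A rough sketch of one per-input region of device memory, assuming
+ * performance_sample_count = N and batch_size = B (see the constructor below,
+ * which reserves capacity N + B):
+ *
+ *   [ sample 0 | sample 1 | ... | sample N-1 | room for copies of samples 0..B-2 ]
+ *
+ * The extra batch-sized padding at the end is meant to let a batch that starts
+ * near the end of the region cycle back to the beginning while still being
+ * readable as one contiguous block.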
+ */ +class Backend { +public: + Backend(std::shared_ptr &model, std::shared_ptr &device, + size_t performance_sample_count, size_t batch_size) + : model(model), device(device) + , performance_sample_count(performance_sample_count), batch_size(batch_size) + , num_memory(device->NumMemory()), num_inputs(model->num_inputs) + , batch_memory_mutex(num_memory) { + // have batch_size padding at the end that cycles back to beginning for contiguity + size_t memory_capacity = performance_sample_count + batch_size; + samples.resize(memory_capacity); + sample_memory.resize(num_inputs); + sample_memory_size.resize(num_inputs, 0); + sample_memory_offset.resize(num_inputs); + batch_memory.resize(num_inputs); + for (size_t i = 0; i < num_inputs; i++) { + sample_memory[i].resize(num_memory); + batch_memory[i].resize(num_memory); + for (size_t j = 0; j < num_memory; j++) { + sample_memory[i][j] = + device->Alloc(j, model->input_sizes[i] * memory_capacity); + // working memory for an incontiguous batch + batch_memory[i][j] = + device->Alloc(j, model->input_sizes[i] * batch_size); + } + sample_memory_offset[i].resize(memory_capacity, 0); + } + + if (performance_sample_count == 0) + std::cerr << "warning: performance sample count = 0" << std::endl; + } + + virtual ~Backend() { + for (size_t i = 0; i < num_inputs; i++) { + for (size_t j = 0; j < num_memory; j++) { + device->Free(j, sample_memory[i][j]); + device->Free(j, batch_memory[i][j]); + } + } + } + + size_t NumConcurrency() const { + return device->NumConcurrency(); + } + + size_t PerformanceSampleCount() const { + return performance_sample_count; + } + + size_t MaxBatchSize() const { + return batch_size; + } + + // load input to device memory + void LoadSampleToRam( + mlperf::QuerySampleIndex sample_index, + const std::vector> &input_datas, + const std::vector &input_sizes, + const std::vector> &input_shapes) { + size_t index_in_memory = num_samples_in_memory; + Sample sample; + sample.index = sample_index; + sample.shape = input_shapes; + sample.size = input_sizes; + sample.index_in_memory = index_in_memory; + + samples[index_in_memory] = sample; + sample_map[sample_index] = sample; + + for (size_t input_index = 0; input_index < num_inputs; input_index++) { + const std::vector &input_data = input_datas[input_index]; + size_t input_size = input_sizes[input_index]; + + if (sample_memory_size[input_index] + input_size > + (performance_sample_count + batch_size) * model->input_sizes[input_index]) + std::cerr << "warning: memory exceeded; try increasing model->input_sizes" << std::endl; + + // write to end of memory + sample_memory_offset[input_index][index_in_memory] = sample_memory_size[input_index]; + sample_memory_size[input_index] += input_size; + for (size_t j = 0; j < num_memory; j++) { + void *destination = GetMemoryAddress(input_index, j, index_in_memory); + device->Write(j, destination, input_data); + } + } + + num_samples_in_memory++; + } + + void FinishLoading() { + return; //This probably needs a FinishUnLoading counterpart + // copy first batch to end of memory to form cycle + for (size_t k = 0; k < batch_size - 1; k++) { + size_t index_in_memory = k % performance_sample_count; + std::vector> data(num_inputs); + for (size_t i = 0; i < num_inputs; i++) + device->Read( + 0, data[i], GetMemoryAddress(i, 0, index_in_memory), samples[index_in_memory].size[i]); + // LoadSampleToRam copies samples[index_in_memory] to end of memory + LoadSampleToRam( + samples[index_in_memory].index, data, + samples[index_in_memory].size, 
samples[index_in_memory].shape); + } + // write substrings of samples vector to contiguity tree + + for (size_t start = 0; start < num_samples_in_memory; start++) { + Trie *node = &batches; + for (size_t end = start; end < std::min(start + batch_size, num_samples_in_memory); end++) { + node = &node->children[samples[end].index]; + node->index_in_memory = samples[start].index_in_memory; + } + } + } + + void UnloadSampleFromRam(mlperf::QuerySampleIndex sample_index) { + for (size_t i = 0; i < num_inputs; i++) + sample_memory_size[i] -= GetSampleSize(sample_index, i); + batches.children.erase(sample_index); + num_samples_in_memory--; + } + + void IssueBatch( + size_t concurrency_index, + const std::vector &batch) { + // determine contiguity + bool contiguous = true; + Trie *node = &batches; + for (const mlperf::QuerySample &sample : batch) { + auto next = node->children.find(sample.index); + if (next != node->children.end()) { + node = &next->second; + } else { + contiguous = false; + break; + } + } + // std::cerr << "node " << concurrency_index + // << " running batch #" << batch.front().index << "-#" << batch.back().index + // << " (" << (contiguous ? "contiguous" : "incontiguous") << ")" << std::endl; + + // batch pointer in memory [input_index] + std::vector batch_data(num_inputs); + + // gather samples + size_t memory_index = device->GetMemoryIndex(concurrency_index); + // might use batch_memory + std::unique_lock batch_memory_lock{batch_memory_mutex[memory_index], std::defer_lock}; + for (size_t i = 0; i < num_inputs; i++) { + // if input is contiguous, use input directly as batch address + // otherwise, gather a batch to batch_memory + if (contiguous) { + batch_data[i] = GetMemoryAddress(i, memory_index, node->index_in_memory); + } else { + // copy data if not contiguous + batch_memory_lock.lock(); + for (size_t k = 0; k < batch.size(); k++) { + const mlperf::QuerySample &sample = batch[k]; + void *sample_address = GetMemoryAddress(i, memory_index, sample_map[sample.index].index_in_memory); + void *batch_sample_address = GetBatchMemoryAddress(i, memory_index, k); + device->Copy(memory_index, batch_sample_address, sample_address, GetSampleSize(sample.index, i)); + } + batch_data[i] = batch_memory[i][memory_index]; + } + } + + RunInference(concurrency_index, batch, batch_data); + } + + void *GetMemoryAddress(size_t input_index, size_t memory_index, size_t index_in_memory) const { + size_t offset = sample_memory_offset[input_index][index_in_memory]; + return static_cast(sample_memory[input_index][memory_index]) + offset; + } + + void *GetBatchMemoryAddress(size_t input_index, size_t memory_index, size_t index_in_memory) const { + size_t offset = index_in_memory * model->input_sizes[input_index]; + return static_cast(batch_memory[input_index][memory_index]) + offset; + } + + const std::vector &GetSampleShape(mlperf::QuerySampleIndex sample_index, size_t input_index) { + return sample_map[sample_index].shape[input_index]; + } + + size_t GetSampleSize(mlperf::QuerySampleIndex sample_index, size_t input_index) { + return sample_map[sample_index].size[input_index]; + } + + void SetDeviceConcurrencyIndex(size_t concurrency_index) { + device->SetConcurrencyIndex(concurrency_index); + } + + /** + * @brief Runs inference on a batch of samples and calls mlperf::QuerySamplesComplete + * + * @param concurrency_index which device concurrency (device/core/thread) to run on + * @param batch the indices of the input + * @param batch_data pointer to inputs in the device memory + */ + virtual void 
RunInference( + size_t concurrency_index, + const std::vector &batch, + std::vector &batch_data) = 0; + +protected: + std::shared_ptr model; + std::shared_ptr device; + size_t performance_sample_count; + size_t batch_size; + size_t num_memory; + size_t num_inputs; + +private: + // sample_memory[input_index][memory_index] points to loaded input buffer in device memory + std::vector> sample_memory; + // sample_memory_size[input_index] is current # bytes in this buffer + std::vector sample_memory_size; + // sample_memory_offset[input_index][index_in_memory] is the offset to a sample input + std::vector> sample_memory_offset; + // batch_memory[input_index][memory_index] points to working memory for a batch + std::vector> batch_memory; + + // batch_memory_mutex[memory_index] is mutex for using batch_memory + std::vector batch_memory_mutex; + + // smallest unit of input stored in memory + struct Sample { + mlperf::QuerySampleIndex index; + size_t index_in_memory; + // sample data and sizes indexed by input_index + std::vector> shape; + std::vector size; + }; + // information of samples currently in memory (ordered) + std::vector samples; + // number of samples currently in memory + size_t num_samples_in_memory{0}; + // sample_map[sample_id] last recorded sample with sample_id in memory + std::map sample_map; + + // tree for determining batch contiguity (index tree) + struct Trie { + // index_in_memory is location of a contiguous batch from root to this node + size_t index_in_memory; + std::map children; + }; + Trie batches; +}; + +class DummyBackend : public Backend { +public: + DummyBackend( + std::shared_ptr &model, std::shared_ptr &device, + size_t performance_sample_count, size_t batch_size) + : Backend(model, device, performance_sample_count, batch_size) {} + + void RunInference( + size_t concurrency_index, + const std::vector &batch, + std::vector &batch_data) override { + size_t size = batch.size(); + std::vector response(size); + for (size_t i = 0; i < size; i++) { + response[i].id = batch[i].id; + response[i].data = reinterpret_cast(&dummy_response); + response[i].size = sizeof(int64_t); + } + mlperf::QuerySamplesComplete(response.data(), response.size()); + } + +private: + // labels for ImageNet samples #1, #324 + int64_t dummy_response{65}; +}; + +#endif // BACKEND_H_ diff --git a/script/app-mlperf-inference-mlcommons-cpp/inc/common.h b/script/app-mlperf-inference-mlcommons-cpp/inc/common.h new file mode 100644 index 0000000000..f572cdd13d --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-cpp/inc/common.h @@ -0,0 +1,5 @@ +std::string getenv(const std::string& name, const std::string& default_value) { + const char* value = std::getenv(name.c_str()); + return value ? value : default_value; + } + diff --git a/script/app-mlperf-inference-mlcommons-cpp/inc/device.h b/script/app-mlperf-inference-mlcommons-cpp/inc/device.h new file mode 100644 index 0000000000..7f68027b6c --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-cpp/inc/device.h @@ -0,0 +1,64 @@ +#ifndef DEVICE_H_ +#define DEVICE_H_ + +#include +#include +#include + +/** + * This class represents device and memory that the benchmark is run on. + * + * It assumes there are NumMemory() separate memories on the device(s), + * and NumConcurrency() concurrencies for running the model, + * each concurrency with access to the memory GetMemoryIndex(concurrency_index). 
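+ * The mapping can be many-to-one: several concurrencies may share a single
+ * memory, as CPUDevice below does by always returning memory index 0.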
+ *
+ * For example, a single CPU can have 1 memory (RAM),
+ * any number of concurrencies (e.g. the number of cores), each concurrency
+ * with access to that single memory.
+ * 2 GPUs can have 2 memories (one per GPU) and 2 concurrencies (one per GPU).
+ *
+ * The Alloc, Free, Read, Write, Copy operations act on the corresponding device memory.
+ */
+class Device {
+public:
+  virtual size_t NumConcurrency() const = 0;
+  virtual size_t NumMemory() const = 0;
+  virtual size_t GetMemoryIndex(size_t concurrency_index) const = 0;
+  virtual void *Alloc(size_t memory_index, size_t size) = 0;
+  virtual void Free(size_t memory_index, void *data) = 0;
+  virtual void Read(size_t memory_index, std::vector<char> &to, const void *from, size_t size) = 0;
+  virtual void Write(size_t memory_index, void *to, const std::vector<char> &from) = 0;
+  virtual void Copy(size_t memory_index, void *to, const void *from, size_t size) = 0;
+  // This is specifically for CUDA, which needs to set a device index for each host thread
+  virtual void SetConcurrencyIndex(size_t concurrency_index) {}
+};
+
+class CPUDevice : public Device {
+  size_t NumConcurrency() const override {
+    // one worker per hardware thread
+    return std::thread::hardware_concurrency();
+  }
+  size_t NumMemory() const override {
+    // a single shared RAM; all concurrencies map to memory index 0 below
+    return 1;
+  }
+  size_t GetMemoryIndex(size_t concurrency_index) const override {
+    return 0;
+  }
+  void *Alloc(size_t memory_index, size_t size) override {
+    return std::malloc(size);
+  }
+  void Free(size_t memory_index, void *data) override {
+    std::free(data);
+  }
+  void Read(size_t memory_index, std::vector<char> &to, const void *from, size_t size) override {
+    to.resize(size);
+    std::memcpy(to.data(), from, size);
+  }
+  void Write(size_t memory_index, void *to, const std::vector<char> &from) override {
+    std::memcpy(to, from.data(), from.size());
+  }
+  void Copy(size_t memory_index, void *to, const void *from, size_t size) override {
+    std::memcpy(to, from, size);
+  }
+};
+
+#endif  // DEVICE_H_
diff --git a/script/app-mlperf-inference-mlcommons-cpp/inc/gpu_device.h b/script/app-mlperf-inference-mlcommons-cpp/inc/gpu_device.h
new file mode 100644
index 0000000000..e451417f5b
--- /dev/null
+++ b/script/app-mlperf-inference-mlcommons-cpp/inc/gpu_device.h
@@ -0,0 +1,53 @@
+#ifndef GPU_DEVICE_H_
+#define GPU_DEVICE_H_
+
+#include <iostream>
+
+#include "cuda.h"
+#include "cuda_runtime.h"
+
+#include "device.h"
+
+#define CHECK_CUDA_SUCCESS(x) if ((x) != cudaSuccess) std::cerr << "encountered CUDA error" << std::endl;
+
+class GPUDevice : public Device {
+  size_t NumConcurrency() const override {
+    return NumMemory();
+  }
+  size_t NumMemory() const override {
+    int num_devices;
+    CHECK_CUDA_SUCCESS(cudaGetDeviceCount(&num_devices));
+    return num_devices;
+  }
+  size_t GetMemoryIndex(size_t concurrency_index) const override {
+    return concurrency_index;
+  }
+  void *Alloc(size_t memory_index, size_t size) override {
+    void *data;
+    CHECK_CUDA_SUCCESS(cudaSetDevice(memory_index));
+    CHECK_CUDA_SUCCESS(cudaMalloc(&data, size));
+    return data;
+  }
+  void Free(size_t memory_index, void *data) override {
+    CHECK_CUDA_SUCCESS(cudaSetDevice(memory_index));
+    CHECK_CUDA_SUCCESS(cudaFree(data));
+  }
+  void Read(size_t memory_index, std::vector<char> &to, const void *from, size_t size) override {
+    to.resize(size);
+    CHECK_CUDA_SUCCESS(cudaSetDevice(memory_index));
+    CHECK_CUDA_SUCCESS(cudaMemcpy(to.data(), from, size, cudaMemcpyDeviceToHost));
+  }
+  void Write(size_t memory_index, void *to, const std::vector<char> &from) override {
+    CHECK_CUDA_SUCCESS(cudaSetDevice(memory_index));
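+    // the copy below targets the GPU selected above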
CHECK_CUDA_SUCCESS(cudaMemcpy(to, from.data(), from.size(), cudaMemcpyHostToDevice)); + } + void Copy(size_t memory_index, void *to, const void *from, size_t size) override { + CHECK_CUDA_SUCCESS(cudaSetDevice(memory_index)); + CHECK_CUDA_SUCCESS(cudaMemcpy(to, from, size, cudaMemcpyDeviceToDevice)); + } + void SetConcurrencyIndex(size_t concurrency_index) override { + CHECK_CUDA_SUCCESS(cudaSetDevice(concurrency_index)); + } +}; + +#endif // GPU_DEVICE_H_ diff --git a/script/app-mlperf-inference-mlcommons-cpp/inc/model.h b/script/app-mlperf-inference-mlcommons-cpp/inc/model.h new file mode 100644 index 0000000000..163869890a --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-cpp/inc/model.h @@ -0,0 +1,124 @@ +#ifndef MODEL_H_ +#define MODEL_H_ + +#include +#include +#include +#include + +#include "query_sample.h" + +class Model { +public: + Model( + std::string model_path, + size_t num_inputs, std::vector input_names, + std::vector input_sizes, std::vector> input_shapes, + size_t num_outputs, std::vector output_names, + std::vector output_sizes, std::vector> output_shapes) : + model_path(model_path), + num_inputs(num_inputs), input_names(input_names), input_sizes(input_sizes), input_shapes(input_shapes), + num_outputs(num_outputs), output_names(output_names), output_sizes(output_sizes), output_shapes(output_shapes) {} + + std::string model_path; + + size_t num_inputs; + std::vector input_names; + // maximum size for memory allocation purpose + std::vector input_sizes; + // input shape, if fixed + std::vector> input_shapes; + + size_t num_outputs; + std::vector output_names; + // output size & shape, if fixed + std::vector output_sizes; + std::vector> output_shapes; + + /** + * @brief Post-process raw output from model and store in LoadGen response + * + * @param index query sample index + * @param raw raw outputs + * @param raw_shapes shapes of corresponding outputs + * @param response_buffer response buffer to write to + */ + virtual void PostProcess( + mlperf::QuerySampleIndex index, + const std::vector &raw, + const std::vector> &raw_shapes, + std::vector &response_buffer) = 0; +}; + +class Resnet50 : public Model { +public: + Resnet50(std::string model_path, int64_t argmax_shift) : + Model( + model_path, + 1, {"input_tensor:0"}, {3 * 224 * 224 * sizeof(float)}, {{3, 224, 224}}, + 1, {"ArgMax:0"}, {sizeof(int64_t)}, {{1}}), + argmax_shift(argmax_shift) {} + + void PostProcess( + mlperf::QuerySampleIndex index, + const std::vector &raw, + const std::vector> &raw_shapes, + std::vector &response_buffer) override { + response_buffer.resize(sizeof(int64_t)); + int64_t *buffer = reinterpret_cast(response_buffer.data()); + buffer[0] = *static_cast(raw.front()) + argmax_shift; + } + +private: + int64_t argmax_shift; +}; + +class Retinanet : public Model { +public: + Retinanet(std::string model_path, size_t width, size_t height, float score_threshold) : + Model( + model_path, + 1, {"images"}, {3 * width * height * sizeof(float)}, {{3, width, height}}, + // no fixed output sizes/shapes + 3, {"boxes", "scores", "labels"}, {0, 0, 0}, {{0, 4}, {0}, {0}}), + width(width), height(height), + score_threshold(score_threshold) {} + + void PostProcess( + mlperf::QuerySampleIndex index, + const std::vector &raw, + const std::vector> &raw_shapes, + std::vector &response_buffer) override { + float *const boxes = static_cast(raw.at(0)); + float *const scores = static_cast(raw.at(1)); + int64_t *const labels = static_cast(raw.at(2)); + const std::vector &boxes_shape = raw_shapes.at(0); + const 
std::vector &scores_shape = raw_shapes.at(1); + const std::vector &labels_shape = raw_shapes.at(2); + + size_t keep = 0; + while (keep < scores_shape[0] && scores[keep] >= score_threshold) + keep++; + + response_buffer.resize(7 * keep * sizeof(float)); + float *buffer = reinterpret_cast(response_buffer.data()); + for (size_t i = 0; i < keep; i++) { + int64_t label = labels[i]; + float *const box = &boxes[4 * i]; + buffer[7 * i + 0] = static_cast(index); + buffer[7 * i + 1] = box[1] / 800.0f; + buffer[7 * i + 2] = box[0] / 800.0f; + buffer[7 * i + 3] = box[3] / 800.0f; + buffer[7 * i + 4] = box[2] / 800.0f; + buffer[7 * i + 5] = scores[i]; + buffer[7 * i + 6] = static_cast(label); + } + } + +private: + size_t width; + size_t height; + float score_threshold; +}; + +#endif // MODEL_H_ diff --git a/script/app-mlperf-inference-mlcommons-cpp/inc/npy.h b/script/app-mlperf-inference-mlcommons-cpp/inc/npy.h new file mode 100644 index 0000000000..cb69db53b1 --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-cpp/inc/npy.h @@ -0,0 +1,143 @@ +/* + * Adapted from NVIDIA code. Original copyright notice: + * + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NPY_H_ +#define NPY_H_ + +#include +#include +#include +#include +#include +#include +#include + +// patch glog +#include +#define CHECK(x) if (x) {} else std::cerr + +namespace npy { + class NpyFile { + private: + std::string m_Path; + std::ifstream m_FStream; + size_t m_HeaderSize; + std::string m_Header; + size_t m_TensorSize; + size_t m_ElementSize; + std::vector m_TensorDims; + std::vector m_Cache; + public: + NpyFile(const std::string& path, bool cache = false) : m_Path(path), m_FStream(m_Path) { + // magic and fixed header + char b[256]; + m_FStream.read(b, 10); + CHECK(m_FStream) << "Unable to parse: " << m_Path; + + // check magic + CHECK(static_cast(b[0]) == 0x93 && b[1] == 'N' && b[2] == 'U' && b[3] == 'M' && b[4] == 'P' && b[5] == 'Y') << "Bad magic: " << m_Path; + + // get header + auto major = *reinterpret_cast(b + 6); + //auto minor = *reinterpret_cast(b + 7); + CHECK(major == 1) << "Only npy version 1 is supported: " << m_Path; + m_HeaderSize = *reinterpret_cast(b + 8); + m_Header.resize(m_HeaderSize); + // const cast for c++14 + m_FStream.read(const_cast(m_Header.data()), m_HeaderSize); + + // get file size + auto cur = m_FStream.tellg(); + m_FStream.seekg(0, std::ios::end); + auto size = m_FStream.tellg(); + m_TensorSize = size - cur; + + // cache result + if (cache) { + m_FStream.seekg(10 + m_HeaderSize, std::ios::beg); + m_Cache.resize(m_TensorSize); + m_FStream.read(m_Cache.data(), m_TensorSize); + CHECK(m_FStream) << "Unable to parse: " << m_Path; + CHECK(m_FStream.peek() == EOF) << "Did not consume full file: " << m_Path; + } + + // parse header + std::regex re(R"re(\{'descr': '[<|][fi]([\d])', 'fortran_order': False, 'shape': \(([\d, ]*)\), \} +\n)re"); + std::smatch matches; + CHECK(std::regex_match(m_Header, matches, re)) << "Cannot 
parse numpy header: " << m_Path; + CHECK(matches.size() == 3) << "Cannot parse numpy header: " << m_Path; + m_ElementSize = std::stoi(matches[1]); + std::vector dims = splitString(matches[2], ", "); + m_TensorDims.resize(dims.size()); + std::transform(dims.begin(), dims.end(), m_TensorDims.begin(), [](const std::string& s){ return std::stoi(s); }); + + // check header sanity + size_t tensorSize = std::accumulate(m_TensorDims.begin(), m_TensorDims.end(), m_ElementSize, std::multiplies()); + CHECK(tensorSize == m_TensorSize) << "Header description does not match file size: " << m_Path; + + } + ~NpyFile() { + m_FStream.close(); + }; + std::vector getDims() { + return m_TensorDims; + } + size_t getTensorSize() { + return m_TensorSize; + } + // load the entire tensor + void loadAll(std::vector& dst) { + m_FStream.seekg(10 + m_HeaderSize, std::ios::beg); + dst.resize(m_TensorSize); + m_FStream.read(dst.data(), m_TensorSize); + CHECK(m_FStream) << "Unable to parse: " << m_Path; + CHECK(m_FStream.peek() == EOF) << "Did not consume full file: " << m_Path; + } + // cache the entire tensor + void cacheAll() { + loadAll(m_Cache); + } + // load only selected indices from the Tensor, assuming that the first dim is batch dim. + void loadSamples(std::vector& dst, const std::vector& indices) { + if (m_Cache.empty()) { + cacheAll(); + } + size_t sampleSize = std::accumulate(m_TensorDims.begin() + 1, m_TensorDims.end(), m_ElementSize, std::multiplies()); + dst.resize(sampleSize * indices.size()); + for (size_t i = 0; i < indices.size(); i++) { + std::memcpy(dst.data() + i * sampleSize, m_Cache.data() + indices[i] * sampleSize, sampleSize); + } + } + // helper function to split a string based on a delimiting character + std::vector splitString(const std::string& input, const std::string& delimiter) + { + std::vector result; + size_t start = 0; + size_t next = 0; + while(next != std::string::npos) + { + next = input.find(delimiter, start); + result.emplace_back(input, start, next - start); + start = next + 1; + } + return result; + } + }; +} + +#endif // NPY_H_ diff --git a/script/app-mlperf-inference-mlcommons-cpp/inc/onnxruntime_backend.h b/script/app-mlperf-inference-mlcommons-cpp/inc/onnxruntime_backend.h new file mode 100644 index 0000000000..eab583897d --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-cpp/inc/onnxruntime_backend.h @@ -0,0 +1,132 @@ +#ifndef ONNXRUNTIME_BACKEND_H_ +#define ONNXRUNTIME_BACKEND_H_ + +#include +#include +#include + +#include "onnxruntime_cxx_api.h" + +#include "loadgen.h" + +#include "backend.h" + +class OnnxRuntimeBackend : public Backend { +public: + OnnxRuntimeBackend( + std::shared_ptr &model, std::shared_ptr &device, + size_t performance_sample_count, size_t batch_size, + bool use_cuda) + : Backend(model, device, performance_sample_count, batch_size) + , env(ORT_LOGGING_LEVEL_WARNING, "env") { + for (size_t i = 0; i < device->NumMemory(); i++) { + memory_infos.emplace_back( + use_cuda ? 
"Cuda" : "Cpu", + OrtAllocatorType::OrtArenaAllocator, i, OrtMemTypeDefault); + } + + for (size_t i = 0; i < device->NumConcurrency(); i++) { + Ort::SessionOptions session_options; + // arm64 does not work with optimization level 3 (ORT_ENABLE_ALL) + if (getenv("ORT_ENABLE_ALL", "") == "no") + session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED); + + const auto &api = Ort::GetApi(); + + std::vector keys{"device_id"}; + std::vector values{std::to_string(i).c_str()}; + + OrtCUDAProviderOptionsV2 *cuda_options = nullptr; + if (use_cuda) { + Ort::ThrowOnError(api.CreateCUDAProviderOptions(&cuda_options)); + + Ort::ThrowOnError(api.UpdateCUDAProviderOptions(cuda_options, keys.data(), values.data(), keys.size())); + + Ort::ThrowOnError(api.SessionOptionsAppendExecutionProvider_CUDA_V2( + static_cast(session_options), + cuda_options)); + } + + sessions.emplace_back(env, model->model_path.c_str(), session_options); + bindings.emplace_back(sessions[i]); + + if (use_cuda) { + api.ReleaseCUDAProviderOptions(cuda_options); + } + } + } + + void RunInference( + size_t concurrency_index, + const std::vector &batch, + std::vector &batch_data) override { + Ort::Session &session = sessions[concurrency_index]; + Ort::IoBinding &binding = bindings[concurrency_index]; + size_t memory_index = device->GetMemoryIndex(concurrency_index); + + for (size_t i = 0; i < model->num_inputs; i++) { + size_t size = batch.size() * GetSampleSize(batch.front().index, i); + const std::vector &shape = GetSampleShape(batch.front().index, i); + std::vector input_shape; + input_shape.push_back(batch.size()); + for (size_t dim : shape) + input_shape.push_back(dim); + ONNXTensorElementDataType input_type = + session.GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetElementType(); + Ort::Value value = Ort::Value::CreateTensor( + memory_infos[memory_index], + batch_data[i], size, + input_shape.data(), input_shape.size(), + input_type); + binding.BindInput(model->input_names[i].c_str(), value); + } + + for (std::string &output : model->output_names) + binding.BindOutput(output.c_str(), memory_info_cpu); + + session.Run(Ort::RunOptions(), binding); + + std::vector outputs = binding.GetOutputValues(); + std::vector responses(batch.size()); + std::vector> response_buffers(batch.size()); + for (size_t i = 0; i < batch.size(); i++) { + // get output data and shapes + std::vector output_buffers(outputs.size()); + std::vector> output_shapes(outputs.size()); + for (size_t j = 0; j < outputs.size(); j++) { + // assume ith position in output is ith sample in batch + output_buffers[j] = + static_cast(outputs[j].GetTensorMutableData()) + + i * model->output_sizes[j]; + size_t rank = outputs[j].GetTensorTypeAndShapeInfo().GetDimensionsCount(); + std::vector output_shape(rank); + outputs[j].GetTensorTypeAndShapeInfo().GetDimensions(output_shape.data(), rank); + output_shapes[j].resize(rank); + for (size_t k = 0; k < rank; k++) + output_shapes[j][k] = output_shape[k]; + } + + model->PostProcess( + batch[i].index, output_buffers, output_shapes, response_buffers[i]); + + responses[i].id = batch[i].id; + responses[i].data = reinterpret_cast(response_buffers[i].data()); + responses[i].size = response_buffers[i].size(); + } + + mlperf::QuerySamplesComplete(responses.data(), responses.size()); + + binding.ClearBoundInputs(); + binding.ClearBoundOutputs(); + }; + +private: + Ort::Env env; + std::vector sessions; + std::vector bindings; + std::vector memory_infos; + Ort::MemoryInfo memory_info_cpu{ + 
Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemTypeDefault)}; +}; + +#endif // ONNXRUNTIME_BACKEND_H_ diff --git a/script/app-mlperf-inference-mlcommons-cpp/inc/sample_library.h b/script/app-mlperf-inference-mlcommons-cpp/inc/sample_library.h new file mode 100644 index 0000000000..045ddc9618 --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-cpp/inc/sample_library.h @@ -0,0 +1,181 @@ +#ifndef SAMPLE_LIBRARY_H_ +#define SAMPLE_LIBRARY_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "query_sample_library.h" + +#include "backend.h" +#include "npy.h" + +/** + * SampleLibrary reads stored samples on request of LoadGen and passes + * them to Backend. Derived classes specify how samples are read (e.g. from .npy files) + */ +class SampleLibrary : public mlperf::QuerySampleLibrary { +public: + SampleLibrary( + const std::string &name, std::shared_ptr &backend, + size_t max_sample_count, size_t num_inputs) + : name(name), backend(backend), max_sample_count(max_sample_count), num_inputs(num_inputs) {} + + const std::string &Name() override { return name; } + size_t PerformanceSampleCount() override { return backend->PerformanceSampleCount(); } + size_t TotalSampleCount() override { + return max_sample_count != 0 ? std::min(max_sample_count, NumSamples()) : NumSamples(); + } + + void LoadSamplesToRam(const std::vector &samples) override { + std::cerr << "loading samples to ram with total sample size: " << samples.size()<< std::endl; + for (size_t i = 0; i < samples.size(); i++) { + mlperf::QuerySampleIndex sample = samples[i]; + std::vector> input_datas(num_inputs); + std::vector input_sizes(num_inputs); + std::vector> input_shapes(num_inputs); + for (size_t j = 0; j < num_inputs; j++) { + GetSample(sample, j, input_datas[j], input_sizes[j], input_shapes[j]); + } + backend->LoadSampleToRam(sample, input_datas, input_sizes, input_shapes); + } + backend->FinishLoading(); + } + + void UnloadSamplesFromRam(const std::vector &samples) override { + for (mlperf::QuerySampleIndex sample : samples){ + backend->UnloadSampleFromRam(sample); + } + } + + virtual size_t NumSamples() = 0; + + virtual void GetSample( + mlperf::QuerySampleIndex sample_index, + size_t input_index, + std::vector &data, + size_t &size, + std::vector &shape) = 0; + +protected: + std::shared_ptr backend; + size_t max_sample_count; + size_t num_inputs; + +private: + std::string name{"SampleLibrary"}; +}; + +class NumpyLibrary : public SampleLibrary { +public: + /** + * @brief Constructs a QSL with .npy files in a directory + * + * @param backend backend to use + * @param max_sample_count maximum library size (use 0 for unlimited size) + * @param preprocessed_path path to directory containing .npy files + * @param filenames filenames of npy files: / + */ + NumpyLibrary( + std::shared_ptr &backend, size_t max_sample_count, + std::string preprocessed_path, + const std::vector &filenames) + : SampleLibrary("NumpyLibrary", backend, max_sample_count, 1) { + for (std::string filename : filenames) { + std::string file_path = preprocessed_path + "/" + filename; + + std::ifstream f(file_path); + if (f.good()) + file_paths.push_back(file_path); + } + } + + size_t NumSamples() override { + return file_paths.size(); + } + + void GetSample( + mlperf::QuerySampleIndex sample_index, + size_t input_index, + std::vector &data, + size_t &size, + std::vector &shape) override { + npy::NpyFile data_file(file_paths[sample_index]); + std::vector data_char; + data_file.loadAll(data_char); + 
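+      // one Ort::Session and Ort::IoBinding per device concurrency (filled in the constructor)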
data.assign(data_char.begin(), data_char.end()); + size = data_file.getTensorSize(); + shape = data_file.getDims(); + } + +private: + std::vector file_paths; +}; + +class Imagenet : public NumpyLibrary { +public: + Imagenet( + std::shared_ptr &backend, size_t max_sample_count, + std::string preprocessed_path, std::string val_map_path) + : NumpyLibrary( + backend, max_sample_count, preprocessed_path, + ReadValMap(val_map_path)) { + std::cerr << "loaded imagenet with " << TotalSampleCount() << " samples" << std::endl; + } + +private: + static const std::vector ReadValMap(std::string val_map_path) { + std::vector filenames; + std::ifstream val_map(val_map_path); + std::string line; + std::regex val_map_regex(R"(\s*([\.\w]*)\s+(\d+)\s*)"); + while (std::getline(val_map, line)) { + std::smatch match; + std::regex_match(line, match, val_map_regex); + std::string image_filename = match[1]; + int64_t label = std::stoi(match[2]); + + filenames.push_back(image_filename + ".npy"); + } + return filenames; + } +}; + +class Openimages : public NumpyLibrary { +public: + Openimages( + std::shared_ptr &backend, size_t max_sample_count, + std::string preprocessed_path, std::string annotations_path) + : NumpyLibrary( + backend, max_sample_count, preprocessed_path, + ReadAnnotations(annotations_path, max_sample_count)) { + std::cerr << "loaded openimages with " << TotalSampleCount() << " samples" << std::endl; + } + +private: + static const std::vector ReadAnnotations( + std::string annotations_path, size_t max_sample_count) { + std::vector filenames; + std::ifstream val_map(annotations_path); + std::stringstream buffer; + buffer << val_map.rdbuf(); + std::string annotations = buffer.str(); + + std::regex image_regex(R"(\"file_name\": \"([^\"]*)\")"); + std::smatch match; + + while (std::regex_search(annotations, match, image_regex) && filenames.size() < max_sample_count) { + std::string image_filename = match[1]; + + filenames.push_back(image_filename + ".npy"); + annotations = match.suffix(); + } + return filenames; + } +}; + +#endif // SAMPLE_LIBRARY_H_ diff --git a/script/app-mlperf-inference-mlcommons-cpp/inc/system.h b/script/app-mlperf-inference-mlcommons-cpp/inc/system.h new file mode 100644 index 0000000000..b2bab3904c --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-cpp/inc/system.h @@ -0,0 +1,135 @@ +#ifndef SYSTEM_H_ +#define SYSTEM_H_ + +#include +#include +#include +#include +#include +#include + +#include "system_under_test.h" + +#include "backend.h" + +/** + * A System class represents the policy by which requests from LoadGen are handled. + * + * StreamSUT immediately takes any LoadGen requests to device concurrency 0 + * (this is for single-stream). + * + * QueueSUT maintains a queue of LoadGen requests and dequeues requests to + * any available device concurrency. 
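+      // hand the raw tensor bytes to the caller; size and shape below are
+      // taken from the parsed .npy header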
+ */ +class System : public mlperf::SystemUnderTest { +public: + System(const std::string &name, std::shared_ptr &backend) + : name(name), backend(backend) {} + const std::string &Name() override { return name; } + +protected: + std::shared_ptr backend; + +private: + std::string name; +}; + +class StreamSUT : public System { +public: + StreamSUT(std::shared_ptr &backend) : System("StreamSUT", backend) {} + + void IssueQuery(const std::vector &samples) override { + size_t max_batch_size = backend->MaxBatchSize(); + size_t size = samples.size(); + + // for CUDA, set the device to use for this thread + backend->SetDeviceConcurrencyIndex(0); + + for (auto batch_begin = samples.begin(); batch_begin < samples.end(); batch_begin += max_batch_size) { + auto batch_end = std::min(batch_begin + max_batch_size, samples.end()); + const std::vector batch_queries(batch_begin, batch_end); + backend->IssueBatch(0, batch_queries); + } + } + + void FlushQueries() override {} +}; + +class QueueSUT : public System { +public: + QueueSUT(std::shared_ptr &backend) + : System("QueueSUT", backend) { + // launch threads + size_t num_threads = backend->NumConcurrency(); + for (size_t i = 0; i < num_threads; i++) + threads.emplace_back(&QueueSUT::ThreadStart, this, i); + } + + ~QueueSUT() override { + { + std::lock_guard lock(queries_mutex); + done = true; + } + has_queries.notify_all(); + + for (std::thread &thread : threads) + thread.join(); + } + + void IssueQuery(const std::vector &samples) override { + // enqueue queries + { + std::lock_guard lock(queries_mutex); + queries.insert(queries.end(), samples.begin(), samples.end()); + } + has_queries.notify_one(); + } + + void FlushQueries() override {} + +private: + void ThreadStart(size_t concurrency_index) { + size_t max_batch_size = backend->MaxBatchSize(); + std::vector batch_queries; + batch_queries.reserve(max_batch_size); + size_t size; + + // for CUDA, set the device to use for this thread + backend->SetDeviceConcurrencyIndex(concurrency_index); + + while (true) { + // dequeue queries + { + std::unique_lock lock(queries_mutex); + has_queries.wait(lock, [&]() { return !queries.empty() || done; }); + + if (done) + break; + + // load a batch of queries to batch_queries + size = std::min(max_batch_size, queries.size()); + auto begin = queries.begin(); + auto end = begin + size; + batch_queries.assign(begin, end); + queries.erase(begin, end); + } + has_queries.notify_one(); + + // compute response from batch_queries + // and log to LoadGen + backend->IssueBatch(concurrency_index, batch_queries); + } + } + + // queue of incoming queries + std::deque queries; + std::mutex queries_mutex; + std::condition_variable has_queries; + + // worker threads to process queries + std::vector threads; + + bool done{false}; +}; + +#endif // SYSTEM_H_ diff --git a/script/app-mlperf-inference-mlcommons-cpp/inc/tflite_backend.h b/script/app-mlperf-inference-mlcommons-cpp/inc/tflite_backend.h new file mode 100644 index 0000000000..eab583897d --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-cpp/inc/tflite_backend.h @@ -0,0 +1,132 @@ +#ifndef ONNXRUNTIME_BACKEND_H_ +#define ONNXRUNTIME_BACKEND_H_ + +#include +#include +#include + +#include "onnxruntime_cxx_api.h" + +#include "loadgen.h" + +#include "backend.h" + +class OnnxRuntimeBackend : public Backend { +public: + OnnxRuntimeBackend( + std::shared_ptr &model, std::shared_ptr &device, + size_t performance_sample_count, size_t batch_size, + bool use_cuda) + : Backend(model, device, performance_sample_count, batch_size) + , 
env(ORT_LOGGING_LEVEL_WARNING, "env") { + for (size_t i = 0; i < device->NumMemory(); i++) { + memory_infos.emplace_back( + use_cuda ? "Cuda" : "Cpu", + OrtAllocatorType::OrtArenaAllocator, i, OrtMemTypeDefault); + } + + for (size_t i = 0; i < device->NumConcurrency(); i++) { + Ort::SessionOptions session_options; + // arm64 does not work with optimization level 3 (ORT_ENABLE_ALL) + if (getenv("ORT_ENABLE_ALL", "") == "no") + session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED); + + const auto &api = Ort::GetApi(); + + std::vector keys{"device_id"}; + std::vector values{std::to_string(i).c_str()}; + + OrtCUDAProviderOptionsV2 *cuda_options = nullptr; + if (use_cuda) { + Ort::ThrowOnError(api.CreateCUDAProviderOptions(&cuda_options)); + + Ort::ThrowOnError(api.UpdateCUDAProviderOptions(cuda_options, keys.data(), values.data(), keys.size())); + + Ort::ThrowOnError(api.SessionOptionsAppendExecutionProvider_CUDA_V2( + static_cast(session_options), + cuda_options)); + } + + sessions.emplace_back(env, model->model_path.c_str(), session_options); + bindings.emplace_back(sessions[i]); + + if (use_cuda) { + api.ReleaseCUDAProviderOptions(cuda_options); + } + } + } + + void RunInference( + size_t concurrency_index, + const std::vector &batch, + std::vector &batch_data) override { + Ort::Session &session = sessions[concurrency_index]; + Ort::IoBinding &binding = bindings[concurrency_index]; + size_t memory_index = device->GetMemoryIndex(concurrency_index); + + for (size_t i = 0; i < model->num_inputs; i++) { + size_t size = batch.size() * GetSampleSize(batch.front().index, i); + const std::vector &shape = GetSampleShape(batch.front().index, i); + std::vector input_shape; + input_shape.push_back(batch.size()); + for (size_t dim : shape) + input_shape.push_back(dim); + ONNXTensorElementDataType input_type = + session.GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetElementType(); + Ort::Value value = Ort::Value::CreateTensor( + memory_infos[memory_index], + batch_data[i], size, + input_shape.data(), input_shape.size(), + input_type); + binding.BindInput(model->input_names[i].c_str(), value); + } + + for (std::string &output : model->output_names) + binding.BindOutput(output.c_str(), memory_info_cpu); + + session.Run(Ort::RunOptions(), binding); + + std::vector outputs = binding.GetOutputValues(); + std::vector responses(batch.size()); + std::vector> response_buffers(batch.size()); + for (size_t i = 0; i < batch.size(); i++) { + // get output data and shapes + std::vector output_buffers(outputs.size()); + std::vector> output_shapes(outputs.size()); + for (size_t j = 0; j < outputs.size(); j++) { + // assume ith position in output is ith sample in batch + output_buffers[j] = + static_cast(outputs[j].GetTensorMutableData()) + + i * model->output_sizes[j]; + size_t rank = outputs[j].GetTensorTypeAndShapeInfo().GetDimensionsCount(); + std::vector output_shape(rank); + outputs[j].GetTensorTypeAndShapeInfo().GetDimensions(output_shape.data(), rank); + output_shapes[j].resize(rank); + for (size_t k = 0; k < rank; k++) + output_shapes[j][k] = output_shape[k]; + } + + model->PostProcess( + batch[i].index, output_buffers, output_shapes, response_buffers[i]); + + responses[i].id = batch[i].id; + responses[i].data = reinterpret_cast(response_buffers[i].data()); + responses[i].size = response_buffers[i].size(); + } + + mlperf::QuerySamplesComplete(responses.data(), responses.size()); + + binding.ClearBoundInputs(); + binding.ClearBoundOutputs(); + }; + +private: + Ort::Env env; + 
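+ *
+ * In this harness, StreamSUT is used for the SingleStream scenario and
+ * QueueSUT for all other scenarios (see the SUT selection in src/main.cpp).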
std::vector sessions; + std::vector bindings; + std::vector memory_infos; + Ort::MemoryInfo memory_info_cpu{ + Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemTypeDefault)}; +}; + +#endif // ONNXRUNTIME_BACKEND_H_ diff --git a/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp b/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp new file mode 100644 index 0000000000..c5a3c809ea --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp @@ -0,0 +1,214 @@ +#include +#include +#include +#include + +#include "loadgen.h" +#include "test_settings.h" +#include "common.h" +#include "backend.h" +#include "device.h" +#include "model.h" +#include "sample_library.h" +#include "system.h" +#ifdef CM_MLPERF_DEVICE_GPU + #include "gpu_device.h" +#endif + +#ifdef CM_MLPERF_BACKEND_ONNXRUNTIME + #include "onnxruntime_backend.h" +#endif + +class InputSettings { + +public: + InputSettings() { + mlperf_conf_path = getenv("CM_MLPERF_CONF", "../inference/mlperf.conf"); + user_conf_path = getenv("CM_MLPERF_USER_CONF", "../inference/vision/classification_and_detection/user.conf"); + audit_conf_path = getenv("CM_MLPERF_INFERENCE_AUDIT_PATH", ""); + output_dir = getenv("CM_MLPERF_OUTPUT_DIR", "."); + backend_name = getenv("CM_MLPERF_BACKEND", "onnxruntime"); + device_name = getenv("CM_MLPERF_DEVICE", "cpu"); + model_name = getenv("CM_MODEL", "resnet50"); + model_path = getenv("CM_ML_MODEL_FILE_WITH_PATH", ""); + dataset_preprocessed_path = getenv("CM_DATASET_PREPROCESSED_PATH", ""); + dataset_path = getenv("CM_DATASET_PATH", ""); + dataset_list = getenv("CM_DATASET_LIST", ""); + imagenet_val_path = getenv("CM_DATASET_AUX_PATH", "") + "/val.txt"; + scenario_name = getenv("CM_MLPERF_LOADGEN_SCENARIO", "Offline"); + mode_name = getenv("CM_MLPERF_LOADGEN_MODE", "PerformanceOnly"); + if (mode_name == "accuracy") + mode_name = "AccuracyOnly"; + if (mode_name == "performance") + mode_name = "PerformanceOnly"; + query_count_override = std::stol(getenv("CM_MLPERF_LOADGEN_QUERY_COUNT", "0")); + query_count_override = 0; + performance_sample_count = std::stol(getenv("CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT", "0")); + batch_size = std::stol(getenv("CM_MLPERF_LOADGEN_MAX_BATCHSIZE", "32")); + std::cout << "MLPerf Conf path: " << mlperf_conf_path << std::endl; + std::cout << "User Conf path: " << user_conf_path << std::endl; + std::cout << "Dataset Preprocessed path: " << dataset_preprocessed_path << std::endl; + std::cout << "Dataset List filepath: " << dataset_list << std::endl; + std::cout << "Scenario: " << scenario_name << std::endl; + std::cout << "Mode: " << mode_name << std::endl; + std::cout << "Batch size: " << batch_size << std::endl; + std::cout << "Query count override: " << query_count_override << std::endl; + std::cout << "Performance sample count override in application: " << performance_sample_count << std::endl; + } + + std::string mlperf_conf_path; + std::string user_conf_path; + std::string audit_conf_path; + std::string output_dir; + std::string backend_name; + std::string device_name; + std::string model_name; + std::string model_path; + std::string dataset_preprocessed_path; + std::string dataset_path; + std::string dataset_list; + std::string imagenet_val_path; + std::string scenario_name; + std::string mode_name; + size_t performance_sample_count; + size_t batch_size; + size_t query_count_override; +}; + +int main(int argc, const char *argv[]) { + // configure test settings + InputSettings input_settings; + mlperf::TestSettings test_settings; + 
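+  // one Ort::Session and Ort::IoBinding per device concurrency (see the constructor above)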
test_settings.scenario = + input_settings.scenario_name == "SingleStream" ? mlperf::TestScenario::SingleStream : + input_settings.scenario_name == "MultiStream" ? mlperf::TestScenario::MultiStream : + input_settings.scenario_name == "Server" ? mlperf::TestScenario::Server : + input_settings.scenario_name == "Offline" ? mlperf::TestScenario::Offline : + mlperf::TestScenario::SingleStream; + test_settings.mode = + input_settings.mode_name == "SubmissionRun" ? mlperf::TestMode::SubmissionRun : + input_settings.mode_name == "AccuracyOnly" ? mlperf::TestMode::AccuracyOnly : + input_settings.mode_name == "PerformanceOnly" ? mlperf::TestMode::PerformanceOnly : + input_settings.mode_name == "FindPeakPerformance" ? mlperf::TestMode::FindPeakPerformance : + mlperf::TestMode::SubmissionRun; + + // read test settings from mlperf.conf and user.conf + if (test_settings.FromConfig(input_settings.mlperf_conf_path, input_settings.model_name, input_settings.scenario_name)) { + std::cerr << "Could not read mlperf.conf at " << input_settings.mlperf_conf_path << std::endl; + return 1; + } + if (test_settings.FromConfig(input_settings.user_conf_path, input_settings.model_name, input_settings.scenario_name)) { + std::cerr << "Could not read user.conf at " << input_settings.user_conf_path << std::endl; + return 1; + } + + // configure log settings + mlperf::LogSettings log_settings; + log_settings.log_output.outdir = input_settings.output_dir; + + // build model + std::shared_ptr model; + if (input_settings.model_name == "resnet50") { + model.reset(new Resnet50(input_settings.model_path, -1)); + // can change model params here + // e.g. if (backend == "torch") { + // model.reset(new Resnet50(input_settings.model_path, 0)); + // model->input_names = {"image"}; + // } + } else if (input_settings.model_name == "retinanet") { + // onnx retinanet requires batch size 1 + if (input_settings.backend_name == "onnxruntime" && input_settings.batch_size != 1) { + std::cerr << "onnx retinanet requires batch size 1" + << " (current batch size: " << input_settings.batch_size << ")" << std::endl; + return 1; + } + model.reset(new Retinanet(input_settings.model_path, 800, 800, 0.05f)); + } else { + std::cerr << "model (" << input_settings.model_name << ") not supported" << std::endl; + return 1; + } + + // build device + std::shared_ptr device; + if (input_settings.device_name == "cpu") { + device.reset(new CPUDevice()); + } else if (input_settings.device_name == "gpu") { +#ifdef CM_MLPERF_DEVICE_GPU + device.reset(new GPUDevice()); +#endif + } else { + std::cerr << "device (" << input_settings.device_name << ") not supported" << std::endl; + return 1; + } + + // get counts + if (input_settings.query_count_override != 0) + test_settings.max_query_count = input_settings.query_count_override; + size_t max_sample_count = test_settings.max_query_count; + size_t performance_sample_count = + test_settings.performance_sample_count_override != 0 ? 
+        test_settings.performance_sample_count_override :
+        input_settings.performance_sample_count;
+
+    if (performance_sample_count != 0) { // it was already changed from user.conf
+        //test_settings.performance_sample_count_override = performance_sample_count;
+    }
+    if (max_sample_count != 0)
+        performance_sample_count =
+            std::min(performance_sample_count, max_sample_count);
+    if (max_sample_count == 0)
+        max_sample_count = INT_MAX;
+    // build backend
+    std::shared_ptr<Backend> backend;
+    if (input_settings.backend_name == "onnxruntime") {
+#ifdef CM_MLPERF_BACKEND_ONNXRUNTIME
+        backend.reset(new OnnxRuntimeBackend(
+            model, device, performance_sample_count, input_settings.batch_size,
+            input_settings.device_name == "gpu"));
+#endif
+    } else {
+        std::cerr << "backend (" << input_settings.backend_name << ") not supported" << std::endl;
+        return 1;
+    }
+
+    // build QSL
+    std::shared_ptr<mlperf::QuerySampleLibrary> qsl;
+    if (input_settings.model_name == "resnet50") {
+        qsl.reset(new Imagenet(
+            backend, max_sample_count,
+            input_settings.dataset_preprocessed_path,
+            input_settings.imagenet_val_path));
+    } else if (input_settings.model_name == "retinanet") {
+        qsl.reset(new Openimages(
+            backend, max_sample_count,
+            input_settings.dataset_preprocessed_path,
+            input_settings.dataset_list));
+    } else {
+        std::cerr << "dataset for model ("
+                  << input_settings.model_name << ") not supported" << std::endl;
+        return 1;
+    }
+
+    // sanity check: common problem in workflow
+    if (qsl->TotalSampleCount() == 0) {
+        std::cerr << "error: 0 samples found in dataset" << std::endl;
+        return 1;
+    }
+    if (qsl->PerformanceSampleCount() == 0) {
+        std::cerr << "error: performance sample count = 0" << std::endl;
+        return 1;
+    }
+
+    // build SUT
+    // using QueueSUT for all scenarios except for StreamSUT for single-stream
+    std::shared_ptr<mlperf::SystemUnderTest> sut;
+    if (input_settings.scenario_name == "SingleStream") {
+        sut.reset(new StreamSUT(backend));
+    } else {
+        sut.reset(new QueueSUT(backend));
+    }
+
+    // start benchmark
+    std::cerr << "starting benchmark" << std::endl;
+    mlperf::StartTest(sut.get(), qsl.get(), test_settings, log_settings, input_settings.audit_conf_path);
+}
diff --git a/script/app-mlperf-inference-mlcommons-cpp/tests/win.bat b/script/app-mlperf-inference-mlcommons-cpp/tests/win.bat
new file mode 100644
index 0000000000..08dc944a4b
--- /dev/null
+++ b/script/app-mlperf-inference-mlcommons-cpp/tests/win.bat
@@ -0,0 +1,8 @@
+rem TBD: currently not compiling - need to check ...
+
+cmr "install llvm prebuilt" --version=16.0.4
+cmr "install llvm prebuilt" --version=17.0.6
+
+cmr "get lib onnxruntime lang-cpp _cpu" --version=1.11.1
+cmr "get lib onnxruntime lang-cpp _cpu" --version=1.13.1
+cmr "get lib onnxruntime lang-cpp _cpu" --version=1.15.1
diff --git a/script/app-mlperf-inference-mlcommons-python/README-about.md b/script/app-mlperf-inference-mlcommons-python/README-about.md
new file mode 100644
index 0000000000..77ba7ea07c
--- /dev/null
+++ b/script/app-mlperf-inference-mlcommons-python/README-about.md
@@ -0,0 +1,7 @@
+This portable CM script is being developed by the [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md)
+to modularize the *Python reference implementations* of the [MLPerf inference benchmark](https://github.com/mlcommons/inference)
+using the [MLCommons CM automation meta-framework](https://github.com/mlcommons/ck).
+The goal is to make it easier to run, optimize and reproduce MLPerf benchmarks
+across diverse platforms with continuously changing software and hardware.
+
+See the current coverage of different models, devices and backends [here](README-extra.md#current-coverage).
diff --git a/script/app-mlperf-inference-mlcommons-python/README-extra.md b/script/app-mlperf-inference-mlcommons-python/README-extra.md
new file mode 100644
index 0000000000..4a97066387
--- /dev/null
+++ b/script/app-mlperf-inference-mlcommons-python/README-extra.md
@@ -0,0 +1,235 @@
+# About
+
+This portable CM script is being developed by the [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md)
+to modularize the *Python reference implementations* of the [MLPerf inference benchmark](https://github.com/mlcommons/inference)
+using the [MLCommons CM automation meta-framework](https://github.com/mlcommons/ck).
+The goal is to make it easier to run, optimize and reproduce MLPerf benchmarks
+across diverse platforms with continuously changing software and hardware.
+
+# Current Coverage
+
+| Model | Device | Backend | Model Precision | Status / Comments |
+| --- | --- | --- | --- | --- |
+| ResNet50 | CPU | Onnxruntime | fp32 | Works on all tested versions |
+| ResNet50 | CPU | Tensorflow | fp32 | Works on all tested versions |
+| ResNet50 | CPU | Pytorch | N | Reference Implementation missing |
+| ResNet50 | CUDA | Onnxruntime | fp32 | Works on all tested versions |
+| ResNet50 | CUDA | Tensorflow | fp32 | Works on all tested versions |
+| ResNet50 | CUDA | Pytorch | N | Reference Implementation missing |
+| RetinaNet | CPU | Onnxruntime | fp32 | Works on all tested versions |
+| RetinaNet | CPU | Tensorflow | fp32 | Not Implemented |
+| RetinaNet | CPU | Pytorch | fp32 | Works on all tested versions |
+| RetinaNet | CUDA | Onnxruntime | fp32 | Works on all tested versions |
+| RetinaNet | CUDA | Tensorflow | fp32 | Not Implemented |
+| RetinaNet | CUDA | Pytorch | fp32 | Works on all tested versions |
+| Bert | CPU | Onnxruntime | fp32 | Works on all tested versions |
+| Bert | CPU | Onnxruntime | int8 | Works on all tested versions |
+| Bert | CPU | Tensorflow | fp32 | Works with protobuf 3.19. Issue mentioned here |
+| Bert | CPU | Pytorch | fp32 | Works on all tested versions |
+| Bert | CUDA | Onnxruntime | fp32 | Works on all tested versions |
+| Bert | CUDA | Onnxruntime | int8 | Works on all tested versions |
+| Bert | CUDA | Tensorflow | fp32 | Not tested |
+| Bert | CUDA | Pytorch | fp32 | Works on all tested versions |
+| 3d-unet | CPU | Onnxruntime | fp32 | Works on all tested versions |
+| 3d-unet | CPU | Tensorflow | fp32 | Works on all tested versions |
+| 3d-unet | CPU | Pytorch | fp32 | Works on all tested versions |
+| 3d-unet | CUDA | Onnxruntime | fp32 | Works on all tested versions |
+| 3d-unet | CUDA | Tensorflow | fp32 | Works on all tested versions |
+| 3d-unet | CUDA | Pytorch | fp32 | Works on all tested versions |
+| Rnnt | CPU | Pytorch | fp32 | Works on all tested versions |
+| DLRM | CPU | Pytorch | fp32 | Works with torch 1.10 and numpy 1.19 |
+| DLRM | CUDA | Pytorch | fp32 | ? (needs GPU with high memory capacity) |
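+
+For example, the first row above (ResNet50 on CPU via Onnxruntime) can be exercised
+through the CM Python API documented in this script's auto-generated README. This is
+a minimal sketch, assuming CM and this repository are already installed; the flag
+values are illustrative defaults rather than tuned settings:
+
+```python
+import cmind
+
+# Select the ResNet50 / CPU / Onnxruntime row via the _resnet50,_cpu,_onnxruntime
+# variations; input keys such as 'mode' and 'scenario' map to CM_MLPERF_LOADGEN_*
+# environment variables (see "Script flags mapped to environment" in the README).
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'app,vision,language,mlcommons,mlperf,inference,reference,ref,_resnet50,_cpu,_onnxruntime',
+                  'mode': 'accuracy',
+                  'scenario': 'Offline',
+                  'test_query_count': '10',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```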
+
+Please follow our R&D roadmap [here](https://github.com/mlcommons/ck/issues/536).
+
+
+
diff --git a/script/app-mlperf-inference-mlcommons-python/README.md b/script/app-mlperf-inference-mlcommons-python/README.md
new file mode 100644
index 0000000000..ed11d669d9
--- /dev/null
+++ b/script/app-mlperf-inference-mlcommons-python/README.md
@@ -0,0 +1,886 @@
+Automatically generated README for this automation recipe: **app-mlperf-inference-mlcommons-python**
+
+Category: **Modular MLPerf inference benchmark pipeline**
+
+License: **Apache 2.0**
+
+Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-mlperf-inference-mlcommons-python,ff149e9781fc4b65) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+
+This portable CM script is being developed by the [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md)
+to modularize the *Python reference implementations* of the [MLPerf inference benchmark](https://github.com/mlcommons/inference)
+using the [MLCommons CM automation meta-framework](https://github.com/mlcommons/ck).
+The goal is to make it easier to run, optimize and reproduce MLPerf benchmarks
+across diverse platforms with continuously changing software and hardware.
+
+See the current coverage of different models, devices and backends [here](README-extra.md#current-coverage).
+
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-mlcommons-python)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see in above meta description): *app,vision,language,mlcommons,mlperf,inference,reference,ref*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "app vision language mlcommons mlperf inference reference ref" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=app,vision,language,mlcommons,mlperf,inference,reference,ref`
+
+`cm run script --tags=app,vision,language,mlcommons,mlperf,inference,reference,ref[,variations] [--input_flags]`
+
+*or*
+
+`cmr "app vision language mlcommons mlperf inference reference ref"`
+
+`cmr "app vision language mlcommons mlperf inference reference ref [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'app,vision,language,mlcommons,mlperf,inference,reference,ref',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
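+
+The call returns a dictionary; by CM convention, `r['return']>0` signals an error.
+As a sketch of inspecting the result (this assumes the standard output structure of
+the CM script automation, whose `new_env` dictionary exposes the environment keys
+filtered at the end of this README):
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'app,vision,language,mlcommons,mlperf,inference,reference,ref',
+                  'out': 'con'})
+if r['return'] > 0:
+    raise RuntimeError(r['error'])
+
+# Print the environment keys produced by the run that match one of the
+# documented "New environment keys" filters (e.g. CM_MLPERF_*).
+for key, value in r.get('new_env', {}).items():
+    if key.startswith('CM_MLPERF_'):
+        print(f'{key}={value}')
+```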
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="app,vision,language,mlcommons,mlperf,inference,reference,ref"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=app,vision,language,mlcommons,mlperf,inference,reference,ref) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "app vision language mlcommons mlperf inference reference ref[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *Internal group (variations should not be selected manually)* +
+ Click here to expand this section. + + * `_gptj_` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_torch + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.datasets + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.attrs + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.accelerate + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_llama2-70b_` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_package.transformers + * CM names: `--adr.['transformers']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.datasets + * CM names: `--adr.['datasets']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.sentencepiece + * CM names: `--adr.['sentencepiece']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.protobuf + * CM names: `--adr.['protobuf']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.accelerate + * CM names: `--adr.['accelerate']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.absl-py + * CM names: `--adr.['absl-py']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.evaluate + * CM names: `--adr.['evaluate']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.nltk + * CM names: `--adr.['nltk']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.rouge-score + * CM names: `--adr.['rouge-score']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
+ + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_3d-unet` + - Environment variables: + - *CM_TMP_IGNORE_MLPERF_QUERY_COUNT*: `True` + - *CM_MLPERF_MODEL_SKIP_BATCHING*: `True` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_package.nibabel + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_beam_size.#` + - Environment variables: + - *GPTJ_BEAM_SIZE*: `#` + - Workflow: + * `_bert` + - Environment variables: + - *CM_MLPERF_MODEL_SKIP_BATCHING*: `True` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_package.pydantic + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_tokenization + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_six + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.absl-py + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_protobuf + * `if (CM_MLPERF_BACKEND in ['tf', 'tflite'])` + * CM names: `--adr.['protobuf']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_boto3 + * `if (CM_MLPERF_BACKEND == pytorch)` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torch + * `if (CM_MLPERF_DEVICE != gpu)` + * CM names: `--adr.['ml-engine-pytorch', 'pytorch']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_dlrm` + - Environment variables: + - *CM_MLPERF_MODEL_SKIP_BATCHING*: `True` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,dlrm,src + * CM names: `--adr.['dlrm-src']...` + - CM script: [get-dlrm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dlrm) + * get,generic-python-lib,_mlperf_logging + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_opencv-python + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_tensorboard + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_protobuf + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_scikit-learn + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_tqdm + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnx + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.torchrec + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.pyre-extensions + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.torchsnapshot + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_llama2-70b_,cuda` + - Workflow: + * `_multistream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `MultiStream` + - Workflow: + * `_offline` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Offline` + - Workflow: + * `_onnxruntime,cpu` + - Environment variables: + - *CM_MLPERF_BACKEND_VERSION*: `<<>>` + - Workflow: + * `_onnxruntime,cuda` + - Environment variables: + - *CM_MLPERF_BACKEND_VERSION*: `<<>>` + - *ONNXRUNTIME_PREFERRED_EXECUTION_PROVIDER*: `CUDAExecutionProvider` + - Workflow: + * `_onnxruntime,rocm` + - Environment variables: + - *ONNXRUNTIME_PREFERRED_EXECUTION_PROVIDER*: `ROCMExecutionProvider` + - *CM_MLPERF_BACKEND_VERSION*: `<<>>` + - Workflow: + * `_pytorch,rocm` + - Workflow: + * `_r2.1_default` + - Environment variables: + - *CM_RERUN*: `yes` + - *CM_SKIP_SYS_UTILS*: `yes` + - *CM_TEST_QUERY_COUNT*: `100` + - Workflow: + * `_server` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Server` + - Workflow: + * `_singlestream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `SingleStream` + - Workflow: + * `_tf,rocm` + - Environment variables: + - *CM_MLPERF_BACKEND_VERSION*: `<<>>` + - Workflow: + * `_tpu,tflite` + - Workflow: + +
+ + + * Group "**batch-size**" +
+ Click here to expand this section. + + * `_batch_size.#` + - Environment variables: + - *CM_MLPERF_LOADGEN_MAX_BATCHSIZE*: `#` + - Workflow: + +
+ + + * Group "**device**" +
+ Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `cpu` + - *CUDA_VISIBLE_DEVICES*: `` + - *USE_CUDA*: `False` + - *USE_GPU*: `False` + - Workflow: + * `_cuda` + - Environment variables: + - *CM_MLPERF_DEVICE*: `gpu` + - *USE_CUDA*: `True` + - *USE_GPU*: `True` + - Workflow: + * `_rocm` + - Environment variables: + - *CM_MLPERF_DEVICE*: `rocm` + - *USE_GPU*: `True` + - Workflow: + * `_tpu` + - Environment variables: + - *CM_MLPERF_DEVICE*: `tpu` + - Workflow: + +
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * `_deepsparse` + - Environment variables: + - *CM_MLPERF_BACKEND*: `deepsparse` + - *CM_MLPERF_BACKEND_VERSION*: `<<>>` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_deepsparse + * `if (CM_HOST_PLATFORM_FLAVOR != aarch64)` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.deepsparse-nightly + * `if (CM_HOST_PLATFORM_FLAVOR == aarch64)` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_ncnn` + - Environment variables: + - *CM_MLPERF_BACKEND*: `ncnn` + - *CM_MLPERF_BACKEND_VERSION*: `<<>>` + - *CM_MLPERF_VISION_DATASET_OPTION*: `imagenet_pytorch` + - Workflow: + * **`_onnxruntime`** (default) + - Environment variables: + - *CM_MLPERF_BACKEND*: `onnxruntime` + - Workflow: + * `_pytorch` + - Environment variables: + - *CM_MLPERF_BACKEND*: `pytorch` + - *CM_MLPERF_BACKEND_VERSION*: `<<>>` + - Workflow: + * `_ray` + - Environment variables: + - *CM_MLPERF_BACKEND*: `ray` + - *CM_MLPERF_BACKEND_VERSION*: `<<>>` + - Workflow: + * `_tf` + - Aliases: `_tensorflow` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tf` + - *CM_MLPERF_BACKEND_VERSION*: `<<>>` + - Workflow: + * `_tflite` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tflite` + - *CM_MLPERF_BACKEND_VERSION*: `<<>>` + - *CM_MLPERF_VISION_DATASET_OPTION*: `imagenet_tflite_tpu` + - Workflow: + * `_tvm-onnx` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tvm-onnx` + - *CM_MLPERF_BACKEND_VERSION*: `<<>>` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_onnx + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,tvm + * CM names: `--adr.['tvm']...` + - CM script: [get-tvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tvm) + * get,tvm-model,_onnx + * CM names: `--adr.['tvm-model']...` + - CM script: [get-tvm-model](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tvm-model) + * `_tvm-pytorch` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tvm-pytorch` + - *CM_MLPERF_BACKEND_VERSION*: `<<>>` + - *CM_PREPROCESS_PYTORCH*: `yes` + - *MLPERF_TVM_TORCH_QUANTIZED_ENGINE*: `qnnpack` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_torch + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,tvm + * CM names: `--adr.['tvm']...` + - CM script: [get-tvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tvm) + * get,tvm-model,_pytorch + * CM names: `--adr.['tvm-model']...` + - CM script: [get-tvm-model](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tvm-model) + * `_tvm-tflite` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tvm-tflite` + - *CM_MLPERF_BACKEND_VERSION*: `<<>>` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,generic-python-lib,_tflite + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,tvm + * CM names: `--adr.['tvm']...` + - CM script: [get-tvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tvm) + * get,tvm-model,_tflite + * CM names: `--adr.['tvm-model']...` + - CM script: [get-tvm-model](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tvm-model) + +
+ + + * Group "**implementation**" +
+ Click here to expand this section. + + * **`_python`** (default) + - Environment variables: + - *CM_MLPERF_PYTHON*: `yes` + - *CM_MLPERF_IMPLEMENTATION*: `reference` + - Workflow: + +
+ + + * Group "**models**" +
+ Click here to expand this section. + + * `_3d-unet-99` + - Environment variables: + - *CM_MODEL*: `3d-unet-99` + - Workflow: + * `_3d-unet-99.9` + - Environment variables: + - *CM_MODEL*: `3d-unet-99.9` + - Workflow: + * `_bert-99` + - Environment variables: + - *CM_MODEL*: `bert-99` + - Workflow: + * `_bert-99.9` + - Environment variables: + - *CM_MODEL*: `bert-99.9` + - Workflow: + * `_dlrm-99` + - Environment variables: + - *CM_MODEL*: `dlrm-99` + - Workflow: + * `_dlrm-99.9` + - Environment variables: + - *CM_MODEL*: `dlrm-99.9` + - Workflow: + * `_gptj-99` + - Environment variables: + - *CM_MODEL*: `gptj-99` + - Workflow: + * `_gptj-99.9` + - Environment variables: + - *CM_MODEL*: `gptj-99.9` + - Workflow: + * `_llama2-70b-99` + - Environment variables: + - *CM_MODEL*: `llama2-70b-99` + - Workflow: + * `_llama2-70b-99.9` + - Environment variables: + - *CM_MODEL*: `llama2-70b-99.9` + - Workflow: + * **`_resnet50`** (default) + - Environment variables: + - *CM_MODEL*: `resnet50` + - *CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_opencv-python + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_pycocotools + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Read "prehook_deps" on other CM scripts*** + * get,generic-python-lib,_protobuf + * `if (CM_MLPERF_BACKEND in ['tf', 'tflite'])` + * CM names: `--adr.['protobuf']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_retinanet` + - Environment variables: + - *CM_MODEL*: `retinanet` + - *CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT*: `yes` + - *CM_MLPERF_LOADGEN_MAX_BATCHSIZE*: `1` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_opencv-python + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_pycocotools + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_rnnt` + - Environment variables: + - *CM_MODEL*: `rnnt` + - *CM_MLPERF_MODEL_SKIP_BATCHING*: `True` + - *CM_TMP_IGNORE_MLPERF_QUERY_COUNT*: `True` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,generic-python-lib,_package.pydantic + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_librosa + * CM names: `--adr.['librosa']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_inflect + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_unidecode + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_toml + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_sdxl` + - Environment variables: + - *CM_MODEL*: `stable-diffusion-xl` + - *CM_NUM_THREADS*: `1` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_package.diffusers + * CM names: `--adr.['diffusers']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.transformers + * CM names: `--adr.['transformers']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.accelerate + * CM names: `--adr.['accelerate']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.torchmetrics + * CM names: `--adr.['torchmetrics']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.torch-fidelity + * CM names: `--adr.['torch-fidelity']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.open_clip_torch + * CM names: `--adr.['open-clip']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.opencv-python + * CM names: `--adr.['opencv-python']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.scipy + * CM names: `--adr.['scipy']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
+ + + * Group "**network**" +
+ Click here to expand this section. + + * `_network-lon` + - Environment variables: + - *CM_NETWORK_LOADGEN*: `lon` + - *CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1*: `network_loadgen` + - Workflow: + * `_network-sut` + - Environment variables: + - *CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1*: `network_sut` + - *CM_NETWORK_LOADGEN*: `sut` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * `_bfloat16` + - Environment variables: + - *CM_MLPERF_QUANTIZATION*: `False` + - *CM_MLPERF_MODEL_PRECISION*: `bfloat16` + - Workflow: + * `_float16` + - Environment variables: + - *CM_MLPERF_QUANTIZATION*: `False` + - *CM_MLPERF_MODEL_PRECISION*: `float16` + - Workflow: + * **`_fp32`** (default) + - Environment variables: + - *CM_MLPERF_QUANTIZATION*: `False` + - *CM_MLPERF_MODEL_PRECISION*: `float32` + - Workflow: + * `_int8` + - Aliases: `_quantized` + - Environment variables: + - *CM_MLPERF_QUANTIZATION*: `True` + - *CM_MLPERF_MODEL_PRECISION*: `int8` + - Workflow: + +
+ + +#### Default variations + +`_cpu,_fp32,_onnxruntime,_python,_resnet50` + +#### Script flags mapped to environment +
+
+Click here to expand this section.
+
+* `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value`
+* `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+* `--dataset=value` → `CM_MLPERF_VISION_DATASET_OPTION=value`
+* `--dataset_args=value` → `CM_MLPERF_EXTRA_DATASET_ARGS=value`
+* `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value`
+* `--hw_name=value` → `CM_HW_NAME=value`
+* `--imagenet_path=value` → `IMAGENET_PATH=value`
+* `--max_amps=value` → `CM_MLPERF_POWER_MAX_AMPS=value`
+* `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+* `--max_volts=value` → `CM_MLPERF_POWER_MAX_VOLTS=value`
+* `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+* `--model=value` → `CM_MLPERF_CUSTOM_MODEL_PATH=value`
+* `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+* `--network=value` → `CM_NETWORK_LOADGEN=value`
+* `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value`
+* `--num_threads=value` → `CM_NUM_THREADS=value`
+* `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+* `--output_dir=value` → `OUTPUT_BASE_DIR=value`
+* `--power=value` → `CM_MLPERF_POWER=value`
+* `--power_server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value`
+* `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value`
+* `--rerun=value` → `CM_RERUN=value`
+* `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+* `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+* `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+* `--sut_servers=value` → `CM_NETWORK_LOADGEN_SUT_SERVERS=value`
+* `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+* `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+* `--test_query_count=value` → `CM_TEST_QUERY_COUNT=value`
+* `--threads=value` → `CM_NUM_THREADS=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "clean":...})
+```
+
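+
+For instance, several of the flags above can be combined in a single Python call.
+This is a sketch; each key maps to the environment variable listed above and the
+values are placeholders:
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'app,vision,language,mlcommons,mlperf,inference,reference,ref',
+                  'count': '100',         # -> CM_MLPERF_LOADGEN_QUERY_COUNT
+                  'mode': 'performance',  # -> CM_MLPERF_LOADGEN_MODE
+                  'scenario': 'Offline',  # -> CM_MLPERF_LOADGEN_SCENARIO
+                  'threads': '8',         # -> CM_NUM_THREADS
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```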
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MLPERF_LOADGEN_MODE: `accuracy` +* CM_MLPERF_LOADGEN_SCENARIO: `Offline` +* CM_OUTPUT_FOLDER_NAME: `test_results` +* CM_MLPERF_RUN_STYLE: `test` +* CM_TEST_QUERY_COUNT: `10` +* CM_MLPERF_QUANTIZATION: `False` +* CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `reference` +* CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: `` + +
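+
+For example, the default test query count can be raised either on the command line
+via `--env.CM_TEST_QUERY_COUNT=50` or from Python through the `env` dictionary.
+A minimal sketch (the chosen values are arbitrary):
+
+```python
+import cmind
+
+# 'env' is the Python analogue of --env.KEY=VALUE and updates the
+# default environment keys listed above before the script runs.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'app,vision,language,mlcommons,mlperf,inference,reference,ref',
+                  'env': {'CM_TEST_QUERY_COUNT': '50',
+                          'CM_MLPERF_LOADGEN_MODE': 'performance'},
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```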
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-mlcommons-python/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,python + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,cuda,_cudnn + * `if (CM_MLPERF_DEVICE == gpu AND CM_MLPERF_BACKEND in ['onnxruntime', 'tf', 'tflite', 'pytorch'])` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,nvidia,tensorrt + * `if (CM_MLPERF_BACKEND == tensorrt)` + - CM script: [get-tensorrt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tensorrt) + * get,generic-python-lib,_onnxruntime + * `if (CM_MLPERF_BACKEND in ['onnxruntime', 'tvm-onnx'] AND CM_MLPERF_DEVICE in ['cpu', 'rocm'])` + * CM names: `--adr.['ml-engine-onnxruntime', 'onnxruntime']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnxruntime_gpu + * `if (CM_MLPERF_BACKEND in ['onnxruntime', 'tvm-onnx'] AND CM_MLPERF_DEVICE == gpu) AND (CM_MODEL not in ['3d-unet-99', '3d-unet-99.9'])` + * CM names: `--adr.['ml-engine-onnxruntime-cuda']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnxruntime + * `if (CM_MLPERF_BACKEND == onnxruntime AND CM_MLPERF_DEVICE == gpu AND CM_MODEL in ['3d-unet-99', '3d-unet-99.9', 'resnet50'])` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnxruntime_gpu + * `if (CM_MLPERF_BACKEND == onnxruntime AND CM_MLPERF_DEVICE == gpu AND CM_MODEL in ['3d-unet-99', '3d-unet-99.9', 'resnet50'])` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torch + * `if (CM_MLPERF_BACKEND in ['pytorch', 'tvm-pytorch'] AND CM_MLPERF_DEVICE in ['cpu', 'rocm'])` + * CM names: `--adr.['ml-engine-pytorch', 'pytorch']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torch_cuda + * `if (CM_MLPERF_BACKEND in ['pytorch', 'tvm-pytorch', 'ray'] AND CM_MLPERF_DEVICE == gpu)` + * CM names: `--adr.['ml-engine-pytorch', 'pytorch']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torchvision + * `if (CM_MLPERF_BACKEND in ['pytorch', 'tvm-pytorch'] AND CM_MLPERF_DEVICE == cpu)` + * CM names: `--adr.['ml-engine-torchvision']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torchvision_cuda + * `if (CM_MLPERF_BACKEND in ['pytorch', 'tvm-pytorch', 'ray'] AND CM_MLPERF_DEVICE == gpu)` + * CM names: `--adr.['ml-engine-torchvision']...` + - CM script: 
[get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_tensorrt + * `if (CM_MLPERF_BACKEND == ray)` + * CM names: `--adr.['ml-engine-tensorrt']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torch_tensorrt + * `if (CM_MLPERF_BACKEND == ray)` + * CM names: `--adr.['ml-engine-torch_tensorrt']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_ray + * `if (CM_MLPERF_BACKEND == ray)` + * CM names: `--adr.['ray']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_async_timeout + * `if (CM_MLPERF_BACKEND == ray)` + * CM names: `--adr.['async_timeout']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_transformers + * `if (CM_MODEL in ['bert-99', 'bert-99.9', 'gptj-99', 'gptj-99.9'])` + * CM names: `--adr.['ml-engine-transformers']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_tensorflow + * `if (CM_MLPERF_BACKEND in ['tf', 'tflite'])` + * CM names: `--adr.['ml-engine-tensorflow', 'tensorflow']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.ncnn + * `if (CM_MLPERF_BACKEND == ncnn)` + * CM names: `--adr.['ml-engine-ncnn']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,ml-model,neural-magic,zoo + * `if (CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB == on)` + * CM names: `--adr.['custom-ml-model']...` + - CM script: [get-ml-model-neuralmagic-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-neuralmagic-zoo) + * get,ml-model,image-classification,resnet50 + * `if (CM_MODEL == resnet50) AND (CM_MLPERF_CUSTOM_MODEL_PATH != on)` + * CM names: `--adr.['ml-model', 'resnet50-model']...` + - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50) + * get,ml-model,object-detection,retinanet + * `if (CM_MODEL == retinanet)` + * CM names: `--adr.['ml-model', 'retinanet-model']...` + - CM script: [get-ml-model-retinanet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet) + * get,ml-model,large-language-model,gptj + * `if (CM_MODEL in ['gptj-99', 'gptj-99.9'])` + * CM names: `--adr.['ml-model', 'gptj-model', 'gpt-j-model']...` + - CM script: [get-ml-model-gptj](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-gptj) + * get,ml-model,object-detection,resnext50,fp32,_pytorch-weights + * `if (CM_MLPERF_BACKEND == pytorch AND CM_MLPERF_IMPLEMENTATION == nvidia AND CM_MODEL == retinanet)` + * CM names: `--adr.['ml-model', 'retinanet-model']...` + - *Warning: no scripts found* + * get,ml-model,language-processing,bert-large + * `if (CM_MODEL in ['bert-99', 'bert-99.9']) AND (CM_MLPERF_CUSTOM_MODEL_PATH != on)` + * CM names: `--adr.['ml-model', 'bert-model']...` + - CM script: 
[get-ml-model-bert-large-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-bert-large-squad) + * get,ml-model,stable-diffusion,text-to-image,sdxl + * `if (CM_MODEL == stable-diffusion-xl) AND (CM_MLPERF_CUSTOM_MODEL_PATH != on)` + * CM names: `--adr.['ml-model', 'sdxl-model']...` + - CM script: [get-ml-model-stable-diffusion](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-stable-diffusion) + * get,ml-model,llama2 + * `if (CM_MODEL in ['llama2-70b-99', 'llama2-70b-99.9']) AND (CM_MLPERF_CUSTOM_MODEL_PATH != on)` + * CM names: `--adr.['ml-model', 'llama2-model']...` + - CM script: [get-ml-model-llama2](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-llama2) + * get,ml-model,medical-imaging,3d-unet + * `if (CM_MODEL in ['3d-unet-99', '3d-unet-99.9'])` + * CM names: `--adr.['ml-model', '3d-unet-model']...` + - CM script: [get-ml-model-3d-unet-kits19](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-3d-unet-kits19) + * get,ml-model,speech-recognition,rnnt + * `if (CM_MODEL == rnnt)` + * CM names: `--adr.['ml-model', 'rnnt-model']...` + - CM script: [get-ml-model-rnnt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-rnnt) + * get,ml-model,recommendation,dlrm + * `if (CM_MODEL in ['dlrm-99', 'dlrm-99.9'])` + * CM names: `--adr.['ml-model', 'dlrm-model']...` + - CM script: [get-ml-model-dlrm-terabyte](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-dlrm-terabyte) + * get,dataset,image-classification,imagenet,preprocessed + * `if (CM_MODEL == resnet50) AND (CM_MLPERF_VISION_DATASET_OPTION != True)` + * CM names: `--adr.['imagenet-preprocessed']...` + - CM script: [get-preprocessed-dataset-imagenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-imagenet) + * get,dataset,image-classification,imagenet,preprocessed,_pytorch + * `if (CM_MODEL == resnet50 AND CM_MLPERF_VISION_DATASET_OPTION == imagenet_pytorch)` + * CM names: `--adr.['imagenet-preprocessed']...` + - CM script: [get-preprocessed-dataset-imagenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-imagenet) + * get,dataset-aux,image-classification,imagenet-aux + * `if (CM_MODEL == resnet50)` + - CM script: [get-dataset-imagenet-aux](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-aux) + * get,dataset,object-detection,open-images,openimages,preprocessed,_validation + * `if (CM_MODEL == retinanet)` + * CM names: `--adr.['openimages-preprocessed']...` + - CM script: [get-preprocessed-dataset-openimages](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-openimages) + * get,dataset,cnndm,_validation + * `if (CM_MODEL in ['gptj-99', 'gptj-99.9'])` + * CM names: `--adr.['cnndm-preprocessed']...` + - CM script: [get-dataset-cnndm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-cnndm) + * get,dataset,squad,original + * `if (CM_MODEL in ['bert-99', 'bert-99.9'])` + * CM names: `--adr.['cnndm-preprocessed']...` + - CM script: [get-dataset-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-squad) + * get,dataset-aux,squad-vocab + * `if (CM_MODEL in ['bert-99', 'bert-99.9'])` + - CM script: [get-dataset-squad-vocab](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-squad-vocab) + * get,dataset,coco2014,_validation + * `if (CM_MODEL == stable-diffusion-xl)` + * CM names: 
`--adr.['coco2014-preprocessed']...`
+       - CM script: [get-dataset-coco2014](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-coco2014)
+     * get,preprocessed,dataset,openorca,_validation
+       * `if (CM_MODEL in ['llama2-70b-99', 'llama2-70b-99.9'])`
+       * CM names: `--adr.['openorca-preprocessed']...`
+       - CM script: [get-preprocessed-dataset-openorca](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-openorca)
+     * get,dataset,kits19,preprocessed
+       * `if (CM_MODEL in ['3d-unet-99', '3d-unet-99.9'])`
+       * CM names: `--adr.['kits19-preprocessed']...`
+       - CM script: [get-preprocessed-dataset-kits19](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-kits19)
+     * get,dataset,librispeech,preprocessed
+       * `if (CM_MODEL == rnnt)`
+       * CM names: `--adr.['librispeech-preprocessed']...`
+       - CM script: [get-preprocessed-dataset-librispeech](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-librispeech)
+     * get,dataset,criteo,preprocessed
+       * `if (CM_MODEL in ['dlrm-99', 'dlrm-99.9'])`
+       * CM names: `--adr.['criteo-preprocessed']...`
+       - CM script: [get-preprocessed-dataset-criteo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-criteo)
+     * generate,user-conf,mlperf,inference
+       * CM names: `--adr.['user-conf-generator']...`
+       - CM script: [generate-mlperf-inference-user-conf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/generate-mlperf-inference-user-conf)
+     * get,loadgen
+       * CM names: `--adr.['loadgen', 'mlperf-inference-loadgen']...`
+       - CM script: [get-mlperf-inference-loadgen](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-loadgen)
+     * get,mlcommons,inference,src
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,mlcommons,inference,src
+       * CM names: `--adr.['mlperf-implementation']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,generic-python-lib,_package.psutil
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-mlcommons-python/customize.py)***
+  1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-mlcommons-python/_cm.yaml)***
+     * remote,run,cmds
+       * `if (CM_ASSH_RUN_COMMANDS == on)`
+       * CM names: `--adr.['remote-run-cmds']...`
+       - CM script: [remote-run-commands](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/remote-run-commands)
+  1. ***Run native script if exists***
+  1. ***Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-mlcommons-python/_cm.yaml)***
+     * benchmark-mlperf
+       * `if (CM_MLPERF_SKIP_RUN != on)`
+       * CM names: `--adr.['mlperf-runner']...`
+       - CM script: [benchmark-program-mlperf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-program-mlperf)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-mlcommons-python/customize.py)***
+  1.
***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-mlcommons-python/_cm.yaml)*** + * save,mlperf,inference,state + * CM names: `--adr.['save-mlperf-inference-state']...` + - CM script: [save-mlperf-inference-implementation-state](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/save-mlperf-inference-implementation-state) + +___ +### Script output +`cmr "app vision language mlcommons mlperf inference reference ref [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +* `CM_HW_NAME` +* `CM_MAX_EXAMPLES` +* `CM_MLPERF_*` +* `CM_ML_MODEL_*` +#### New environment keys auto-detected from customize + +* `CM_MLPERF_BACKEND` +* `CM_MLPERF_CONF` +* `CM_MLPERF_DEVICE` +* `CM_MLPERF_LOADGEN_EXTRA_OPTIONS` +* `CM_MLPERF_LOADGEN_MODE` +* `CM_MLPERF_LOADGEN_QPS_OPT` +* `CM_MLPERF_LOADGEN_SCENARIO` +* `CM_MLPERF_OUTPUT_DIR` +* `CM_MLPERF_RUN_CMD` +* `CM_ML_MODEL_FILE_WITH_PATH` \ No newline at end of file diff --git a/script/app-mlperf-inference-mlcommons-python/_cm.yaml b/script/app-mlperf-inference-mlcommons-python/_cm.yaml new file mode 100644 index 0000000000..f2ca733fd9 --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-python/_cm.yaml @@ -0,0 +1,1166 @@ +# Identification of this CM script +alias: app-mlperf-inference-mlcommons-python +uid: ff149e9781fc4b65 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf inference benchmark pipeline" + +developers: "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin)" + +# User-friendly tags to find this CM script +tags: + - app + - vision + - language + - mlcommons + - mlperf + - inference + - reference + - ref + +# Default environment +default_env: + CM_MLPERF_LOADGEN_MODE: accuracy + CM_MLPERF_LOADGEN_SCENARIO: Offline + CM_OUTPUT_FOLDER_NAME: test_results + CM_MLPERF_RUN_STYLE: test + CM_TEST_QUERY_COUNT: '10' + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference + CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: '' + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + docker: CM_RUN_DOCKER_CONTAINER + hw_name: CM_HW_NAME + imagenet_path: IMAGENET_PATH + max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + mode: CM_MLPERF_LOADGEN_MODE + num_threads: CM_NUM_THREADS + threads: CM_NUM_THREADS + dataset: CM_MLPERF_VISION_DATASET_OPTION + model: CM_MLPERF_CUSTOM_MODEL_PATH + output_dir: OUTPUT_BASE_DIR + power: CM_MLPERF_POWER + power_server: CM_MLPERF_POWER_SERVER_ADDRESS + ntp_server: CM_MLPERF_POWER_NTP_SERVER + max_amps: CM_MLPERF_POWER_MAX_AMPS + max_volts: CM_MLPERF_POWER_MAX_VOLTS + regenerate_files: CM_REGENERATE_MEASURE_FILES + rerun: CM_RERUN + scenario: CM_MLPERF_LOADGEN_SCENARIO + test_query_count: CM_TEST_QUERY_COUNT + clean: CM_MLPERF_CLEAN_SUBMISSION_DIR + dataset_args: CM_MLPERF_EXTRA_DATASET_ARGS + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + network: CM_NETWORK_LOADGEN + sut_servers: CM_NETWORK_LOADGEN_SUT_SERVERS + + +# Duplicate CM environment variables to the ones used in native 
apps +env_key_mappings: + CM_HOST_: HOST_ + CM_ML_: ML_ + CM_MLPERF_TVM: MLPERF_TVM + CM_MLPERF_DELETE: MLPERF_DELETE + +# Env keys which are exposed to higher level scripts +new_env_keys: + - CM_MLPERF_* + - CM_DATASET_* + - CM_HW_NAME + - CM_ML_MODEL_* + - CM_MAX_EXAMPLES + +new_state_keys: + - mlperf-inference-implementation + - CM_SUT_* + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect/install python + - tags: get,python + names: + - python + - python3 + + # Detect CUDA if required + - tags: get,cuda,_cudnn + enable_if_env: + CM_MLPERF_DEVICE: + - gpu + CM_MLPERF_BACKEND: + - onnxruntime + - tf + - tflite + - pytorch + + # Detect TensorRT if required + - tags: get,nvidia,tensorrt + enable_if_env: + CM_MLPERF_BACKEND: + - tensorrt + + + + + ######################################################################## + # Install ML engines via CM + + ## Onnx CPU Runtime + - tags: get,generic-python-lib,_onnxruntime + names: + - ml-engine-onnxruntime + - onnxruntime + enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + - tvm-onnx + CM_MLPERF_DEVICE: + - cpu + - rocm + + ## Onnx CUDA Runtime + - tags: get,generic-python-lib,_onnxruntime_gpu + names: + - ml-engine-onnxruntime-cuda + enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + - tvm-onnx + CM_MLPERF_DEVICE: + - gpu + skip_if_env: + CM_MODEL: + - 3d-unet-99 + - 3d-unet-99.9 + + ## resnet50 and 3d-unet need both onnxruntime and onnxruntime_gpu on cuda + - tags: get,generic-python-lib,_onnxruntime + enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + CM_MLPERF_DEVICE: + - gpu + CM_MODEL: + - 3d-unet-99 + - 3d-unet-99.9 + - resnet50 + - tags: get,generic-python-lib,_onnxruntime_gpu + env: + CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: "" + enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + CM_MLPERF_DEVICE: + - gpu + CM_MODEL: + - 3d-unet-99 + - 3d-unet-99.9 + - resnet50 + + ## Pytorch (CPU) + - tags: get,generic-python-lib,_torch + names: + - ml-engine-pytorch + - pytorch + enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + - tvm-pytorch + CM_MLPERF_DEVICE: + - cpu + - rocm + + ## Pytorch (CUDA) + - tags: get,generic-python-lib,_torch_cuda + names: + - ml-engine-pytorch + - pytorch + enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + - tvm-pytorch + - ray + CM_MLPERF_DEVICE: + - gpu + + ## Torchvision (CPU) + - tags: get,generic-python-lib,_torchvision + names: + - ml-engine-torchvision + enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + - tvm-pytorch + CM_MLPERF_DEVICE: + - cpu + + ## Torchvision (CUDA) + - tags: get,generic-python-lib,_torchvision_cuda + names: + - ml-engine-torchvision + enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + - tvm-pytorch + - ray + CM_MLPERF_DEVICE: + - gpu + + ## tensorrt + - tags: get,generic-python-lib,_tensorrt + names: + - ml-engine-tensorrt + enable_if_env: + CM_MLPERF_BACKEND: + - ray + + ## torch_tensorrt + - tags: get,generic-python-lib,_torch_tensorrt + names: + - ml-engine-torch_tensorrt + enable_if_env: + CM_MLPERF_BACKEND: + - ray + + ## Ray + - tags: get,generic-python-lib,_ray + names: + - ray + enable_if_env: + CM_MLPERF_BACKEND: + - ray + + ## async_timeout (for multi-node) + # NOTE. This is a bug in ray 2.8.0. Ray 2.8.0 needs the pip package + # async_timeout to be installed, so we need to install it manually. 
+ - tags: get,generic-python-lib,_async_timeout + names: + - async_timeout + enable_if_env: + CM_MLPERF_BACKEND: + - ray + + ## Transformers + - tags: get,generic-python-lib,_transformers + names: + - ml-engine-transformers + enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + - gptj-99 + - gptj-99.9 + + ## Tensorflow + - tags: get,generic-python-lib,_tensorflow + names: + - ml-engine-tensorflow + - tensorflow + enable_if_env: + CM_MLPERF_BACKEND: + - tf + - tflite + + ## NCNN + - tags: get,generic-python-lib,_package.ncnn + names: + - ml-engine-ncnn + enable_if_env: + CM_MLPERF_BACKEND: + - ncnn + + # - tags: get,generic-python-lib + # names: + # - ml-engine-tflite + # enable_if_env: + # CM_MLPERF_BACKEND: + # - tflite + # CM_MLPERF_DEVICE: + # - tpu + + + + ######################################################################## + # Install ML models + + - tags: get,ml-model,neural-magic,zoo + # sets CM_MLPERF_CUSTOM_MODEL_PATH + names: + - custom-ml-model + enable_if_env: + CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB: + - "on" + update_tags_from_env_with_prefix: + "_model-stub.": + - CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB + + ## ResNet50 + - tags: get,ml-model,image-classification,resnet50 + names: + - ml-model + - resnet50-model + enable_if_env: + CM_MODEL: + - resnet50 + skip_if_env: + CM_MLPERF_CUSTOM_MODEL_PATH: + - "on" + + ## RetinaNet + - tags: get,ml-model,object-detection,retinanet + names: + - ml-model + - retinanet-model + enable_if_env: + CM_MODEL: + - retinanet + + ## GPT-J + - tags: get,ml-model,large-language-model,gptj + names: + - ml-model + - gptj-model + - gpt-j-model + enable_if_env: + CM_MODEL: + - gptj-99 + - gptj-99.9 + + + ## RetinaNet (PyTorch weights, FP32) + - tags: get,ml-model,object-detection,resnext50,fp32,_pytorch-weights + names: + - ml-model + - retinanet-model + enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + CM_MLPERF_IMPLEMENTATION: + - nvidia + CM_MODEL: + - retinanet + + ## BERT + - tags: get,ml-model,language-processing,bert-large + names: + - ml-model + - bert-model + enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + skip_if_env: + CM_MLPERF_CUSTOM_MODEL_PATH: + - "on" + + ## SDXL + - tags: get,ml-model,stable-diffusion,text-to-image,sdxl + names: + - ml-model + - sdxl-model + enable_if_env: + CM_MODEL: + - stable-diffusion-xl + skip_if_env: + CM_MLPERF_CUSTOM_MODEL_PATH: + - "on" + + ## LLAMA2-70B + - tags: get,ml-model,llama2 + names: + - ml-model + - llama2-model + enable_if_env: + CM_MODEL: + - llama2-70b-99 + - llama2-70b-99.9 + skip_if_env: + CM_MLPERF_CUSTOM_MODEL_PATH: + - "on" + + ## 3d-unet + - tags: get,ml-model,medical-imaging,3d-unet + names: + - ml-model + - 3d-unet-model + enable_if_env: + CM_MODEL: + - 3d-unet-99 + - 3d-unet-99.9 + + ## Rnnt + - tags: get,ml-model,speech-recognition,rnnt + names: + - ml-model + - rnnt-model + enable_if_env: + CM_MODEL: + - rnnt + + ## Dlrm + - tags: get,ml-model,recommendation,dlrm + names: + - ml-model + - dlrm-model + enable_if_env: + CM_MODEL: + - dlrm-99 + - dlrm-99.9 + + ######################################################################## + # Install datasets + + ## ImageNet (small for tests) + - tags: get,dataset,image-classification,imagenet,preprocessed + names: + - imagenet-preprocessed + enable_if_env: + CM_MODEL: + - resnet50 + skip_if_env: + CM_MLPERF_VISION_DATASET_OPTION: + - on + + - tags: get,dataset,image-classification,imagenet,preprocessed,_pytorch + names: + - imagenet-preprocessed + enable_if_env: + CM_MODEL: + - resnet50 + CM_MLPERF_VISION_DATASET_OPTION: + - 
imagenet_pytorch
+
+  - tags: get,dataset-aux,image-classification,imagenet-aux
+    enable_if_env:
+      CM_MODEL:
+      - resnet50
+
+  ## Open Images for RetinaNet
+  - tags: get,dataset,object-detection,open-images,openimages,preprocessed,_validation
+    names:
+    - openimages-preprocessed
+    enable_if_env:
+      CM_MODEL:
+      - retinanet
+
+  ## CNNDM for the GPT-J large language model
+  - tags: get,dataset,cnndm,_validation
+    names:
+    - cnndm-preprocessed
+    enable_if_env:
+      CM_MODEL:
+      - gptj-99
+      - gptj-99.9
+
+  ## SQuAD for BERT
+  - tags: get,dataset,squad,original
+    names:
+    - squad-original
+    enable_if_env:
+      CM_MODEL:
+      - bert-99
+      - bert-99.9
+
+  - tags: get,dataset-aux,squad-vocab
+    enable_if_env:
+      CM_MODEL:
+      - bert-99
+      - bert-99.9
+
+  ## COCO for SDXL
+  - tags: get,dataset,coco2014,_validation
+    names:
+    - coco2014-preprocessed
+    enable_if_env:
+      CM_MODEL:
+      - stable-diffusion-xl
+
+  ## OpenOrca for LLAMA2-70b
+  - tags: get,preprocessed,dataset,openorca,_validation
+    names:
+    - openorca-preprocessed
+    enable_if_env:
+      CM_MODEL:
+      - llama2-70b-99
+      - llama2-70b-99.9
+
+  ## KiTS19 for 3d-unet
+  - tags: get,dataset,kits19,preprocessed
+    names:
+    - kits19-preprocessed
+    enable_if_env:
+      CM_MODEL:
+      - 3d-unet-99
+      - 3d-unet-99.9
+
+  ## LibriSpeech for RNN-T
+  - tags: get,dataset,librispeech,preprocessed
+    names:
+    - librispeech-preprocessed
+    enable_if_env:
+      CM_MODEL:
+      - rnnt
+
+  ## Criteo for DLRM
+  - tags: get,dataset,criteo,preprocessed
+    names:
+    - criteo-preprocessed
+    enable_if_env:
+      CM_MODEL:
+      - dlrm-99
+      - dlrm-99.9
+
+  ########################################################################
+  # Install MLPerf inference dependencies
+
+  # Creates user conf for given SUT
+  - tags: generate,user-conf,mlperf,inference
+    names:
+    - user-conf-generator
+
+  # Install MLPerf loadgen
+  - tags: get,loadgen
+    names:
+    - loadgen
+    - mlperf-inference-loadgen
+
+  # Download MLPerf inference source
+  - tags: get,mlcommons,inference,src
+    names:
+    - inference-src
+
+  # Download the MLPerf inference implementation only (same source tree)
+  - tags: get,mlcommons,inference,src
+    env:
+      CM_GET_MLPERF_IMPLEMENTATION_ONLY: 'yes'
+    names:
+    - mlperf-implementation
+
+  - tags: get,generic-python-lib,_package.psutil
+
+prehook_deps:
+  - names:
+    - remote-run-cmds
+    tags: remote,run,cmds
+    enable_if_env:
+      CM_ASSH_RUN_COMMANDS:
+      - "on"
+
+posthook_deps:
+  - names:
+    - mlperf-runner
+    tags: benchmark-mlperf
+    skip_if_env:
+      CM_MLPERF_SKIP_RUN:
+      - "on"
+
+post_deps:
+  - tags: save,mlperf,inference,state
+    names:
+    - save-mlperf-inference-state
+
+# Variations to customize dependencies
+variations:
+  python:
+    group: implementation
+    default: true
+    add_deps_recursive:
+      imagenet-accuracy-script:
+        tags: _float32
+    env:
+      CM_MLPERF_PYTHON: 'yes'
+      CM_MLPERF_IMPLEMENTATION: reference
+
+  # ML engine
+  onnxruntime:
+    group: framework
+    default: true
+    add_deps_recursive:
+      imagenet-preprocessed:
+        tags: _NCHW
+      openimages-preprocessed:
+        tags: _NCHW
+      ml-model:
+        tags: raw,_onnx
+    env:
+      CM_MLPERF_BACKEND: onnxruntime
+
+  onnxruntime,cpu:
+    env:
+      CM_MLPERF_BACKEND_VERSION: <<>>
+
+  onnxruntime,cuda:
+    env:
+      CM_MLPERF_BACKEND_VERSION: <<>>
+      ONNXRUNTIME_PREFERRED_EXECUTION_PROVIDER: "CUDAExecutionProvider"
+
+  pytorch:
+    group: framework
+    add_deps_recursive:
+      imagenet-preprocessed:
+        tags: _NCHW
+      openimages-preprocessed:
+        tags: _NCHW
+      ml-model:
+        tags: raw,_pytorch
+    env:
+      CM_MLPERF_BACKEND: pytorch
+      CM_MLPERF_BACKEND_VERSION: <<>>
+
+  pytorch,rocm:
+    add_deps_recursive:
+      pytorch:
+        tags: _rocm
+
+  ray:
+    group: framework
+    add_deps_recursive:
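+      # (As elsewhere in this file, `add_deps_recursive` propagates the
+      # listed tags down to the dependencies declared above by name; the
+      # entries below request the NCHW-layout dataset variants and the raw
+      # PyTorch model for the ray backend.)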
imagenet-preprocessed: + tags: _NCHW + openimages-preprocessed: + tags: _NCHW + ml-model: + tags: raw,_pytorch + env: + CM_MLPERF_BACKEND: ray + CM_MLPERF_BACKEND_VERSION: <<>> + + tf,rocm: + add_deps_recursive: + tensorflow: + tags: _rocm + env: + CM_MLPERF_BACKEND_VERSION: <<>> + + onnxruntime,rocm: + add_deps_recursive: + onnxruntime: + tags: _rocm + inference-src: + tags: _repo.https://github.com/ctuning/inference + env: + ONNXRUNTIME_PREFERRED_EXECUTION_PROVIDER: "ROCMExecutionProvider" + CM_MLPERF_BACKEND_VERSION: <<>> + + ncnn: + group: framework + add_deps_recursive: + imagenet-preprocessed: + tags: _NCHW + ml-model: + tags: raw,_ncnn + env: + CM_MLPERF_BACKEND: ncnn + CM_MLPERF_BACKEND_VERSION: <<>> + CM_MLPERF_VISION_DATASET_OPTION: imagenet_pytorch + + tflite: + group: framework + add_deps_recursive: + imagenet-preprocessed: + tags: _NHWC + ml-model: + tags: raw,_tflite,_no-argmax + env: + CM_MLPERF_BACKEND: tflite + CM_MLPERF_BACKEND_VERSION: <<>> + CM_MLPERF_VISION_DATASET_OPTION: imagenet_tflite_tpu + + tf: + group: framework + add_deps_recursive: + imagenet-preprocessed: + tags: _NHWC + ml-model: + tags: raw,_tf + env: + CM_MLPERF_BACKEND: tf + CM_MLPERF_BACKEND_VERSION: <<>> + + tensorflow: + alias: tf + + deepsparse: + group: framework + env: + CM_MLPERF_BACKEND: deepsparse + CM_MLPERF_BACKEND_VERSION: <<>> + deps: + - tags: get,generic-python-lib,_deepsparse + skip_if_env: + CM_HOST_PLATFORM_FLAVOR: + - aarch64 + - tags: get,generic-python-lib,_package.deepsparse-nightly + enable_if_env: + CM_HOST_PLATFORM_FLAVOR: + - aarch64 + add_deps_recursive: + mlperf-implementation: + version: deepsparse + ml-model: + tags: raw,_deepsparse + + tvm-onnx: + group: framework + env: + CM_MLPERF_BACKEND: tvm-onnx + CM_MLPERF_BACKEND_VERSION: <<>> + deps: + - tags: get,generic-python-lib,_onnx + - tags: get,tvm + names: + - tvm + - tags: get,tvm-model,_onnx + names: + - tvm-model + update_tags_from_env_with_prefix: + _model.: + - CM_MODEL + + + tvm-tflite: + group: framework + env: + CM_MLPERF_BACKEND: tvm-tflite + CM_MLPERF_BACKEND_VERSION: <<>> + deps: + - tags: get,generic-python-lib,_tflite + - tags: get,tvm + names: + - tvm + - tags: get,tvm-model,_tflite + names: + - tvm-model + update_tags_from_env_with_prefix: + _model.: + - CM_MODEL + + tvm-pytorch: + group: framework + env: + CM_MLPERF_BACKEND: tvm-pytorch + CM_MLPERF_BACKEND_VERSION: <<>> + CM_PREPROCESS_PYTORCH: 'yes' + MLPERF_TVM_TORCH_QUANTIZED_ENGINE: qnnpack + deps: + - tags: get,generic-python-lib,_torch + - tags: get,tvm + names: + - tvm + - tags: get,tvm-model,_pytorch + names: + - tvm-model + update_tags_from_env_with_prefix: + _model.: + - CM_MODEL + + # Reference MLPerf models + gptj-99.9: + group: models + base: + - gptj_ + env: + CM_MODEL: gptj-99.9 + + gptj-99: + group: models + base: + - gptj_ + env: + CM_MODEL: gptj-99 + + gptj_: + deps: + - tags: get,generic-python-lib,_torch + - tags: get,generic-python-lib,_package.datasets + - tags: get,generic-python-lib,_package.attrs + - tags: get,generic-python-lib,_package.accelerate + + bert-99.9: + group: models + base: + - bert + env: + CM_MODEL: bert-99.9 + + bert-99: + group: models + base: + - bert + env: + CM_MODEL: bert-99 + + bert: + env: + CM_MLPERF_MODEL_SKIP_BATCHING: true + deps: + - tags: get,generic-python-lib,_package.pydantic + version_max: "1.10.9" + - tags: get,generic-python-lib,_tokenization + - tags: get,generic-python-lib,_six + - tags: get,generic-python-lib,_package.absl-py + - tags: get,generic-python-lib,_protobuf + names: + - protobuf + 
version_max: "3.19" + enable_if_env: + CM_MLPERF_BACKEND: + - tf + - tflite + - tags: get,generic-python-lib,_boto3 + enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + - tags: get,generic-python-lib,_torch + names: + - ml-engine-pytorch + - pytorch + skip_if_env: + CM_MLPERF_DEVICE: + - gpu + add_deps_recursive: + inference-src: + tags: _deeplearningexamples + + sdxl: + group: models + env: + CM_MODEL: stable-diffusion-xl + CM_NUM_THREADS: "1" + deps: + - tags: get,generic-python-lib,_package.diffusers + names: + - diffusers + - tags: get,generic-python-lib,_package.transformers + names: + - transformers + - tags: get,generic-python-lib,_package.accelerate + names: + - accelerate + - tags: get,generic-python-lib,_package.torchmetrics + names: + - torchmetrics + - tags: get,generic-python-lib,_package.torch-fidelity + names: + - torch-fidelity + - tags: get,generic-python-lib,_package.open_clip_torch + names: + - open-clip + - tags: get,generic-python-lib,_package.opencv-python + names: + - opencv-python + - tags: get,generic-python-lib,_package.scipy + names: + - scipy + version: 1.10.1 + + llama2-70b_: + deps: + - tags: get,generic-python-lib,_package.transformers + names: + - transformers + - tags: get,generic-python-lib,_package.datasets + names: + - datasets + - tags: get,generic-python-lib,_package.sentencepiece + names: + - sentencepiece + - tags: get,generic-python-lib,_package.protobuf + names: + - protobuf + - tags: get,generic-python-lib,_package.accelerate + names: + - accelerate + - tags: get,generic-python-lib,_package.absl-py + names: + - absl-py + - tags: get,generic-python-lib,_package.evaluate + names: + - evaluate + - tags: get,generic-python-lib,_package.nltk + names: + - nltk + - tags: get,generic-python-lib,_package.rouge-score + names: + - rouge-score + + llama2-70b-99: + group: models + env: + CM_MODEL: llama2-70b-99 + base: + - llama2-70b_ + + llama2-70b_,cuda: + default_env: + CM_MLPERF_LOADGEN_BATCH_SIZE: 8 + + llama2-70b-99.9: + group: models + env: + CM_MODEL: llama2-70b-99.9 + base: + - llama2-70b_ + + 3d-unet-99.9: + group: models + base: + - 3d-unet + env: + CM_MODEL: 3d-unet-99.9 + + 3d-unet-99: + group: models + base: + - 3d-unet + env: + CM_MODEL: 3d-unet-99 + + 3d-unet: + env: + CM_TMP_IGNORE_MLPERF_QUERY_COUNT: true + CM_MLPERF_MODEL_SKIP_BATCHING: true + deps: + - tags: get,generic-python-lib,_package.nibabel + + dlrm-99.9: + group: models + base: + - dlrm + env: + CM_MODEL: dlrm-99.9 + + dlrm-99: + group: models + base: + - dlrm + env: + CM_MODEL: dlrm-99 + + dlrm: + env: + CM_MLPERF_MODEL_SKIP_BATCHING: true + deps: + - tags: get,dlrm,src + names: + - dlrm-src + - tags: get,generic-python-lib,_mlperf_logging + - tags: get,generic-python-lib,_opencv-python + - tags: get,generic-python-lib,_tensorboard + - tags: get,generic-python-lib,_protobuf + - tags: get,generic-python-lib,_scikit-learn + - tags: get,generic-python-lib,_tqdm + - tags: get,generic-python-lib,_onnx + - tags: get,generic-python-lib,_numpy + - tags: get,generic-python-lib,_package.torchrec + - tags: get,generic-python-lib,_package.pyre-extensions + - tags: get,generic-python-lib,_package.torchsnapshot + + rnnt: + group: models + env: + CM_MODEL: rnnt + CM_MLPERF_MODEL_SKIP_BATCHING: true + CM_TMP_IGNORE_MLPERF_QUERY_COUNT: true + deps: + - tags: get,generic-python-lib,_package.pydantic + version_max: "1.10.9" + - tags: get,generic-python-lib,_librosa + names: + - librosa + - tags: get,generic-python-lib,_inflect + - tags: get,generic-python-lib,_unidecode + - tags: 
get,generic-python-lib,_toml + add_deps_recursive: + mlperf-implementation: + tags: _repo.https://github.com/GATEOverflow/inference + + retinanet: + group: models + deps: + - tags: get,generic-python-lib,_opencv-python + - tags: get,generic-python-lib,_numpy + - tags: get,generic-python-lib,_pycocotools + + env: + CM_MODEL: retinanet + CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: 'yes' + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: '1' + + resnet50: + group: models + default: true + env: + CM_MODEL: resnet50 + CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: 'yes' + deps: + - tags: get,generic-python-lib,_opencv-python + - tags: get,generic-python-lib,_numpy + - tags: get,generic-python-lib,_pycocotools + prehook_deps: + - tags: get,generic-python-lib,_protobuf + names: + - protobuf + version_max: "4.23.4" + version_max_usable: "4.23.4" + enable_if_env: + CM_MLPERF_BACKEND: + - tf + - tflite + + # Target devices + cpu: + group: device + default: true + env: + CM_MLPERF_DEVICE: cpu + CUDA_VISIBLE_DEVICES: '' + USE_CUDA: no + USE_GPU: no + + cuda: + group: device + env: + CM_MLPERF_DEVICE: gpu + USE_CUDA: yes + USE_GPU: yes + + rocm: + group: device + env: + CM_MLPERF_DEVICE: rocm + USE_GPU: yes + + tpu: + group: device + env: + CM_MLPERF_DEVICE: tpu + + tpu,tflite: + add_deps_recursive: + imagenet-preprocessed: + tags: _tflite_tpu + + # Loadgen scenarios + offline: + env: + CM_MLPERF_LOADGEN_SCENARIO: Offline + multistream: + env: + CM_MLPERF_LOADGEN_SCENARIO: MultiStream + singlestream: + env: + CM_MLPERF_LOADGEN_SCENARIO: SingleStream + server: + env: + CM_MLPERF_LOADGEN_SCENARIO: Server + + # Model precision + fp32: + group: precision + default: true + add_deps_recursive: + ml-model: + tags: + _fp32 + env: + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_MODEL_PRECISION: float32 + + # Model precision + float16: + group: precision + add_deps_recursive: + ml-model: + tags: + _fp32 + env: + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_MODEL_PRECISION: float16 + + # Model precision + bfloat16: + group: precision + add_deps_recursive: + ml-model: + tags: + _fp32 + env: + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_MODEL_PRECISION: bfloat16 + + int8: + group: precision + env: + CM_MLPERF_QUANTIZATION: on + CM_MLPERF_MODEL_PRECISION: int8 + add_deps_recursive: + ml-model: + tags: + _int8 + + quantized: + alias: int8 + + batch_size.#: + group: batch-size + env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: "#" + add_deps_recursive: + ml-model: + tags: + _batch_size.# + tvm-model: + tags: + _batch_size.# + + network-sut: + group: network + env: + CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: network_sut + CM_NETWORK_LOADGEN: sut + + network-lon: + group: network + env: + CM_NETWORK_LOADGEN: lon + CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: network_loadgen + + beam_size.#: + env: + GPTJ_BEAM_SIZE: "#" + + # Reproducibility (past submissions) + r2.1_default: + add_deps_recursive: + compiler: + tags: llvm + inference-src: + tags: _octoml + loadgen: + version: r2.1 + env: + CM_RERUN: 'yes' + CM_SKIP_SYS_UTILS: 'yes' + CM_TEST_QUERY_COUNT: '100' diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py new file mode 100644 index 0000000000..23a738453d --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-python/customize.py @@ -0,0 +1,372 @@ +from cmind import utils +import os +import json +import shutil +import subprocess + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + script_path = i['run_script_input']['path'] + + if env.get('CM_MLPERF_SKIP_RUN', '') == 
"yes": + return {'return':0} + + if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + return {'return':0} + + if env.get('CM_MLPERF_POWER','') == "yes": + power = "yes" + else: + power = "no" + + rerun = True if env.get("CM_RERUN","")!='' else False + + if 'CM_MLPERF_LOADGEN_SCENARIO' not in env: + env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline" + + if 'CM_MLPERF_LOADGEN_MODE' not in env: + env['CM_MLPERF_LOADGEN_MODE'] = "accuracy" + + if 'CM_MODEL' not in env: + return {'return': 1, 'error': "Please select a variation specifying the model to run"} + + #if env['CM_MODEL'] == "resnet50": + # cmd = "cp " + os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['CM_DATASET_PATH'], + # "val_map.txt") + # ret = os.system(cmd) + + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + env.get('CM_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " " + + if 'CM_MLPERF_LOADGEN_QPS' not in env: + env['CM_MLPERF_LOADGEN_QPS_OPT'] = "" + else: + env['CM_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + env['CM_MLPERF_LOADGEN_QPS'] + + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['CM_MLPERF_LOADGEN_QPS_OPT'] + + if 'CM_NUM_THREADS' not in env: + if 'CM_MINIMIZE_THREADS' in env: + env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // \ + (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + else: + env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') + + if env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE','') != '' and not env.get('CM_MLPERF_MODEL_SKIP_BATCHING', False): + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + str(env['CM_MLPERF_LOADGEN_MAX_BATCHSIZE']) + + if env.get('CM_MLPERF_LOADGEN_BATCH_SIZE','') != '': + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + str(env['CM_MLPERF_LOADGEN_BATCH_SIZE']) + + if env.get('CM_MLPERF_LOADGEN_QUERY_COUNT','') != '' and not env.get('CM_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and (env['CM_MLPERF_LOADGEN_MODE'] == 'accuracy' or 'gptj' in env['CM_MODEL']) and env.get('CM_MLPERF_RUN_STYLE','') != "valid": + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + env['CM_MLPERF_LOADGEN_QUERY_COUNT'] + + print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + + if 'CM_MLPERF_CONF' not in env: + env['CM_MLPERF_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + + + x="" if os_info['platform'] == 'windows' else "'" + if "llama2-70b" in env['CM_MODEL']: + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + x+ env['CM_MLPERF_CONF'] + x + else: + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf "+ x + env['CM_MLPERF_CONF'] + x + + env['MODEL_DIR'] = env.get('CM_ML_MODEL_PATH') + if not env['MODEL_DIR']: + env['MODEL_DIR'] = os.path.dirname(env.get('CM_MLPERF_CUSTOM_MODEL_PATH', env.get('CM_ML_MODEL_FILE_WITH_PATH'))) + + RUN_CMD = "" + state['RUN'] = {} + test_list = ["TEST01", "TEST04", "TEST05"] + if env['CM_MODEL'] in ["rnnt", "bert-99", "bert-99.9", "dlrm-99", "dlrm-99.9", "3d-unet-99", "3d-unet-99.9"]: + test_list.remove("TEST04") + + scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] + state['RUN'][scenario] = {} + scenario_extra_options = '' + + NUM_THREADS = env['CM_NUM_THREADS'] + if int(NUM_THREADS) > 2 and env['CM_MLPERF_DEVICE'] == "gpu": + NUM_THREADS = "2" # Don't use more than 2 threads when run on GPU + + if env['CM_MODEL'] in [ 'resnet50', 'retinanet', 'stable-diffusion-xl' ] : + scenario_extra_options += " --threads " + NUM_THREADS + + ml_model_name = env['CM_MODEL'] + if 'CM_MLPERF_USER_CONF' in env: + 
user_conf_path = env['CM_MLPERF_USER_CONF'] + x="" if os_info['platform'] == 'windows' else "'" + if 'llama2-70b' in env['CM_MODEL']: + scenario_extra_options += " --user-conf " + x + user_conf_path + x + else: + scenario_extra_options += " --user_conf " + x + user_conf_path + x + + mode = env['CM_MLPERF_LOADGEN_MODE'] + mode_extra_options = "" + + if 'CM_DATASET_PREPROCESSED_PATH' in env and env['CM_MODEL'] in [ 'resnet50', 'retinanet' ]: + #dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['CM_DATASET_PREPROCESSED_PATH'] + if env.get('CM_MLPERF_LAST_RELEASE') not in [ "v2.0", "v2.1" ]: + dataset_options = " --use_preprocessed_dataset --cache_dir "+env['CM_DATASET_PREPROCESSED_PATH'] + else: + dataset_options = "" + if env['CM_MODEL'] == "retinanet": + dataset_options += " --dataset-list "+ env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + elif env['CM_MODEL'] == "resnet50": + dataset_options += " --dataset-list "+ os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") + env['DATA_DIR'] = env.get('CM_DATASET_PREPROCESSED_PATH') + else: + if 'CM_DATASET_PREPROCESSED_PATH' in env: + env['DATA_DIR'] = env.get('CM_DATASET_PREPROCESSED_PATH') + else: + env['DATA_DIR'] = env.get('CM_DATASET_PATH') + dataset_options = '' + + if env.get('CM_MLPERF_EXTRA_DATASET_ARGS','') != '': + dataset_options += " " + env['CM_MLPERF_EXTRA_DATASET_ARGS'] + + if mode == "accuracy": + mode_extra_options += " --accuracy" + + elif mode == "performance": + pass + + elif mode == "compliance": + + audit_full_path = env['CM_MLPERF_INFERENCE_AUDIT_PATH'] + mode_extra_options = " --audit '" + audit_full_path + "'" + + if env.get('CM_MLPERF_OUTPUT_DIR', '') == '': + env['CM_MLPERF_OUTPUT_DIR'] = os.getcwd() + + mlperf_implementation = env.get('CM_MLPERF_IMPLEMENTATION', 'reference') + cmd, run_dir = get_run_cmd(os_info, env, scenario_extra_options, mode_extra_options, dataset_options, mlperf_implementation) + + if env.get('CM_NETWORK_LOADGEN', '') == "lon": + + run_cmd = i['state']['mlperf_inference_run_cmd'] + env['CM_SSH_RUN_COMMANDS'] = [] + env['CM_SSH_RUN_COMMANDS'].append(run_cmd.replace("--network=lon", "--network=sut") + " &") + + + env['CM_MLPERF_RUN_CMD'] = cmd + env['CM_RUN_DIR'] = run_dir + env['CM_RUN_CMD'] = cmd + env['CK_PROGRAM_TMP_DIR'] = env.get('CM_ML_MODEL_PATH') #for tvm + + if env.get('CM_HOST_PLATFORM_FLAVOR','') == "arm64": + env['CM_HOST_PLATFORM_FLAVOR'] = "aarch64" + + return {'return':0} + +def get_run_cmd(os_info, env, scenario_extra_options, mode_extra_options, dataset_options, implementation="reference"): + if implementation == "reference": + return get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_options, dataset_options) + if implementation == "nvidia": + return get_run_cmd_nvidia(os_info, env, scenario_extra_options, mode_extra_options, dataset_options) + return "", os.getcwd() + + +def get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_options, dataset_options): + + if env['CM_MODEL'] in [ "gptj-99", "gptj-99.9" ]: + + env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j") + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + \ + " main.py --model-path=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['CM_DATASET_EVAL_PATH'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \ + scenario_extra_options + mode_extra_options + dataset_options + cmd = cmd.replace("--count", "--max_examples") + if 
env['CM_MLPERF_DEVICE'] == "gpu": + gpu_options = " --gpu" + env['CUDA_VISIBLE_DEVICES'] = "0" + else: + gpu_options = "" + cmd = cmd + gpu_options + env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR'] + return cmd, env['RUN_DIR'] + + if env['CM_MODEL'] in [ "resnet50", "retinanet" ]: + + env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "vision", "classification_and_detection") + env['OUTPUT_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] + if env.get('CM_MLPERF_VISION_DATASET_OPTION','') == '' and env.get('CM_MLPERF_DEVICE') != "tpu": + if os_info['platform'] == 'windows': + cmd = "python python/main.py --profile "+env['CM_MODEL']+"-"+env['CM_MLPERF_BACKEND'] + \ + " --model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['CM_DATASET_PREPROCESSED_PATH'] + \ + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + \ + " --output " + env['OUTPUT_DIR'] + " " + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + dataset_options + else: + cmd = "./run_local.sh " + env['CM_MLPERF_BACKEND'] + ' ' + \ + env['CM_MODEL'] + ' ' + env['CM_MLPERF_DEVICE'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + dataset_options + return cmd, env['RUN_DIR'] + + if env['CM_MLPERF_BACKEND'] == "ncnn": + env['MODEL_FILE'] = os.path.join(os.path.dirname(env.get('CM_ML_MODEL_FILE_WITH_PATH')), "resnet50_v1") + else: + env['MODEL_FILE'] = env.get('CM_MLPERF_CUSTOM_MODEL_PATH', env.get('CM_ML_MODEL_FILE_WITH_PATH')) + if not env['MODEL_FILE']: + return {'return': 1, 'error': 'No valid model file found!'} + + + env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR'] + + extra_options = " --output "+ env['CM_MLPERF_OUTPUT_DIR'] +" --model-name resnet50 --dataset " + env['CM_MLPERF_VISION_DATASET_OPTION'] + ' --max-batchsize ' + env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \ + " --dataset-path "+env['CM_DATASET_PREPROCESSED_PATH']+" --model "+env['MODEL_FILE'] + \ + " --preprocessed_dir "+env['CM_DATASET_PREPROCESSED_PATH'] + + if env.get('CM_MLPERF_DEVICE') == "tpu": + cmd = "cd '" + os.path.join(env['RUN_DIR'],"python") + "' && "+env.get('CM_SUDO', "")+" "+env['CM_PYTHON_BIN_WITH_PATH']+ " main.py "+\ + "--backend "+env['CM_MLPERF_BACKEND']+ " --scenario="+env['CM_MLPERF_LOADGEN_SCENARIO'] +" --device tpu "+ \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + mode_extra_options + dataset_options + extra_options + else: + cmd = "cd '" + os.path.join(env['RUN_DIR'],"python") + "' && "+env['CM_PYTHON_BIN_WITH_PATH']+ " main.py "+\ + "--backend "+env['CM_MLPERF_BACKEND']+ " --scenario="+env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + mode_extra_options + dataset_options + extra_options + env['SKIP_VERIFY_ACCURACY'] = True + + elif "bert" in env['CM_MODEL']: + + env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "bert") + env['MODEL_FILE'] = env.get('CM_MLPERF_CUSTOM_MODEL_PATH', env.get('CM_ML_MODEL_FILE_WITH_PATH')) + if not env['MODEL_FILE']: + return {'return': 1, 'error': 'No valid model file found!'} + if env.get('CM_MLPERF_QUANTIZATION') in [ "on", True, "1", "True" ]: + quantization_options = " --quantized" + else: + quantization_options = "" + cmd = env['CM_PYTHON_BIN_WITH_PATH']+ " run.py --backend=" + env['CM_MLPERF_BACKEND'] + " --scenario="+env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + 
mode_extra_options + dataset_options + quantization_options + if env['CM_MLPERF_BACKEND'] == "deepsparse": + cmd += " --batch_size=" + env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + " --model_path=" + env['MODEL_FILE'] + + if env.get('CM_MLPERF_CUSTOM_MODEL_PATH', '') != '': + env['CM_ML_MODEL_FILE_WITH_PATH'] = env['MODEL_FILE'] + + cmd = cmd.replace("--count", "--max_examples") + env['VOCAB_FILE'] = env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] + env['DATASET_FILE'] = env['CM_DATASET_SQUAD_VAL_PATH'] + env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR'] + env['SKIP_VERIFY_ACCURACY'] = True + + elif "rnnt" in env['CM_MODEL']: + + env['RUN_DIR'] = env['CM_MLPERF_INFERENCE_RNNT_PATH'] + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " run.py --backend " + env['CM_MLPERF_BACKEND'] + \ + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + " --manifest " + env['CM_DATASET_PREPROCESSED_JSON'] + \ + " --dataset_dir " + os.path.join(env['CM_DATASET_PREPROCESSED_PATH'], "..") + \ + " --pytorch_config_toml " + os.path.join("pytorch", "configs", "rnnt.toml") + \ + " --pytorch_checkpoint " + env['CM_ML_MODEL_FILE_WITH_PATH'] + \ + " --log_dir " + env['CM_MLPERF_OUTPUT_DIR'] + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + mode_extra_options + dataset_options + env['SKIP_VERIFY_ACCURACY'] = True + + elif "stable-diffusion-xl" in env['CM_MODEL']: + env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image") + backend = env['CM_MLPERF_BACKEND'] + device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] != "gpu" else "cuda" + max_batchsize = env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \ + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + " --profile " + 'stable-diffusion-xl-pytorch ' + \ + " --dataset " + 'coco-1024' + \ + " --dataset-path " + env['CM_DATASET_PATH_ROOT'] + \ + ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'].replace("bfloat", "bf").replace("float", "fp") + \ + " --device " + device + \ + " --max-batchsize " + max_batchsize + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + \ + " --output " + env['CM_MLPERF_OUTPUT_DIR'] + \ + " "#--model-path " + env['MODEL_DIR'] + + elif "llama2-70b" in env['CM_MODEL']: + env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "llama2-70b") + backend = env['CM_MLPERF_BACKEND'] + device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] != "gpu" else "cuda" + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \ + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + " --dataset-path " + env['CM_DATASET_PREPROCESSED_PATH'] + \ + " --device " + device.replace("cuda", "cuda:0") + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + \ + " --output-log-dir " + env['CM_MLPERF_OUTPUT_DIR'] + \ + ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \ + " --model-path " + env['MODEL_DIR'] + cmd = cmd.replace("--count", "--total-sample-count") + elif "3d-unet" in env['CM_MODEL']: + + env['RUN_DIR'] = env['CM_MLPERF_INFERENCE_3DUNET_PATH'] + backend = env['CM_MLPERF_BACKEND'] if env['CM_MLPERF_BACKEND'] != 'tf' else 'tensorflow' + cmd = env['CM_PYTHON_BIN_WITH_PATH']+ " run.py --backend=" + backend + " --scenario="+env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + " --model="+env['CM_ML_MODEL_FILE_WITH_PATH'] + \ + " --preprocessed_data_dir="+env['CM_DATASET_PREPROCESSED_PATH'] + \ + scenario_extra_options + 
mode_extra_options + dataset_options + + env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR'] + env['SKIP_VERIFY_ACCURACY'] = True + + elif "dlrm" in env['CM_MODEL']: # DLRM is in draft stage + + env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_DLRM_PATH'], "..", "dlrm_v2", "pytorch") + if 'terabyte' in env['CM_ML_MODEL_DATASET']: + dataset = "terabyte" + elif 'kaggle' in env['CM_ML_MODEL_DATASET']: + dataset = "kaggle" + elif 'multihot-criteo-sample' in env['CM_ML_MODEL_DATASET']: + dataset = "multihot-criteo-sample" + elif 'multihot-criteo' in env['CM_ML_MODEL_DATASET']: + dataset = "multihot-criteo" + + if env.get('CM_MLPERF_BIN_LOADER', '') == 'yes': + mlperf_bin_loader_string = " --mlperf-bin-loader" + else: + mlperf_bin_loader_string = "" + if env.get('CM_ML_MODEL_DEBUG','') == 'yes': + config = " --max-ind-range=10000000 --data-sub-sample-rate=0.875 " + else: + config = " --max-ind-range=40000000 " + + if env['CM_MLPERF_DEVICE'] == "gpu": + gpu_options = "" + env['CUDA_VISIBLE_DEVICES'] = "0" + else: + gpu_options = "" + env['WORLD_SIZE'] = "1" + + if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy" and env['CM_MLPERF_LOADGEN_SCENARIO'] == "Offline": + mode_extra_options += " --samples-per-query-offline=1" + + cmd = " ./run_local.sh " + env['CM_MLPERF_BACKEND'] + \ + ' dlrm ' + dataset + ' ' + env['CM_MLPERF_DEVICE'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + config + mlperf_bin_loader_string + \ + ' --samples-to-aggregate-quantile-file=./tools/dist_quantile.txt ' + \ + scenario_extra_options + mode_extra_options + dataset_options + gpu_options + cmd = cmd.replace("--count", "--count-queries") + env['OUTPUT_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] + + if env.get('CM_NETWORK_LOADGEN', '') in [ "lon", "sut" ]: + cmd = cmd + " " + "--network " + env['CM_NETWORK_LOADGEN'] + if env.get('CM_NETWORK_LOADGEN_SUT_SERVERS', []): + sut_servers = env['CM_NETWORK_LOADGEN_SUT_SERVERS'] + cmd += " --sut_server '"+"','".join(sut_servers)+"' " + + return cmd, env['RUN_DIR'] + +def postprocess(i): + + env = i['env'] + state = i['state'] + + inp = i['input'] + + return {'return':0} diff --git a/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py b/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py new file mode 100644 index 0000000000..705f1e3539 --- /dev/null +++ b/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py @@ -0,0 +1,550 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
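+# NOTE: this file comes from NVIDIA's MLPerf inference submission code; when
+# run under CM, the dataset location is overridden through the CM_DATASET_PATH
+# environment variable (see the G_OPENIMAGE_* constants below).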
+ + +__doc__ = """Scripts that take a retinanet engine and openImage input, infer the output and test the accuracy +""" + +import argparse +import json +import os +import sys +import glob +import random +import time +import pycuda +from PIL import Image +from importlib import import_module +from typing import Dict, Tuple, List, Optional + +from code.common.fix_sys_path import ScopedRestrictedImport +#with ScopedRestrictedImport(): +import numpy as np +import torch # Retinanet model source requires GPU installation of PyTorch 1.10 +from torchvision.transforms import functional as F +import onnx +import tensorrt as trt +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval + +from code.common import logging +from code.common.constants import TRT_LOGGER, Scenario +from code.common.systems.system_list import DETECTED_SYSTEM +from code.common.runner import EngineRunner, get_input_format +from code.common.systems.system_list import SystemClassifications +from code.plugin import load_trt_plugin +RetinanetEntropyCalibrator = import_module("code.retinanet.tensorrt.calibrator").RetinanetEntropyCalibrator + +G_RETINANET_NUM_CLASSES = 264 +G_RETINANET_IMG_SIZE = (800, 800) +G_RETINANET_INPUT_SHAPE = (3, 800, 800) +G_OPENIMAGE_CALSET_PATH = "build/data/open-images-v6-mlperf/calibration/train/data" +G_OPENIMAGE_CALMAP_PATH = "data_maps/open-images-v6-mlperf/cal_map.txt" +G_OPENIMAGE_VALSET_PATH = os.path.join(os.environ.get("CM_DATASET_PATH", "build/data/open-images-v6-mlperf"), "validation", "data") +G_OPENIMAGE_VALMAP_PATH = "data_maps/open-images-v6-mlperf/val_map.txt" +G_OPENIMAGE_ANNO_PATH = os.path.join(os.environ.get("CM_DATASET_PATH","build/data/open-images-v6-mlperf"), "annotations", "openimages-mlperf.json") +G_OPENIMAGE_PREPROCESSED_INT8_PATH = "build/preprocessed_data/open-images-v6-mlperf/validation/Retinanet/int8_linear" +# Using EfficientNMS now +G_RETINANET_CALIBRATION_CACHE_PATH = "code/retinanet/tensorrt/calibrator.cache" + + +def load_img_pytorch(fpath: str, do_transform=False) -> torch.tensor: + """ + Load the image from file into torch tensor + From mlcommon training repo: + https://github.com/mlcommons/training/blob/master/single_stage_detector/ssd/model/transform.py#L66 + + Args: + fpath (str): the path to the image file + do_transform (bool): whether to do postprocess (resize + normalize) + """ + loaded_tensor = F.to_tensor(Image.open(fpath).convert("RGB")) + if do_transform: + dtype = torch.float32 + device = torch.device("cpu") + image_size = [800, 800] + image_std = [0.229, 0.224, 0.225] + image_mean = [0.485, 0.456, 0.406] + mean = torch.as_tensor(image_mean, dtype=dtype, device=device) + std = torch.as_tensor(image_std, dtype=dtype, device=device) + img_norm = (loaded_tensor - mean[:, None, None]) / std[:, None, None] + img_resize = torch.nn.functional.interpolate(img_norm[None], size=image_size, scale_factor=None, mode='bilinear', + recompute_scale_factor=None, align_corners=False)[0] + return img_resize + + return loaded_tensor + + +class FirstLayerConvActPoolTacticSelector(trt.IAlgorithmSelector): + def select_algorithms(self, ctx, choices): + if "Conv_0 + 1783 + Mul_1 + 1785 + Add_2 + Relu_3 + MaxPool_4" in ctx.name: # Apply to the first layer + # MLPINF-1833: Disabled CaskConvActPool for TRT 8.5.0.4 + # TRT 8.5.0.4 has a bug with CaskConvActPool which has been fixed since 8.5.0.5 + forbidden_set = { + -3689373275198309793, # 0xccccb68da7fc3a5f + -4219016963003938541, # 0xc5730a6ceacd8913 + -4709698786673109216, # 0xbea3c9e81542d720 + 
8863348452769974412, # 0x7b00f0752fdcc88c + -216502845640484144, # 0xfcfed3cf18bcdad0 + -2840175123683203852, # 0xd895abc5dcf624f4 + 4391967500208500226, # 0x3cf3672bfafcee02 + -3076721233724812250, # 0xd54d4a56ceee5426 + 8268411641074121664, # 0x72bf4c9462ed7bc0 + 3484514246525022387, # 0x305b7b3ed6e970b3 + 679919370278938099, # 0x096f8f109d6225f3 + 1531503914513228020, # 0x1540feb22cae60f4 + 8162590574723450606, # 0x714758e16557c6ee + 6137316588591593674, # 0x552c20eba11d38ca + -5252194382421728148, # 0xb71c75095873646c + -2136593403804660582, # 0xe2594b9e90c7cc9a + 58603908831090367, # 0x00d033f1d05396bf + 1454666201826561687, # 0x1430033412a38e97 + -7506077189063215810, # 0xd43db7d0f0e3ba45 + -3153162056066942395, # 0x9521940f435d0c18 + -7700711094551245800, # 0xf126325c0aa4aa02 + -1070112490556970494, # 0x97d50e90c139753e + } + filtered_idxs = [idx for idx, choice in enumerate(choices) if choice.algorithm_variant.tactic not in forbidden_set] + to_ret = filtered_idxs + else: + # By default, say that all tactics are acceptable: + to_ret = [idx for idx, _ in enumerate(choices)] + return to_ret + + def report_algorithms(self, ctx, choices): + pass + + +class TRTTester: + + def __init__(self, engine_file, batch_size, precision, onnx_path, + skip_engine_build=False, verbose=False, + output_file="build/retinanet_trt.out" + ): + """ + Test the accuracy using the onnx file and TensorRT runtime. + The tester is able to build the engine from onnx. + """ + self.batch_size = batch_size + self.verbose = verbose + self.onnx_path = onnx_path + self.engine_file = engine_file + self.cache_file = G_RETINANET_CALIBRATION_CACHE_PATH + self.precision = precision + + # TensorRT engine related fields + # Not supported on dla + self.dla_core = None + + # Initiate the plugin and logger + self.logger = TRT_LOGGER # Use the global singleton, which is required by TRT. + self.logger.min_severity = trt.Logger.VERBOSE if self.verbose else trt.Logger.INFO + load_trt_plugin("retinanet") + trt.init_libnvinfer_plugins(self.logger, "") + + if self.onnx_path is not None and not skip_engine_build: + print(f"Creating engines from onnx: {self.onnx_path}") + self.create_trt_engine() + else: + if not os.path.exists(engine_file): + raise RuntimeError(f"Cannot find engine file {engine_file}. 
Please supply the onnx file or engine file.") + + self.runner = EngineRunner(self.engine_file, verbose=verbose) + + # OpenImage related fields + self.image_dir = G_OPENIMAGE_VALSET_PATH + self.val_annotate = G_OPENIMAGE_ANNO_PATH + self.output_file = output_file + + def apply_flag(self, flag): + """Apply a TRT builder flag.""" + self.builder_config.flags = (self.builder_config.flags) | (1 << int(flag)) + + def clear_flag(self, flag): + """Clear a TRT builder flag.""" + self.builder_config.flags = (self.builder_config.flags) & ~(1 << int(flag)) + + # Helper function to build a TRT engine from ONNX file + def create_trt_engine(self): + self.builder = trt.Builder(self.logger) + self.builder_config = self.builder.create_builder_config() + self.builder_config.max_workspace_size = 8 << 30 + self.builder_config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED if self.verbose else trt.ProfilingVerbosity.LAYER_NAMES_ONLY + + # Precision flags + self.clear_flag(trt.BuilderFlag.TF32) + if self.precision == "fp32": + self.input_dtype = "fp32" + self.input_format = "linear" + elif self.precision == "int8": + self.input_dtype = "int8" + self.input_format = "linear" + self.apply_flag(trt.BuilderFlag.INT8) + + # Calibrator for int8 + preprocessed_data_dir = "build/preprocessed_data" + calib_image_dir = os.path.join(preprocessed_data_dir, "open-images-v6-mlperf/calibration/Retinanet/fp32") + self.calibrator = RetinanetEntropyCalibrator(data_dir=calib_image_dir, + cache_file=self.cache_file, batch_size=10, max_batches=50, + force_calibration=False, calib_data_map=G_OPENIMAGE_CALMAP_PATH) + self.builder_config.int8_calibrator = self.calibrator + + # Apply tactic selector bypassing conv act pool for Orin: + if SystemClassifications.is_orin(): + tactic_selector = FirstLayerConvActPoolTacticSelector() + self.builder_config.algorithm_selector = tactic_selector + else: + raise Exception(f"{self.precision} not supported yet.") + + self.network = self.builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + model = onnx.load(self.onnx_path) + parser = trt.OnnxParser(self.network, self.logger) + success = parser.parse(onnx._serialize(model)) + if not success: + err_desc = parser.get_error(0).desc() + raise RuntimeError(f"Retinanet onnx model processing failed! 
Error: {err_desc}") + + # Set the network input type + if self.precision == "int8": + self.network.get_input(0).dtype = trt.int8 + + # Add obey_precision_constraints flag to suppress reformat + self.apply_flag(trt.BuilderFlag.OBEY_PRECISION_CONSTRAINTS) + + # Prepare the optimization profiles + self.profiles = [] + self.num_profiles = 1 # Can create more profiles here if needed + if self.dla_core is None: + for i in range(self.num_profiles): + profile = self.builder.create_optimization_profile() + for input_idx in range(self.network.num_inputs): + input_shape = self.network.get_input(input_idx).shape + input_name = self.network.get_input(input_idx).name + min_shape = trt.Dims(input_shape) + min_shape[0] = 1 + max_shape = trt.Dims(input_shape) + max_shape[0] = self.batch_size + profile.set_shape(input_name, min_shape, max_shape, max_shape) + if not profile: + raise RuntimeError("Invalid optimization profile!") + self.builder_config.add_optimization_profile(profile) + self.profiles.append(profile) + else: + # Use fixed batch size if on DLA + for input_idx in range(self.network.num_inputs): + input_shape = self.network.get_input(input_idx).shape + input_shape[0] = self.batch_size + self.network.get_input(input_idx).shape = input_shape + + engine = self.builder.build_engine(self.network, self.builder_config) + engine_inspector = engine.create_engine_inspector() + layer_info = engine_inspector.get_engine_information(trt.LayerInformationFormat.ONELINE) + logging.info("========= TensorRT Engine Layer Information =========") + logging.info(layer_info) + + buf = engine.serialize() + logging.info(f"Writing built engine to {self.engine_file}") + with open(self.engine_file, 'wb') as f: + f.write(buf) + + def run_openimage(self, num_samples=8): + cocoGt = COCO(annotation_file=self.val_annotate) + image_ids = cocoGt.getImgIds() + cat_ids = cocoGt.getCatIds() + num_images = min(num_samples, len(image_ids)) + print(f"Total number of images: {len(image_ids)}, number of categories: {len(cat_ids)}, running num_images: {num_images}") + + detections = [] + batch_idx = 0 + for image_idx in range(0, num_images, self.batch_size): + # Print Progress + if batch_idx % 20 == 0: + print(f"Processing batch: {batch_idx} image: {image_idx}/{num_images}") + + end_idx = min(image_idx + self.batch_size, num_images) + imgs = [] + img_original_sizes = [] + for idx in range(image_idx, end_idx): + image_id = image_ids[idx] + if self.precision == "fp32": + # Load the image using pytorch routine, but perform extra resize+normalize steps + img = load_img_pytorch(os.path.join(self.image_dir, cocoGt.imgs[image_id]["file_name"]), do_transform=True).numpy() + elif self.precision == "int8": + img = np.load(os.path.join(G_OPENIMAGE_PREPROCESSED_INT8_PATH, cocoGt.imgs[image_id]["file_name"] + '.npy')) + else: + raise Exception(f"Unsupported precision {self.precision}") + imgs.append(img) + img_original_sizes.append([cocoGt.imgs[image_id]["height"], cocoGt.imgs[image_id]["width"]]) + + if self.precision == "fp32": + imgs = np.ascontiguousarray(np.stack(imgs), dtype=np.float32) + elif self.precision == "int8": + imgs = np.stack(imgs) + + start_time = time.time() + outputs = self.runner([imgs], batch_size=self.batch_size) + + if self.verbose: + duration = time.time() - start_time + logging.info(f"Batch {batch_idx} >>> Inference time: {duration}") + + # Concatted outputs is in the shape of [BS, 7001] + # image_ids (duplicate of score for loadgen): [BS, 1000, 1] + # loc: [BS, 1000, 4] + # score: [BS, 1000, 1] + # label: [BS, 1000, 1] + # 
Concatted into [BS, 1000, 7] then reshape to [BS, 7000] + # keep_count: [BS, 1] + concat_output = outputs[0] + + for idx in range(0, end_idx - image_idx): + # keep_count = keep_counts[idx] + keep_count = int(concat_output[idx * 7001 + 7000]) + image_height = img_original_sizes[idx][0] + image_width = img_original_sizes[idx][1] + + for prediction_idx in range(0, keep_count): + # Each detection is in the order of [dummy_image_idx, xmin, ymin, xmax, ymax, score, label] + # This is pre-callback (otherwise x and y are swapped). + single_detection = concat_output[idx * 7001 + prediction_idx * 7: idx * 7001 + prediction_idx * 7 + 7] + loc = single_detection[1:5] + label = single_detection[6] + score = single_detection[5] + + # Scale the image output from [0, 1] to (img_h, img_w) + # [ymin, xmin, ymax, xmax] + scale_h = image_height + scale_w = image_width + loc[0::2] = loc[0::2] * scale_h + loc[1::2] = loc[1::2] * scale_w + loc = loc.tolist() + + # Convert from ltrb_xyinverted to [xmin, ymin, w, h] + bbox_coco_fmt = [ + loc[1], + loc[0], + loc[3] - loc[1], + loc[2] - loc[0], + ] + + coco_detection = { + "image_id": image_ids[image_idx + idx], + "category_id": cat_ids[int(label)], + "bbox": bbox_coco_fmt, + "score": float(score), + } + detections.append(coco_detection) + batch_idx += 1 + + output_dir = os.path.dirname(self.output_file) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + with open(self.output_file, "w") as f: + json.dump(detections, f) + cocoDt = cocoGt.loadRes(self.output_file) + e = COCOeval(cocoGt, cocoDt, 'bbox') + e.params.imgIds = image_ids[:num_images] + e.evaluate() + e.accumulate() + e.summarize() + map_score = e.stats[0] + return map_score + + +class PytorchTester: + """ + The reference implementation of the retinanet from the mlcommon-training repo, from: + https://github.com/mlcommons/training/tree/master/single_stage_detector/ssd/model + + To run this tester, you would need to clone the repo, and mount it to the container. + """ + + def __init__(self, pyt_ckpt_path, training_repo_path, batch_size=8, output_file="build/retinanet_pytorch.out"): + ssd_model_path = os.path.join(training_repo_path, "single_stage_detector", "ssd") + with ScopedRestrictedImport([ssd_model_path] + sys.path): + from model.retinanet import retinanet_from_backbone + pyt_model = retinanet_from_backbone( + backbone="resnext50_32x4d", + num_classes=G_RETINANET_NUM_CLASSES, + image_size=[800, 800], + data_layout="channels_last", + pretrained=None, + trainable_backbone_layers=3 + ) + + self.training_repo_path = training_repo_path + self.device = torch.device("cuda:0") + pyt_model.to(self.device) + if pyt_model.data_layout == "channels_last": + pyt_model = pyt_model.to(memory_format=torch.channels_last) + cpt = torch.load(pyt_ckpt_path, map_location='cpu') + pyt_model.load_state_dict(cpt["model"]) + self.pyt_model = pyt_model + self.val_annotate = G_OPENIMAGE_ANNO_PATH + self.batch_size = batch_size + self.output_file = output_file + self.image_dir = G_OPENIMAGE_VALSET_PATH + + def run_openimage(self, num_samples=8): + """ + Use openimage raw input to run the pytorch referene model for images. + Note that the input image will be of different sizes, and the output bboxes are not normalized to 800,800 + The pytorch model handles the resize and postprocess internally. 
For more details, see: + https://github.com/mlcommons/training/blob/master/single_stage_detector/ssd/model/retinanet.py#L475 + """ + self.pyt_model.eval() + cocoGt = COCO(annotation_file=self.val_annotate) + image_ids = cocoGt.getImgIds() + cat_ids = cocoGt.getCatIds() + num_images = min(num_samples, len(image_ids)) + print(f"Total number of images: {len(image_ids)}, number of categories: {len(cat_ids)}, running num_images: {num_images}") + + coco_detections = [] + for image_idx in range(0, num_images, self.batch_size): + end_idx = min(image_idx + self.batch_size, num_images) + # Load image and transfer to tensor (original image size) + imgs = [] + for idx in range(image_idx, end_idx): + image_id = image_ids[idx] + image_path = os.path.join(self.image_dir, cocoGt.imgs[image_id]["file_name"]) + img = load_img_pytorch(image_path).to(self.device) + imgs.append(img) + # print(cocoGt.imgs[image_id]["height"], cocoGt.imgs[image_id]["width"]) + + img = [] + for idx in range(image_idx, end_idx): + image_id = image_ids[idx] + tensor = load_img_pytorch(os.path.join(self.image_dir, cocoGt.imgs[image_id]["file_name"]), do_transform=True).numpy() + print(tensor.shape) + img.append(tensor) + img = np.ascontiguousarray(np.stack(img), dtype=np.float32) + + start_time = time.time() + with torch.no_grad(): + detections = self.pyt_model(imgs) + + for idx in range(0, end_idx - image_idx): + boxes = detections[idx]["boxes"].detach().cpu().numpy() + scores = detections[idx]["scores"].detach().cpu().numpy() + labels = detections[idx]["labels"].detach().cpu().numpy() + + num_preds = boxes.shape[0] + for pred_idx in range(num_preds): + # Convert from lrtb to [xmin, ymin, w, h] for cocoeval + box_pred = boxes[pred_idx][:] + xmin, ymin, xmax, ymax = box_pred + box_pred = np.array([xmin, ymin, xmax - xmin, ymax - ymin], dtype=np.float32) + score_pred = float(scores[pred_idx]) + label_pred = int(labels[pred_idx]) + coco_detection = { + "image_id": image_ids[image_idx + idx], + "category_id": cat_ids[label_pred], + "bbox": box_pred.tolist(), # Convert ndarray to list + "score": score_pred + } + coco_detections.append(coco_detection) + + output_dir = os.path.dirname(self.output_file) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + with open(self.output_file, "w") as f: + json.dump(coco_detections, f) + cocoDt = cocoGt.loadRes(self.output_file) + e = COCOeval(cocoGt, cocoDt, 'bbox') + e.params.imgIds = image_ids[:num_images] + e.evaluate() + e.accumulate() + e.summarize() + map_score = e.stats[0] + + # Uncomment below to call reference implementation evaluate. + # Import extra helper function from training repo. 
+ # ssd_model_path = os.path.join(self.training_repo_path, "single_stage_detector", "ssd") + # with ScopedRestrictedImport([ssd_model_path] + sys.path): + # from coco_utils import get_openimages + # import presets + # from utils import collate_fn + # from engine import evaluate + # from coco_utils import get_coco_api_from_dataset + # from coco_eval import DefaultCocoEvaluator + # coco_evaluator = evaluate(self.pyt_model, data_loader_test, device=self.device, epoch=None, args=Args) + # map_score = coco_evaluator.get_stats()['bbox'][0] + return map_score + + +def main(): + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("--engine_file", + help="Specify where the retinanet engine file is", + required=False) + parser.add_argument("--onnx_path", + help="The path to the onnx, if building from onnx", + default="build/models/retinanet-resnext50-32x4d/submission/retinanet_resnext50_32x4d_efficientNMS.800x800.onnx", + required=False) + parser.add_argument("--pyt_ckpt_path", + help="Specify where the PyTorch checkpoint file is", + default="build/models/retinanet-resnext50-32x4d/new/retinanet_model_10.pth") + parser.add_argument("--training_repo_path", + help="Specify where the MLCommons training directory is (from https://github.com/mlcommons/training)", + default="/home/scratch.zhihanj_sw/gitlab_root/mlcommons-training" + ) + parser.add_argument("--batch_size", + help="batch size", + type=int, + default=8) + parser.add_argument("--num_samples", + help="Number of samples to run. We have 24781 in total for openImages", + type=int, + default=24781) + parser.add_argument("--trt_precision", + help="Run TensorRT in the specified precision", + choices=("fp32", "fp16", "int8"), + default="fp32") + parser.add_argument("--skip_engine_build", + help="Skip the TRT engine build phase if possible.", + action="store_true") + parser.add_argument("--pytorch", + help="whether to run pytorch inference", + action="store_true") + parser.add_argument("--verbose", + help="verbose output", + action="store_true") + args = parser.parse_args() + + # Pytorch Tester + if args.pytorch: + # TODO: Check existence of training repo. 
+ logging.info(f"Running Accuracy test for Pytorch reference implementation.") + if args.training_repo_path is None or not os.path.exists(args.training_repo_path): + raise RuntimeError("Please pull mlcommon training repo from https://github.com/mlcommons/training, and specify with --training_repo_path") + pt_tester = PytorchTester(args.pyt_ckpt_path, args.training_repo_path, args.batch_size) + pt_acc = pt_tester.run_openimage(args.num_samples) + logging.info(f"Pytorch mAP Score: {pt_acc}, Reference: 0.375, % of ref: {pt_acc / 0.375}") + else: + # TRT Tester + logging.info(f"Running accuracy test for retinanet using {args.engine_file} ...") + tester = TRTTester(args.engine_file, args.batch_size, args.trt_precision, args.onnx_path, args.skip_engine_build, args.verbose) + # acc = tester.run_openimage(args.num_samples) + acc = tester.run_openimage(args.num_samples) + logging.info(f"mAP Score: {acc}, Reference: 0.375, % of ref: {acc / 0.375}") + + # To run the TRT tester: + # python3 -m code.retinanet.tensorrt.infer --engine_file /tmp/retina.b8.int8.engine --num_samples=1200 --batch_size=8 --trt_precision int8 + # To run the pytorch tester: + # python3 -m code.retinanet.tensorrt.infer --pytorch --num_samples=1200 --batch_size=8 + + +if __name__ == "__main__": + main() diff --git a/script/app-mlperf-inference-nvidia/README-about.md b/script/app-mlperf-inference-nvidia/README-about.md new file mode 100644 index 0000000000..8c353e9ace --- /dev/null +++ b/script/app-mlperf-inference-nvidia/README-about.md @@ -0,0 +1,137 @@ +This script is a CM wrapper to the official [Nvidia submission code](https://github.com/mlcommons/inference_results_v3.0/tree/master/closed/NVIDIA) used for MLPerf inference submissions. + + + +## Download the needed files + +* Please ask privately in [this discord channel](https://discord.gg/y7hupJsUNb) if you would like to get access to an Amazon S3 bucket containing all the needed files for easiness. Otherwise, you can download them from the below links. + +For x86 machines, please download the latest install tar files from the below sites +1. [cuDNN](https://developer.nvidia.com/cudnn) (for cuda 11) +2. [TensorRT](https://developer.nvidia.com/tensorrt) +3. Imagenet validation set (unfortunately not available via public URL) following the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) + +
+
+## Using Docker (Recommended on x86 systems)
+
+Assuming all the downloaded files are in the user's home directory, please follow these steps:
+
+1. Download CUDA 11.8
+   ```
+   wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
+   ```
+2. [Install docker](https://docs.docker.com/engine/install/) and [Nvidia container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
+
+3. Give docker permission to the current user
+   ```
+   sudo usermod -aG docker $USER
+   ```
+   Log out and log back in.
+   Restart docker if required, and confirm that the Nvidia container toolkit is working by running
+   ```
+   nvidia-ctk --version
+   ```
+4. Check that the Nvidia driver is working properly on the host.
+   ```
+   nvidia-smi
+   ```
+   If the above command produces an error, you'll need to install the Nvidia drivers on the host. You can do this via CM if you have sudo access:
+   ```
+   cmr "install cuda prebuilt _driver" --version=11.8.0
+   ```
+5. Build the docker container and mount the paths from the host machine.
+   **You may want to change the `scratch_path` location, as it can take hundreds of GBs.**
+   ```bash
+   cm docker script --tags=build,nvidia,inference,server \
+   --cuda_run_file_path=$HOME/cuda_11.8.0_520.61.05_linux.run \
+   --tensorrt_tar_file_path=$HOME/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
+   --cudnn_tar_file_path=$HOME/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
+   --imagenet_path=$HOME/imagenet-2012-val \
+   --scratch_path=$HOME/mlperf_scratch \
+   --docker_cm_repo=mlcommons@ck \
+   --results_dir=$HOME/results_dir \
+   --submission_dir=$HOME/submission_dir \
+   --adr.compiler.tags=gcc
+   ```
+   * Use `--docker_cache=no` to turn off docker caching.
+   * Use `--docker_run_cmd_prefix="cm pull repo mlcommons@ck"` to update the CK repository when docker caching is used.
+   * Use `--custom_system=no` if you are using a system similar to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems).
+
+6. At the end of the build, you'll get the following prompt unless you have chosen `--custom_system=no`. Please give a system name and say yes to generating the configuration files.
+   ### Example output
+   ```
+   ============================================
+   => A system ID is a string containing only letters, numbers, and underscores
+   => that is used as the human-readable name of the system. It is also used as
+   => the system name when creating the measurements/ and results/ entries.
+   => This string should also start with a letter to be a valid Python enum member name.
+   => Specify the system ID to use for the current system: phoenix
+   => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
+   => This script will generate Benchmark Configuration stubs for the detected system.
+   Continue? [y/n]: y
+   ```
+   Now you'll be inside the CM Nvidia docker container and can run further scripts.
+
+7. Once the build is complete, you can proceed with any further CM scripts, e.g. for running MLPerf inference. You can also save the container at this stage using [docker commit](https://docs.docker.com/engine/reference/commandline/commit/) so that it can be launched later without having to go through the previous steps.
+
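+For example, a minimal sketch of saving and reusing the container (image name
+and tag are illustrative):
+```bash
+# list containers to find the one you just configured
+docker ps -a
+# snapshot it as a reusable image
+docker commit <container_id> cm-nvidia-mlperf:configured
+# later, start directly from the snapshot
+docker run --gpus=all -it cm-nvidia-mlperf:configured
+```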
+ +
+
+
+
+## Without Docker
+
+
+1. Install CUDA
+   If CUDA is not detected, CM should download and install it automatically when you run the workflow.
+   **Nvidia drivers are expected to be installed on the system.**
+
+2. Install cuDNN
+   ```bash
+   cmr "get cudnn" --tar_file=
+   ```
+3. Install TensorRT
+   ```bash
+   cmr "get tensorrt _dev" --tar_file=
+   ```
+   On non-x86 systems like Nvidia Orin, you can install these via the package manager, and CM should then pick up the installation automatically during the workflow run.
+
+4. Build the Nvidia inference server
+   ```
+   cmr "build nvidia inference server" \
+   --adr.install-cuda-prebuilt.local_run_file_path=/data/cuda_11.8.0_520.61.05_linux.run \
+   --adr.tensorrt.tar_file=/data/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
+   --adr.cudnn.tar_file=/data/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
+   --adr.compiler.tags=gcc \
+   [--custom_system=no]
+   ```
+   Use `--custom_system=no` if you are using a system similar to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems).
+
+5. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please give a system name and say yes to generating the configuration files.
+
+   ### Example output
+   ```
+   ============================================
+   => A system ID is a string containing only letters, numbers, and underscores
+   => that is used as the human-readable name of the system. It is also used as
+   => the system name when creating the measurements/ and results/ entries.
+   => This string should also start with a letter to be a valid Python enum member name.
+   => Specify the system ID to use for the current system: phoenix
+   => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
+   => This script will generate Benchmark Configuration stubs for the detected system.
+   Continue? [y/n]: y
+   ```
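+Once the server is built, the harness can be invoked through this script's own tags, for example (a sketch using the default ResNet50 model and the variations and flags documented in the main README):
+```bash
+cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia _resnet50 _offline _run_harness" \
+    --mode=performance
+```
+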
+
+
+## Acknowledgments
+
+* A common CM interface and automation for MLPerf inference benchmarks was developed by Arjun Suresh and Grigori Fursin,
+ sponsored by the [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https://cKnowledge.org).
+* Nvidia's MLPerf inference implementation was developed by Zhihan Jiang, Ethan Cheng, Yiheng Zhang and Jinho Suh.
+
diff --git a/script/app-mlperf-inference-nvidia/README.md b/script/app-mlperf-inference-nvidia/README.md
new file mode 100644
index 0000000000..e5bdaad346
--- /dev/null
+++ b/script/app-mlperf-inference-nvidia/README.md
@@ -0,0 +1,1305 @@
+Automatically generated README for this automation recipe: **app-mlperf-inference-nvidia**
+
+Category: **Reproduce MLPerf benchmarks**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-mlperf-inference-nvidia,bc3b17fb430f4732) ]*
+
+---
+
+This script is a CM wrapper to the official [Nvidia submission code](https://github.com/mlcommons/inference_results_v3.0/tree/master/closed/NVIDIA) used for MLPerf inference submissions.
+
+
+
+## Download the needed files
+
+* Please ask privately in [this discord channel](https://discord.gg/y7hupJsUNb) if you would like access to an Amazon S3 bucket containing all the needed files for convenience. Otherwise, you can download them from the links below.
+
+For x86 machines, please download the latest install tar files from the sites below:
+1. [cuDNN](https://developer.nvidia.com/cudnn) (for CUDA 11)
+2. [TensorRT](https://developer.nvidia.com/tensorrt)
+3. Imagenet validation set (unfortunately not available via public URL) following the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md)
+
+
+
+
+## Using Docker (Recommended on x86 systems)
+
+
+Assuming all the downloaded files are in the user's home directory, please follow these steps:
+
+1. Download CUDA 11.8
+   ```
+   wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
+   ```
+2. [Install docker](https://docs.docker.com/engine/install/) and [Nvidia container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
+
+3. Give docker permission to the current user
+   ```
+   sudo usermod -aG docker $USER
+   ```
+   Log out and log back in.
+   Restart docker if required and confirm that the Nvidia container toolkit is working:
+   ```
+   nvidia-ctk --version
+   ```
+4. Check that the Nvidia driver is working properly on the host.
+   ```
+   nvidia-smi
+   ```
+   If the above command produces any error, you'll need to install the Nvidia drivers on the host. You can do this via CM if you have sudo access:
+   ```
+   cmr "install cuda prebuilt _driver" --version=11.8.0
+   ```
+5. Build the docker container and mount the paths from the host machine.
+   **You may want to change the `scratch_path` location as it can take hundreds of GBs.**
+   ```bash
+   cm docker script --tags=build,nvidia,inference,server \
+   --cuda_run_file_path=$HOME/cuda_11.8.0_520.61.05_linux.run \
+   --tensorrt_tar_file_path=$HOME/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
+   --cudnn_tar_file_path=$HOME/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
+   --imagenet_path=$HOME/imagenet-2012-val \
+   --scratch_path=$HOME/mlperf_scratch \
+   --docker_cm_repo=mlcommons@ck \
+   --results_dir=$HOME/results_dir \
+   --submission_dir=$HOME/submission_dir \
+   --adr.compiler.tags=gcc
+   ```
+   * Use `--docker_cache=no` to turn off Docker caching
+   * Use `--docker_run_cmd_prefix="cm pull repo mlcommons@ck"` to update the CK repository when Docker caching is used
+   * Use `--custom_system=no` if you are using a system similar to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems).
+
+6. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please give a system name and say yes to generating the configuration files.
+   ### Example output
+   ```
+   ============================================
+   => A system ID is a string containing only letters, numbers, and underscores
+   => that is used as the human-readable name of the system. It is also used as
+   => the system name when creating the measurements/ and results/ entries.
+   => This string should also start with a letter to be a valid Python enum member name.
+   => Specify the system ID to use for the current system: phoenix
+   => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
+   => This script will generate Benchmark Configuration stubs for the detected system.
+   Continue? [y/n]: y
+   ```
+   Now you'll be inside the CM Nvidia docker container and can run further scripts.
+
+7. Once the build is complete, you can proceed with any further CM scripts, e.g. for MLPerf inference. You can also save the container at this stage using [docker commit](https://docs.docker.com/engine/reference/commandline/commit/) so that it can be launched later without having to go through the previous steps.
+
+ +
+
+
+
+## Without Docker
+
+
+1. Install CUDA
+   If CUDA is not detected, CM should download and install it automatically when you run the workflow.
+   **Nvidia drivers are expected to be installed on the system.**
+
+2. Install cuDNN
+   ```bash
+   cmr "get cudnn" --tar_file=
+   ```
+3. Install TensorRT
+   ```bash
+   cmr "get tensorrt _dev" --tar_file=
+   ```
+   On non-x86 systems like Nvidia Orin, you can install these via the package manager, and CM should then pick up the installation automatically during the workflow run.
+
+4. Build the Nvidia inference server
+   ```
+   cmr "build nvidia inference server" \
+   --adr.install-cuda-prebuilt.local_run_file_path=/data/cuda_11.8.0_520.61.05_linux.run \
+   --adr.tensorrt.tar_file=/data/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
+   --adr.cudnn.tar_file=/data/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
+   --adr.compiler.tags=gcc \
+   [--custom_system=no]
+   ```
+   Use `--custom_system=no` if you are using a system similar to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems).
+
+5. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please give a system name and say yes to generating the configuration files.
+
+   ### Example output
+   ```
+   ============================================
+   => A system ID is a string containing only letters, numbers, and underscores
+   => that is used as the human-readable name of the system. It is also used as
+   => the system name when creating the measurements/ and results/ entries.
+   => This string should also start with a letter to be a valid Python enum member name.
+   => Specify the system ID to use for the current system: phoenix
+   => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
+   => This script will generate Benchmark Configuration stubs for the detected system.
+   Continue? [y/n]: y
+   ```
+
+
+## Acknowledgments
+
+* A common CM interface and automation for MLPerf inference benchmarks was developed by Arjun Suresh and Grigori Fursin,
+ sponsored by the [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https://cKnowledge.org).
+* Nvidia's MLPerf inference implementation was developed by Zhihan Jiang, Ethan Cheng, Yiheng Zhang and Jinho Suh.
+
+
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-nvidia)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see the meta description above): *reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia*
+* Output cached? *False*
+* See the [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+```cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia" --help```
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia`
+
+`cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia[,variations] [--input_flags]`
+
+*or*
+
+`cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia"`
+
+`cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+if r['return']>0:
+ print (r['error'])
+
+```
+
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "reproduce mlcommons mlperf inference harness nvidia-harness nvidia[variations]" [--input_flags]`
+
+___
+### Customization
+
+
+#### Variations
+
+ * *Internal group (variations should not be selected manually)*
+ Click here to expand this section. + + * `_3d-unet_` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_transformers + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.nibabel + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_pandas + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_bert_` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_transformers + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_safetensors + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnx + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_dlrm_` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_torch + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.torchsnapshot + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.torchrec + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.fbgemm-gpu + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnx-graphsurgeon + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.scikit-learn + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_gptj_` + - Environment variables: + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_package.datasets + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.simplejson + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
+ + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_a100,sxm,3d-unet_,offline,run_harness` + - Workflow: + * `_a100,sxm,bert_,offline,run_harness` + - Workflow: + * `_a100,sxm,dlrm_,offline,run_harness` + - Workflow: + * `_a100,sxm,resnet50,offline,run_harness` + - Environment variables: + - *CM_MLPERF_PERFORMANCE_SAMPLE_COUNT*: `2048` + - Workflow: + * `_a100,sxm,retinanet,offline,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS*: `2` + - *CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS*: `2` + - *CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE*: `300000000000` + - Workflow: + * `_a100,sxm,rnnt,offline,run_harness` + - Workflow: + * `_gptj_,build` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1 + - CM script: [install-pytorch-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pytorch-from-src) + * get,cmake + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * `_gptj_,build_engine` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1 + - CM script: [install-pytorch-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pytorch-from-src) + * get,cmake + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * `_gptj_,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_USE_FP8*: `True` + - *CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT*: `True` + - *CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS*: `2` + - *CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS*: `True` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1 + - CM script: [install-pytorch-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pytorch-from-src) + * get,cmake + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * `_gpu_memory.16,3d-unet_,offline,run_harness` + - Workflow: + * `_gpu_memory.16,bert_,offline,run_harness` + - Workflow: + * `_gpu_memory.16,dlrm_,offline,run_harness` + - Workflow: + * `_gpu_memory.16,gptj_,offline,run_harness` + - Workflow: + * `_gpu_memory.16,resnet50,offline,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS*: `4` + - Workflow: + * `_gpu_memory.16,retinanet,offline,run_harness` + - Workflow: + * `_gpu_memory.16,rnnt,offline,run_harness` + - Workflow: + * `_gpu_memory.24,3d-unet_,offline,run_harness` + - Workflow: + * `_gpu_memory.24,bert_,offline,run_harness` + - Workflow: + * `_gpu_memory.24,dlrm_,offline,run_harness` + - Workflow: + * `_gpu_memory.24,gptj_,offline,run_harness` + - Workflow: + * `_gpu_memory.24,resnet50,offline,run_harness` + - Workflow: + * `_gpu_memory.24,retinanet,offline,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS*: `2` + - *CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS*: `2` + - Workflow: + * `_gpu_memory.24,rnnt,offline,run_harness` + - Workflow: + * `_gpu_memory.32,3d-unet_,offline,run_harness` + - Workflow: + * `_gpu_memory.32,bert_,offline,run_harness` + - Workflow: + * `_gpu_memory.32,dlrm_,offline,run_harness` + - Workflow: + * `_gpu_memory.32,gptj_,offline,run_harness` + - Workflow: + * `_gpu_memory.32,resnet50,offline,run_harness` + - Workflow: + * `_gpu_memory.32,retinanet,offline,run_harness` + - Workflow: + * 
`_gpu_memory.32,rnnt,offline,run_harness` + - Workflow: + * `_gpu_memory.40,3d-unet_,offline,run_harness` + - Workflow: + * `_gpu_memory.40,bert_,offline,run_harness` + - Workflow: + * `_gpu_memory.40,dlrm_,offline,run_harness` + - Workflow: + * `_gpu_memory.40,gptj_,offline,run_harness` + - Workflow: + * `_gpu_memory.40,resnet50,offline,run_harness` + - Workflow: + * `_gpu_memory.40,retinanet,offline,run_harness` + - Workflow: + * `_gpu_memory.40,rnnt,offline,run_harness` + - Workflow: + * `_gpu_memory.48,3d-unet_,offline,run_harness` + - Workflow: + * `_gpu_memory.48,bert_,offline,run_harness` + - Workflow: + * `_gpu_memory.48,dlrm_,offline,run_harness` + - Workflow: + * `_gpu_memory.48,gptj_,offline,run_harness` + - Workflow: + * `_gpu_memory.48,resnet50,offline,run_harness` + - Workflow: + * `_gpu_memory.48,retinanet,offline,run_harness` + - Workflow: + * `_gpu_memory.48,rnnt,offline,run_harness` + - Workflow: + * `_gpu_memory.80,3d-unet_,offline,run_harness` + - Workflow: + * `_gpu_memory.80,bert_,server,run_harness` + - Workflow: + * `_gpu_memory.80,dlrm_,offline,run_harness` + - Workflow: + * `_gpu_memory.80,gptj_,offline,run_harness` + - Workflow: + * `_gpu_memory.80,resnet50,offline,run_harness` + - Workflow: + * `_gpu_memory.80,retinanet,offline,run_harness` + - Workflow: + * `_gpu_memory.80,rnnt,offline,run_harness` + - Workflow: + * `_l4,3d-unet_,offline,run_harness` + - Workflow: + * `_l4,bert_,offline,run_harness` + - Workflow: + * `_l4,bert_,server,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN*: `200` + - *CM_MLPERF_NVIDIA_HARNESS_SERVER_NUM_ISSUE_QUERY_THREADS*: `1` + - *CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP*: `1.0` + - *CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN*: `True` + - Workflow: + * `_l4,dlrm_,offline,run_harness` + - Workflow: + * `_l4,resnet50` + - Workflow: + * `_l4,resnet50,offline,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS*: `2` + - *CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS*: `1` + - *CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS*: `True` + - Workflow: + * `_l4,resnet50,server,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS*: `9` + - *CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS*: `2` + - *CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS*: `True` + - *CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT*: `True` + - *CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC*: `2000` + - *CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE*: `True` + - Workflow: + * `_l4,retinanet,offline,run_harness` + - Workflow: + * `_l4,retinanet,server,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS*: `2` + - *CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS*: `2` + - *CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT*: `True` + - *CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC*: `30000` + - *CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE*: `20000000000` + - Workflow: + * `_l4,rnnt,offline,run_harness` + - Workflow: + * `_l4,rnnt,server,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE*: `64` + - *CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES*: `1024` + - *CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS*: `1024` + - Workflow: + * `_multistream,resnet50` + - Environment variables: + - *SKIP_POLICIES*: `1` + - Workflow: + * `_orin,rnnt,singlestream,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS*: `1` + - Workflow: + * `_resnet50,multistream,run_harness,num-gpus.1` + - Workflow: + * 
`_resnet50,multistream,run_harness,num-gpus.2` + - Workflow: + * `_resnet50,server,run_harness` + - Workflow: + * `_retinanet,multistream,run_harness` + - Workflow: + * `_retinanet,server,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS*: `2` + - *CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS*: `2` + - Workflow: + * `_rtx_4090,3d-unet_,offline,run_harness` + - Workflow: + * `_rtx_4090,3d-unet_,server,run_harness` + - Workflow: + * `_rtx_4090,bert_,offline,run_harness` + - Workflow: + * `_rtx_4090,bert_,server,run_harness` + - Workflow: + * `_rtx_4090,dlrm_,offline,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART*: `0.30` + - Workflow: + * `_rtx_4090,gptj_,offline,run_harness` + - Workflow: + * `_rtx_4090,gptj_,server,run_harness` + - Workflow: + * `_rtx_4090,resnet50,offline,run_harness` + - Workflow: + * `_rtx_4090,resnet50,server,run_harness` + - Workflow: + * `_rtx_4090,retinanet,offline,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS*: `2` + - *CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS*: `2` + - Workflow: + * `_rtx_4090,retinanet,server,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS*: `2` + - *CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS*: `2` + - Workflow: + * `_rtx_4090,rnnt,offline,run_harness` + - Workflow: + * `_rtx_4090,rnnt,server,run_harness` + - Workflow: + * `_rtx_6000_ada,3d-unet_,offline,run_harness` + - Workflow: + * `_rtx_6000_ada,3d-unet_,server,run_harness` + - Workflow: + * `_rtx_6000_ada,bert_,offline,run_harness` + - Workflow: + * `_rtx_6000_ada,bert_,server,run_harness` + - Workflow: + * `_rtx_6000_ada,dlrm_,offline,run_harness` + - Workflow: + * `_rtx_6000_ada,resnet50,offline,run_harness` + - Workflow: + * `_rtx_6000_ada,resnet50,server,run_harness` + - Workflow: + * `_rtx_6000_ada,retinanet,offline,run_harness` + - Workflow: + * `_rtx_6000_ada,retinanet,server,run_harness` + - Workflow: + * `_rtx_6000_ada,rnnt,offline,run_harness` + - Workflow: + * `_rtx_6000_ada,rnnt,server,run_harness` + - Workflow: + * `_rtx_a6000,3d-unet_,offline,run_harness` + - Workflow: + * `_rtx_a6000,3d-unet_,server,run_harness` + - Workflow: + * `_rtx_a6000,bert_,offline,run_harness` + - Workflow: + * `_rtx_a6000,bert_,server,run_harness` + - Workflow: + * `_rtx_a6000,dlrm_,offline,run_harness` + - Workflow: + * `_rtx_a6000,resnet50,offline,run_harness` + - Workflow: + * `_rtx_a6000,resnet50,server,run_harness` + - Workflow: + * `_rtx_a6000,retinanet,offline,run_harness` + - Workflow: + * `_rtx_a6000,retinanet,server,run_harness` + - Workflow: + * `_rtx_a6000,rnnt,offline,run_harness` + - Workflow: + * `_rtx_a6000,rnnt,server,run_harness` + - Workflow: + * `_run-harness` + - Workflow: + * `_singlestream,resnet50` + - Environment variables: + - *SKIP_POLICIES*: `1` + - Workflow: + * `_singlestream,run_harness` + - Workflow: + * `_t4,3d-unet_,offline,run_harness` + - Workflow: + * `_t4,bert_,offline,run_harness` + - Workflow: + * `_t4,bert_,server,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN*: `240` + - *CM_MLPERF_NVIDIA_HARNESS_SERVER_NUM_ISSUE_QUERY_THREADS*: `0` + - *CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN*: `no` + - Workflow: + * `_t4,dlrm_,offline,run_harness` + - Workflow: + * `_t4,resnet50` + - Workflow: + * `_t4,resnet50,offline,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS*: `4` + - Workflow: + * 
`_t4,resnet50,server,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS*: `2` + - *CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS*: `4` + - *CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT*: `True` + - *CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC*: `2000` + - *CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP*: `0.993` + - Workflow: + * `_t4,retinanet,offline,run_harness` + - Workflow: + * `_t4,retinanet,server,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS*: `2` + - *CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS*: `2` + - *CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT*: `True` + - *CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC*: `20000` + - *CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE*: `20000000000` + - Workflow: + * `_t4,rnnt,offline,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS*: `4` + - *CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS*: `True` + - *CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE*: `128` + - *CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN*: `True` + - Workflow: + * `_t4,rnnt,server,run_harness` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS*: `4` + - *CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS*: `True` + - *CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE*: `128` + - *CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN*: `True` + - Workflow: + +
+ + + * Group "**backend**" +
+ Click here to expand this section. + + * **`_tensorrt`** (default) + - Environment variables: + - *CM_MLPERF_BACKEND*: `tensorrt` + - *CM_MLPERF_BACKEND_NAME*: `TensorRT` + - Workflow: + +
+ + + * Group "**batch-size**" +
+ Click here to expand this section. + + * `_batch_size.#` + - Environment variables: + - *CM_MODEL_BATCH_SIZE*: `#` + - *CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE*: `#` + - Workflow: + +
+ + + * Group "**build-engine-options**" +
+ Click here to expand this section. + + * `_build_engine_options.#` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS*: `#` + - Workflow: + +
+ + + * Group "**device**" +
+ Click here to expand this section. + + * `_cpu` + - Environment variables: + - *CM_MLPERF_DEVICE*: `cpu` + - Workflow: + * **`_cuda`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `gpu` + - *CM_MLPERF_DEVICE_LIB_NAMESPEC*: `cudart` + - Workflow: + +
+ + + * Group "**device-memory**" +
+ Click here to expand this section. + + * `_gpu_memory.16` + - Environment variables: + - *CM_NVIDIA_GPU_MEMORY*: `16` + - Workflow: + * `_gpu_memory.24` + - Environment variables: + - *CM_NVIDIA_GPU_MEMORY*: `24` + - Workflow: + * `_gpu_memory.32` + - Environment variables: + - *CM_NVIDIA_GPU_MEMORY*: `32` + - Workflow: + * `_gpu_memory.40` + - Environment variables: + - *CM_NVIDIA_GPU_MEMORY*: `40` + - Workflow: + * `_gpu_memory.48` + - Environment variables: + - *CM_NVIDIA_GPU_MEMORY*: `48` + - Workflow: + * `_gpu_memory.8` + - Environment variables: + - *CM_NVIDIA_GPU_MEMORY*: `8` + - Workflow: + * `_gpu_memory.80` + - Environment variables: + - *CM_NVIDIA_GPU_MEMORY*: `80` + - Workflow: + +
+ + + * Group "**dla-batch-size**" +
+ Click here to expand this section. + + * `_dla_batch_size.#` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE*: `#` + - *CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2*: `dla_batch_size.#` + - Workflow: + +
+ + + * Group "**gpu-connection**" +
+ Click here to expand this section. + + * `_pcie` + - Workflow: + * `_sxm` + - Workflow: + +
+ + + * Group "**gpu-name**" +
+
+ Click here to expand this section.
+
+ * `_a100`
+ - Environment variables:
+ - *CM_NVIDIA_CUSTOM_GPU*: `yes`
+ - Workflow:
+ * `_a6000`
+ - Environment variables:
+ - *CM_NVIDIA_CUSTOM_GPU*: `yes`
+ - Workflow:
+ * `_custom`
+ - Environment variables:
+ - *CM_NVIDIA_CUSTOM_GPU*: `yes`
+ - *CM_MODEL_BATCH_SIZE*: ``
+ - *CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE*: `<<<CM_MODEL_BATCH_SIZE>>>`
+ - Workflow:
+ * `_l4`
+ - Environment variables:
+ - *CM_NVIDIA_CUSTOM_GPU*: `yes`
+ - Workflow:
+ * `_orin`
+ - Environment variables:
+ - *CM_NVIDIA_CUSTOM_GPU*: `yes`
+ - *CM_MODEL_BATCH_SIZE*: ``
+ - *CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE*: `<<<CM_MODEL_BATCH_SIZE>>>`
+ - Workflow:
+ * `_rtx_4090`
+ - Environment variables:
+ - *CM_NVIDIA_CUSTOM_GPU*: `yes`
+ - Workflow:
+ * `_rtx_6000_ada`
+ - Environment variables:
+ - *CM_NVIDIA_CUSTOM_GPU*: `yes`
+ - Workflow:
+ * `_t4`
+ - Environment variables:
+ - *CM_NVIDIA_CUSTOM_GPU*: `yes`
+ - Workflow:
+
+ + + * Group "**loadgen-scenario**" +
+ Click here to expand this section. + + * `_multistream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `MultiStream` + - Workflow: + * `_offline` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Offline` + - Workflow: + * `_server` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Server` + - Workflow: + * `_singlestream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `SingleStream` + - *CUDA_VISIBLE_DEVICES_NOT_USED*: `0` + - Workflow: + +
+ + + * Group "**model**" +
+ Click here to expand this section. + + * `_3d-unet-99` + - Environment variables: + - *CM_MODEL*: `3d-unet-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, affine fusion` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - Workflow: + * `_3d-unet-99.9` + - Environment variables: + - *CM_MODEL*: `3d-unet-99.9` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, affine fusion` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - Workflow: + * `_bert-99` + - Environment variables: + - *CM_MODEL*: `bert-99` + - *CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, affine fusion` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int32` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - Workflow: + * `_bert-99.9` + - Environment variables: + - *CM_MODEL*: `bert-99.9` + - *CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/3733910/files/model.onnx` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, affine fusion` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int32` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp16` + - Workflow: + * `_dlrm-v2-99` + - Environment variables: + - *CM_MODEL*: `dlrm-v2-99` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `affine fusion` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp16` + - Workflow: + * `_dlrm-v2-99.9` + - Environment variables: + - *CM_MODEL*: `dlrm-v2-99.9` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `affine fusion` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp16` + - Workflow: + * `_gptj-99` + - Environment variables: + - *CM_MODEL*: `gptj-99` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, affine fusion` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int32` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp16` + - Workflow: + * `_gptj-99.9` + - Environment variables: + - *CM_MODEL*: `gptj-99.9` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, affine fusion` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int32` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp16` + - Workflow: + * **`_resnet50`** (default) + - Environment variables: + - *CM_MODEL*: `resnet50` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, affine fusion` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_onnx-graphsurgeon + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.onnx + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_retinanet` + - Environment variables: + - *CM_MODEL*: `retinanet` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, affine fusion` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,generic-python-lib,_Pillow + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torch + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torchvision + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_opencv-python + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_pycocotools + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnx-graphsurgeon + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.onnx + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_rnnt` + - Environment variables: + - *CM_MODEL*: `rnnt` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, affine fusion` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp16` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp16` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_toml + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torchvision + * CM names: `--adr.['torchvision']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torch + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_nvidia-apex + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_unidecode + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_inflect + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_librosa + * CM names: `--adr.['librosa']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_sox + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-sys-util,_sox + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + +
+ + + * Group "**num-gpus**" +
+ Click here to expand this section. + + * `_num-gpus.#` + - Environment variables: + - *CM_NVIDIA_NUM_GPUS*: `#` + - Workflow: + * **`_num-gpus.1`** (default) + - Environment variables: + - *CM_NVIDIA_NUM_GPUS*: `1` + - Workflow: + +
+ + + * Group "**power-mode**" +
+ Click here to expand this section. + + * `_maxn` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_MAXN*: `True` + - Workflow: + * `_maxq` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_MAXQ*: `True` + - Workflow: + +
+ + + * Group "**run-mode**" +
+ Click here to expand this section. + + * `_build` + - Environment variables: + - *MLPERF_NVIDIA_RUN_COMMAND*: `build` + - *CM_MLPERF_NVIDIA_HARNESS_RUN_MODE*: `build` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cmake + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * get,generic,sys-util,_glog-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_gflags-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_libgmock-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_libre2-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_libnuma-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_libboost-all-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_rapidjson-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,cuda,_cudnn + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,tensorrt + * CM names: `--adr.['tensorrt']...` + - CM script: [get-tensorrt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tensorrt) + * build,nvidia,inference,server + * CM names: `--adr.['nvidia-inference-server']...` + - CM script: [build-mlperf-inference-server-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/build-mlperf-inference-server-nvidia) + * `_build_engine` + - Aliases: `_build-engine` + - Environment variables: + - *MLPERF_NVIDIA_RUN_COMMAND*: `generate_engines` + - *CM_MLPERF_NVIDIA_HARNESS_RUN_MODE*: `generate_engines` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,cuda,_cudnn + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,tensorrt + * CM names: `--adr.['tensorrt']...` + - CM script: [get-tensorrt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tensorrt) + * build,nvidia,inference,server + * CM names: `--adr.['nvidia-inference-server']...` + - CM script: [build-mlperf-inference-server-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/build-mlperf-inference-server-nvidia) + * reproduce,mlperf,inference,nvidia,harness,_preprocess_data + * `if (CM_MODEL not in ['dlrm-v2-99', 'dlrm-v2-99.9'])` + - CM script: [app-mlperf-inference-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-nvidia) + * reproduce,mlperf,inference,nvidia,harness,_download_model + * `if (CM_MODEL not in ['retinanet_old', 'resnet50', 'bert-99', 'bert-99.9', 'dlrm-v2-99', 'dlrm-v2-99.9'])` + - CM script: [app-mlperf-inference-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-nvidia) + * reproduce,mlperf,inference,nvidia,harness,_calibrate + * `if (CM_MODEL == retinanet)` + - CM script: [app-mlperf-inference-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-nvidia) + * `_calibrate` + - Environment variables: + - *MLPERF_NVIDIA_RUN_COMMAND*: `calibrate` + - *CM_MLPERF_NVIDIA_HARNESS_RUN_MODE*: `calibrate` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * reproduce,mlperf,inference,nvidia,harness,_download_model + * `if (CM_MODEL not in ['retinanet_old', 'resnet50', 'bert-99', 'bert-99.9', 'dlrm-v2-99', 'dlrm-v2-99.9'])` + - CM script: [app-mlperf-inference-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-nvidia) + * `_download_model` + - Environment variables: + - *MLPERF_NVIDIA_RUN_COMMAND*: `download_model` + - *CM_MLPERF_NVIDIA_HARNESS_RUN_MODE*: `download_model` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_torch_cuda + * `if (CM_MODEL == retinanet)` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_prebuild` + - Environment variables: + - *MLPERF_NVIDIA_RUN_COMMAND*: `prebuild` + - *CM_MLPERF_NVIDIA_HARNESS_RUN_MODE*: `prebuild` + - Workflow: + * `_preprocess_data` + - Environment variables: + - *MLPERF_NVIDIA_RUN_COMMAND*: `preprocess_data` + - *CM_MLPERF_NVIDIA_HARNESS_RUN_MODE*: `preprocess_data` + - Workflow: + * **`_run_harness`** (default) + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_RUN_MODE*: `run_harness` + - *MLPERF_NVIDIA_RUN_COMMAND*: `run_harness` + - *CM_CALL_MLPERF_RUNNER*: `yes` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,cuda,_cudnn + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,tensorrt + * CM names: `--adr.['tensorrt']...` + - CM script: [get-tensorrt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tensorrt) + * build,nvidia,inference,server + * CM names: `--adr.['nvidia-inference-server']...` + - CM script: [build-mlperf-inference-server-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/build-mlperf-inference-server-nvidia) + * reproduce,mlperf,inference,nvidia,harness,_build_engine + * CM names: `--adr.['build-engine']...` + - CM script: [app-mlperf-inference-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-nvidia) + * reproduce,mlperf,inference,nvidia,harness,_preprocess_data + * `if (CM_MODEL not in ['dlrm-v2-99', 'dlrm-v2-99.9'])` + - CM script: [app-mlperf-inference-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-nvidia) + * reproduce,mlperf,inference,nvidia,harness,_download_model + * `if (CM_MODEL not in ['retinanet', 'resnet50', 'bert-99', 'bert-99.9', 'dlrm-v2-99', 'dlrm-v2-99.9'])` + - CM script: [app-mlperf-inference-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-nvidia) + +
+ + + * Group "**triton**" +
+ Click here to expand this section. + + * `_use_triton` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_USE_TRITON*: `yes` + - *CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX3*: `using_triton` + - Workflow: + +
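+For example, variations from different groups can be combined in a single run (a sketch; the GPU variation and batch size below are illustrative — pick the ones matching your hardware):
+```bash
+cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia,_rtx_4090,_resnet50,_offline,_run_harness \
+   --mode=performance --gpu_batch_size=64
+```
+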
+ + +#### Default variations + +`_cuda,_num-gpus.1,_resnet50,_run_harness,_tensorrt` + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--audio_buffer_num_lines=value` → `CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES=value` +* `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` +* `--deque_timeout_usec=value` → `CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC=value` +* `--devices=value` → `CM_MLPERF_NVIDIA_HARNESS_DEVICES=value` +* `--dla_batch_size=value` → `CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE=value` +* `--dla_copy_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS=value` +* `--dla_inference_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS=value` +* `--embedding_weights_on_gpu_part=value` → `CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART=value` +* `--enable_sort=value` → `CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT=value` +* `--end_on_device=value` → `CM_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE=value` +* `--extra_run_options=value` → `CM_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS=value` +* `--gpu_batch_size=value` → `CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE=value` +* `--gpu_copy_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS=value` +* `--gpu_inference_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS=value` +* `--graphs_max_seqlen=value` → `CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN=value` +* `--input_format=value` → `CM_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT=value` +* `--log_dir=value` → `CM_MLPERF_NVIDIA_HARNESS_LOG_DIR=value` +* `--make_cmd=value` → `MLPERF_NVIDIA_RUN_COMMAND=value` +* `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` +* `--max_dlas=value` → `CM_MLPERF_NVIDIA_HARNESS_MAX_DLAS=value` +* `--mlperf_conf=value` → `CM_MLPERF_CONF=value` +* `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` +* `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` +* `--num_issue_query_threads=value` → `CM_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS=value` +* `--num_sort_segments=value` → `CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS=value` +* `--num_warmups=value` → `CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS=value` +* `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` +* `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` +* `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` +* `--power_setting=value` → `CM_MLPERF_NVIDIA_HARNESS_POWER_SETTING=value` +* `--rerun=value` → `CM_RERUN=value` +* `--run_infer_on_copy_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS=value` +* `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` +* `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` +* `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` +* `--skip_postprocess=value` → `CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS=value` +* `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value` +* `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value` +* `--soft_drop=value` → `CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP=value` +* `--start_from_device=value` → `CM_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE=value` +* `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` +* `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` +* `--use_cuda_thread_per_device=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE=value` +* `--use_deque_limit=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT=value` +* `--use_fp8=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_FP8=value` +* `--use_graphs=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS=value` +* 
`--use_small_tile_gemm_plugin=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN=value`
+* `--use_triton=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_TRITON=value`
+* `--user_conf=value` → `CM_MLPERF_USER_CONF=value`
+* `--workspace_size=value` → `CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "audio_buffer_num_lines":...})
+```
+
+ +#### Default environment + +
+
Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via the script flags listed above (see the sketch after this list).
+
+* CM_BATCH_COUNT: `1`
+* CM_BATCH_SIZE: `1`
+* CM_FAST_COMPILATION: `yes`
+* CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+* CM_MLPERF_LOADGEN_MODE: `performance`
+* CM_SKIP_PREPROCESS_DATASET: `no`
+* CM_SKIP_MODEL_DOWNLOAD: `no`
+* CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `nvidia_original`
+* CM_MLPERF_SKIP_RUN: `no`
+
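+For example (a sketch; the env value is illustrative only):
+```bash
+# Override a default key directly on the command line
+cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia \
+   --env.CM_MLPERF_LOADGEN_MODE=accuracy
+
+# Or keep the overrides in a JSON input file and pass it via @
+cat > input.json <<'EOF'
+{ "env": { "CM_MLPERF_LOADGEN_MODE": "accuracy" } }
+EOF
+cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia @input.json
+```
+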
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-nvidia/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,mlperf,inference,nvidia,scratch,space + * CM names: `--adr.['nvidia-scratch-space']...` + - CM script: [get-mlperf-inference-nvidia-scratch-space](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-nvidia-scratch-space) + * get,generic-python-lib,_mlperf_logging + * CM names: `--adr.['mlperf-logging']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,dataset,original,imagenet,_full + * `if (CM_MODEL == resnet50)` + * CM names: `--adr.['imagenet-original']...` + - CM script: [get-dataset-imagenet-val](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-val) + * get,ml-model,resnet50,_fp32,_onnx,_opset-8 + * `if (CM_MODEL == resnet50)` + * CM names: `--adr.['resnet50-model', 'ml-model']...` + - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50) + * get,dataset,original,kits19 + * `if (CM_MODEL in ['3d-unet-99-disabled', '3d-unet-99.9-disabled'])` + * CM names: `--adr.['kits19-original']...` + - CM script: [get-dataset-kits19](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-kits19) + * get,dataset,original,librispeech + * `if (CM_MODEL == rnnt)` + * CM names: `--adr.['librispeech-original']...` + - CM script: [get-dataset-librispeech](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-librispeech) + * get,dataset,preprocessed,criteo + * `if (CM_MODEL in ['dlrm-v2-99', 'dlrm-v2-99.9']) AND (DLRM_DATA_PATH != True)` + * CM names: `--adr.['criteo-preprocessed']...` + - CM script: [get-preprocessed-dataset-criteo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-criteo) + * get,ml-model,dlrm,_pytorch + * `if (CM_MODEL in ['dlrm-v2-99', 'dlrm-v2-99.9']) AND (DLRM_DATA_PATH != True)` + * CM names: `--adr.['dlrm-model']...` + - CM script: [get-ml-model-dlrm-terabyte](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-dlrm-terabyte) + * get,ml-model,bert,_onnx,_fp32 + * `if (CM_MODEL in ['bert-99', 'bert-99.9'])` + * CM names: `--adr.['bert-model', 'bert-model-fp32']...` + - CM script: [get-ml-model-bert-large-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-bert-large-squad) + * get,ml-model,bert,_onnx,_int8 + * `if (CM_MODEL in ['bert-99', 'bert-99.9'])` + * CM names: `--adr.['bert-model', 'bert-model-int8']...` + - CM script: [get-ml-model-bert-large-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-bert-large-squad) + * get,squad-vocab + * `if (CM_MODEL in ['bert-99', 'bert-99.9'])` + * CM names: `--adr.['bert-vocab']...` + - CM script: [get-dataset-squad-vocab](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-squad-vocab) + * get,dataset,original,openimages,_validation,_full,_custom-annotations + * `if (CM_MODEL == retinanet)` + * CM names: 
`--adr.['openimages-original']...`
+ - CM script: [get-dataset-openimages](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openimages)
+ * get,dataset,original,openimages,_calibration
+ * `if (CM_MODEL == retinanet)`
+ * CM names: `--adr.['openimages-calibration']...`
+ - CM script: [get-dataset-openimages](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openimages)
+ * get,dataset,original,openorca
+ * `if (CM_MODEL in ['gptj-99', 'gptj-99.9'] AND CM_MLPERF_NVIDIA_HARNESS_RUN_MODE == preprocess_dataset)`
+ * CM names: `--adr.['openorca-original']...`
+ - CM script: [get-dataset-openorca](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openorca)
+ * get,mlcommons,inference,src
+ * CM names: `--adr.['inference-src']...`
+ - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+ * get,nvidia,mlperf,inference,common-code
+ * CM names: `--adr.['nvidia-inference-common-code']...`
+ - CM script: [get-mlperf-inference-nvidia-common-code](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-nvidia-common-code)
+ * generate,user-conf,mlperf,inference
+ * `if (CM_MLPERF_NVIDIA_HARNESS_RUN_MODE == run_harness)`
+ * CM names: `--adr.['user-conf-generator']...`
+ - CM script: [generate-mlperf-inference-user-conf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/generate-mlperf-inference-user-conf)
+ * get,generic-python-lib,_package.nvmitten,_path./opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl
+ * `if (CM_RUN_STATE_DOCKER in ['yes', True, 'True'])`
+ - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+ * get,nvidia,mitten
+ * `if (CM_RUN_STATE_DOCKER not in ['yes', True, 'True'])`
+ - CM script: [get-nvidia-mitten](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-nvidia-mitten)
+ 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-nvidia/customize.py)***
+ 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-nvidia/_cm.yaml)***
+ * get,ml-model,gptj,_pytorch,_rclone
+ * `if (CM_REQUIRE_GPTJ_MODEL_DOWNLOAD == yes AND CM_MLPERF_NVIDIA_HARNESS_RUN_MODE in ['download_model', 'preprocess_data'])`
+ * CM names: `--adr.['gptj-model']...`
+ - CM script: [get-ml-model-gptj](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-gptj)
+ 1. ***Run native script if it exists***
+ * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-nvidia/run.sh)
+ 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-nvidia/_cm.yaml)
+ 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-nvidia/customize.py)***
+ 1. 
***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-nvidia/_cm.yaml)*** + * benchmark-mlperf + * `if (CM_CALL_MLPERF_RUNNER == True) AND (CM_MLPERF_SKIP_RUN not in ['yes', True])` + * CM names: `--adr.['runner', 'mlperf-runner']...` + - CM script: [benchmark-program-mlperf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-program-mlperf) + * save,mlperf,inference,state + * CM names: `--adr.['save-mlperf-inference-state']...` + - CM script: [save-mlperf-inference-implementation-state](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/save-mlperf-inference-implementation-state) + +___ +### Script output +`cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/app-mlperf-inference-nvidia/_cm.yaml b/script/app-mlperf-inference-nvidia/_cm.yaml new file mode 100644 index 0000000000..19e789ae0d --- /dev/null +++ b/script/app-mlperf-inference-nvidia/_cm.yaml @@ -0,0 +1,1477 @@ +# Identification of this CM script +alias: app-mlperf-inference-nvidia +uid: bc3b17fb430f4732 +cache: false +can_force_cache: true + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Reproduce MLPerf benchmarks" + + +# User-friendly tags to find this CM script +tags: + - reproduce + - mlcommons + - mlperf + - inference + - harness + - nvidia-harness + - nvidia + +# Default environment +default_env: + CM_BATCH_COUNT: '1' + CM_BATCH_SIZE: '1' + CM_FAST_COMPILATION: 'yes' + CM_MLPERF_LOADGEN_SCENARIO: Offline + CM_MLPERF_LOADGEN_MODE: performance + # SKIP_POLICIES: '1' + CM_SKIP_PREPROCESS_DATASET: 'no' + CM_SKIP_MODEL_DOWNLOAD: 'no' + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: nvidia_original + CM_MLPERF_SKIP_RUN: 'no' +env: + CM_CALL_MLPERF_RUNNER: 'no' + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: CM_MLPERF_CONF + mode: CM_MLPERF_LOADGEN_MODE + output_dir: CM_MLPERF_OUTPUT_DIR + performance_sample_count: CM_MLPERF_PERFORMANCE_SAMPLE_COUNT + scenario: CM_MLPERF_LOADGEN_SCENARIO + user_conf: CM_MLPERF_USER_CONF + devices: CM_MLPERF_NVIDIA_HARNESS_DEVICES + skip_preprocess: CM_SKIP_PREPROCESS_DATASET + skip_preprocessing: CM_SKIP_PREPROCESS_DATASET + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + use_triton: CM_MLPERF_NVIDIA_HARNESS_USE_TRITON + gpu_copy_streams: CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS + gpu_inference_streams: CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS + gpu_batch_size: CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE + dla_copy_streams: CM_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS + dla_inference_streams: CM_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS + dla_batch_size: CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE + input_format: CM_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT + performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + workspace_size: CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE + log_dir: CM_MLPERF_NVIDIA_HARNESS_LOG_DIR + use_graphs: CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS + 
run_infer_on_copy_streams: CM_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS + start_from_device: CM_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE + end_on_device: CM_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE + max_dlas: CM_MLPERF_NVIDIA_HARNESS_MAX_DLAS + power_setting: CM_MLPERF_NVIDIA_HARNESS_POWER_SETTING + make_cmd: MLPERF_NVIDIA_RUN_COMMAND + rerun: CM_RERUN + extra_run_options: CM_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS + use_deque_limit: CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT + deque_timeout_usec: CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC + use_cuda_thread_per_device: CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE + num_warmups: CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS + graphs_max_seqlen: CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN + num_issue_query_threads: CM_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS + soft_drop: CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP + use_small_tile_gemm_plugin: CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN + audio_buffer_num_lines: CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES + use_fp8: CM_MLPERF_NVIDIA_HARNESS_USE_FP8 + enable_sort: CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT + num_sort_segments: CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS + skip_postprocess: CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS + embedding_weights_on_gpu_part: CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART + + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Get Nvidia scratch space where data and models get downloaded + - tags: get,mlperf,inference,nvidia,scratch,space + names: + - nvidia-scratch-space + + # Get MLPerf logging library + - tags: get,generic-python-lib,_mlperf_logging + names: + - mlperf-logging + + ######################################################################## + # Install ResNet50 model (ONNX) and ImageNet + + - enable_if_env: + CM_MODEL: + - resnet50 + names: + - imagenet-original + tags: get,dataset,original,imagenet,_full + + - enable_if_env: + CM_MODEL: + - resnet50 + names: + - resnet50-model + - ml-model + tags: get,ml-model,resnet50,_fp32,_onnx,_opset-8 + + ######################################################################## + # Install kits19 dataset + + - enable_if_env: + CM_MODEL: + - 3d-unet-99-disabled + - 3d-unet-99.9-disabled + names: + - kits19-original + tags: get,dataset,original,kits19 + + + ######################################################################## + # Install librispeech dataset + + - enable_if_env: + CM_MODEL: + - rnnt + names: + - librispeech-original + tags: get,dataset,original,librispeech + + ######################################################################## + # Install criteo dataset + + - enable_if_env: + CM_MODEL: + - dlrm-v2-99 + - dlrm-v2-99.9 + skip_if_env: + DLRM_DATA_PATH: + - on + names: + - criteo-preprocessed + tags: get,dataset,preprocessed,criteo + + ######################################################################## + # Install dlrm model + - enable_if_env: + CM_MODEL: + - dlrm-v2-99 + - dlrm-v2-99.9 + skip_if_env: + DLRM_DATA_PATH: + - on + names: + - dlrm-model + tags: get,ml-model,dlrm,_pytorch + + ######################################################################## + # Install bert models + - enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + names: + - bert-model + - bert-model-fp32 + tags: get,ml-model,bert,_onnx,_fp32 + + - enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + names: + - 
bert-model + - bert-model-int8 + tags: get,ml-model,bert,_onnx,_int8 + + - enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + names: + - bert-vocab + tags: get,squad-vocab + + ######################################################################## + # Install OpenImages + + - enable_if_env: + CM_MODEL: + - retinanet + names: + - openimages-original + tags: get,dataset,original,openimages,_validation,_full,_custom-annotations + + - enable_if_env: + CM_MODEL: + - retinanet + names: + - openimages-calibration + tags: get,dataset,original,openimages,_calibration + + + ######################################################################## + # Install openorca dataset + + - enable_if_env: + CM_MODEL: + - gptj-99 + - gptj-99.9 + CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: + - preprocess_dataset + names: + - openorca-original + tags: get,dataset,original,openorca + + + ######################################################################## + # Install MLPerf inference dependencies + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + # Download Nvidia Submission Code + - tags: get,nvidia,mlperf,inference,common-code + names: + - nvidia-inference-common-code + + # Creates user conf for given SUT + - tags: generate,user-conf,mlperf,inference + names: + - user-conf-generator + enable_if_env: + CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: + - run_harness + + - tags: get,generic-python-lib,_package.nvmitten,_path./opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl + enable_if_env: + CM_RUN_STATE_DOCKER: + - 'yes' + - True + - 'True' + + - tags: get,nvidia,mitten + skip_if_env: + CM_RUN_STATE_DOCKER: + - 'yes' + - True + - 'True' + +prehook_deps: + ######################################################################## + # Install GPTJ-6B model + - enable_if_env: + CM_REQUIRE_GPTJ_MODEL_DOWNLOAD: + - 'yes' + CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: + - download_model + - preprocess_data + names: + - gptj-model + tags: get,ml-model,gptj,_pytorch,_rclone + +# Post dependencies to run this app including for power measurement +post_deps: + + - names: + - runner + - mlperf-runner + skip_if_env: + CM_MLPERF_SKIP_RUN: + - 'yes' + - yes + tags: benchmark-mlperf + enable_if_env: + CM_CALL_MLPERF_RUNNER: + - yes + - tags: save,mlperf,inference,state + names: + - save-mlperf-inference-state + +# Variations to customize dependencies +variations: + # Target devices + cpu: + group: device + env: + CM_MLPERF_DEVICE: cpu + cuda: + group: device + default: true + env: + CM_MLPERF_DEVICE: gpu + CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + + tensorrt: + group: backend + default: true + env: + CM_MLPERF_BACKEND: tensorrt + CM_MLPERF_BACKEND_NAME: TensorRT + + # Reference MLPerf models + resnet50: + group: model + default: true + env: + CM_MODEL: resnet50 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + deps: + - tags: get,generic-python-lib,_onnx-graphsurgeon + - tags: get,generic-python-lib,_package.onnx + version: 1.13.1 + + retinanet: + group: model + env: + CM_MODEL: retinanet + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + deps: + - tags: get,generic-python-lib,_Pillow + - tags: get,generic-python-lib,_torch + - tags: get,generic-python-lib,_torchvision + - tags: get,generic-python-lib,_opencv-python 
+ - tags: get,generic-python-lib,_numpy + - tags: get,generic-python-lib,_pycocotools + - tags: get,generic-python-lib,_onnx-graphsurgeon + - tags: get,generic-python-lib,_package.onnx + version: 1.13.1 + + bert_: + deps: + - tags: get,generic-python-lib,_transformers + - tags: get,generic-python-lib,_safetensors + - tags: get,generic-python-lib,_onnx + + bert-99: + group: model + base: + - bert_ + env: + CM_MODEL: bert-99 + CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: int32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + + bert-99.9: + group: model + base: + - bert_ + env: + CM_MODEL: bert-99.9 + CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: int32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + + 3d-unet_: + deps: + - tags: get,generic-python-lib,_transformers + - tags: get,generic-python-lib,_package.nibabel + - tags: get,generic-python-lib,_pandas + version_max: "1.5.3" + + 3d-unet-99: + group: model + base: + - 3d-unet_ + env: + CM_MODEL: 3d-unet-99 + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + + 3d-unet-99.9: + group: model + base: + - 3d-unet_ + env: + CM_MODEL: 3d-unet-99.9 + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + + rnnt: + group: model + env: + CM_MODEL: rnnt + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: fp16 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + deps: + - tags: get,generic-python-lib,_toml + - tags: get,generic-python-lib,_torchvision + names: + - torchvision + - tags: get,generic-python-lib,_torch + - tags: get,generic-python-lib,_nvidia-apex + - tags: get,generic-python-lib,_unidecode + - tags: get,generic-python-lib,_inflect + - tags: get,generic-python-lib,_librosa + names: + - librosa + - tags: get,generic-python-lib,_sox + - tags: get,generic-sys-util,_sox + + dlrm_: + deps: + - tags: get,generic-python-lib,_torch + - tags: get,generic-python-lib,_package.torchsnapshot + - tags: get,generic-python-lib,_package.torchrec + version: 0.3.2 + - tags: get,generic-python-lib,_package.fbgemm-gpu + version: 0.3.2 + - tags: get,generic-python-lib,_onnx-graphsurgeon + - tags: get,generic-python-lib,_package.scikit-learn + + dlrm-v2-99: + group: model + base: + - dlrm_ + env: + CM_MODEL: dlrm-v2-99 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + + dlrm-v2-99.9: + group: model + base: + - dlrm_ + env: + CM_MODEL: dlrm-v2-99.9 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + + gptj_: + deps: + - tags: get,generic-python-lib,_package.datasets + - tags: get,generic-python-lib,_package.simplejson + env: + 
CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download"
+
+  gptj_,build:
+    deps:
+      - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1
+      - tags: get,cmake
+        version_min: "3.25.0"
+
+  gptj_,build_engine:
+    deps:
+      - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1
+      - tags: get,cmake
+        version_min: "3.25.0"
+
+  gptj-99:
+    group: model
+    base:
+      - gptj_
+    env:
+      CM_MODEL: gptj-99
+      CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
+      CM_ML_MODEL_INPUTS_DATA_TYPE: int32
+      CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
+
+  gptj-99.9:
+    group: model
+    base:
+      - gptj_
+    env:
+      CM_MODEL: gptj-99.9
+      CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
+      CM_ML_MODEL_INPUTS_DATA_TYPE: int32
+      CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
+
+  batch_size.#:
+    group: batch-size
+    env:
+      CM_MODEL_BATCH_SIZE: "#"
+      CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "#"
+      #CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: "gpu_batch_size.#"
+
+  dla_batch_size.#:
+    group: dla-batch-size
+    env:
+      CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE: "#"
+      CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2: "dla_batch_size.#"
+    adr:
+      build-engine:
+        tags: _dla_batch_size.#
+
+  use_triton:
+    group: triton
+    env:
+      CM_MLPERF_NVIDIA_HARNESS_USE_TRITON: "yes"
+      CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX3: "using_triton"
+
+  prebuild:
+    group: run-mode
+    env:
+      MLPERF_NVIDIA_RUN_COMMAND: prebuild
+      CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: prebuild
+
+  build:
+    group: run-mode
+    env:
+      MLPERF_NVIDIA_RUN_COMMAND: build
+      CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: build
+    deps:
+      - tags: get,cmake
+        version_min: "3.18"
+
+      # Detect Google Logger
+      - tags: get,generic,sys-util,_glog-dev
+
+      # Detect GFlags
+      - tags: get,generic,sys-util,_gflags-dev
+
+      # Detect libgmock-dev
+      - tags: get,generic,sys-util,_libgmock-dev
+
+      # Detect libre2-dev
+      - tags: get,generic,sys-util,_libre2-dev
+
+      # Detect libnuma-dev
+      - tags: get,generic,sys-util,_libnuma-dev
+
+      # Detect libboost-all-dev
+      - tags: get,generic,sys-util,_libboost-all-dev
+
+      # Detect rapidjson-dev
+      - tags: get,generic,sys-util,_rapidjson-dev
+
+      # Detect CUDA
+      - names:
+          - cuda
+        tags: get,cuda,_cudnn
+
+      # Detect Tensorrt
+      - names:
+          - tensorrt
+        tags: get,tensorrt
+
+      # Build nvidia inference server
+      - names:
+          - nvidia-inference-server
+        tags: build,nvidia,inference,server
+
+
+  maxq:
+    group: power-mode
+    env:
+      CM_MLPERF_NVIDIA_HARNESS_MAXQ: yes
+
+  maxn:
+    group: power-mode
+    env:
+      CM_MLPERF_NVIDIA_HARNESS_MAXN: yes
+
+  preprocess-data:
+    alias: preprocess_data
+
+  preprocess_data:
+    group: run-mode
+    env:
+      MLPERF_NVIDIA_RUN_COMMAND: preprocess_data
+      CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: preprocess_data
+
+  download-model:
+    alias: download_model
+
+  download_model:
+    group: run-mode
+    env:
+      MLPERF_NVIDIA_RUN_COMMAND: download_model
+      CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: download_model
+    deps:
+      - tags: get,generic-python-lib,_torch_cuda
+        enable_if_env:
+          CM_MODEL:
+            - retinanet
+
+  calibrate:
+    group: run-mode
+    env:
+      MLPERF_NVIDIA_RUN_COMMAND: calibrate
+      CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: calibrate
+    deps:
+      - tags: reproduce,mlperf,inference,nvidia,harness,_download_model
+        inherit_variation_tags: true
+        force_cache: true
+        skip_inherit_variation_groups:
+          - run-mode
+          - loadgen-scenario
+          - device-memory
+          - gpu-name
+          - power-mode
+          - batch-size
+          - triton
+        skip_if_env:
+          CM_MODEL:
+            - retinanet_old
+            - resnet50
+            - bert-99
+            - bert-99.9
+            - dlrm-v2-99
+            - dlrm-v2-99.9
+
+  build-engine:
+    alias: build_engine
+
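+  # Illustrative note: the run-mode variations below chain through "deps"
+  # entries that use inherit_variation_tags and force_cache, so selecting
+  # "_run_harness" also resolves "_build_engine", "_preprocess_data" and
+  # "_download_model" for the chosen model. A hypothetical invocation:
+  #   cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,nvidia,_run_harness,_resnet50,_offline
+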
+  build_engine:
+    group: run-mode
+    default_variations:
+      loadgen-scenario: offline
+    env:
+      MLPERF_NVIDIA_RUN_COMMAND: generate_engines
+      CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: generate_engines
+    deps:
+      # Detect CUDA
+      - names:
+          - cuda
+        tags: get,cuda,_cudnn
+
+      # Detect Tensorrt
+      - names:
+          - tensorrt
+        tags: get,tensorrt
+
+      # Build nvidia inference server
+      - names:
+          - nvidia-inference-server
+        tags: build,nvidia,inference,server
+
+      - tags: reproduce,mlperf,inference,nvidia,harness,_preprocess_data
+        inherit_variation_tags: true
+        force_cache: true
+        skip_inherit_variation_groups:
+          - run-mode
+          - loadgen-scenario
+          - device-memory
+          - gpu-name
+          - batch-size
+          - num-gpus
+          - triton
+          - build-engine-options
+        skip_if_env:
+          CM_MODEL:
+            - dlrm-v2-99
+            - dlrm-v2-99.9
+
+      - tags: reproduce,mlperf,inference,nvidia,harness,_download_model
+        inherit_variation_tags: true
+        force_cache: true
+        skip_inherit_variation_groups:
+          - run-mode
+          - loadgen-scenario
+          - device-memory
+          - gpu-name
+          - num-gpus
+          - batch-size
+          - triton
+          - power-mode
+          - build-engine-options
+        skip_if_env:
+          CM_MODEL:
+            - retinanet_old
+            - resnet50
+            - bert-99
+            - bert-99.9
+            - dlrm-v2-99
+            - dlrm-v2-99.9
+      - tags: reproduce,mlperf,inference,nvidia,harness,_calibrate
+        inherit_variation_tags: true
+        enable_if_env:
+          CM_MODEL:
+            - retinanet
+        force_cache: true
+        skip_inherit_variation_groups:
+          - run-mode
+          - loadgen-scenario
+          - device-memory
+          - device-type
+          - num-gpus
+          - power-mode
+          - gpu-name
+          - triton
+          - batch-size
+          - build-engine-options
+
+  singlestream:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+      CUDA_VISIBLE_DEVICES_NOT_USED: "0"
+  multistream:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+  offline:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: Offline
+  server:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: Server
+
+  run-harness:
+    alias: run_harness
+
+  run_harness:
+    group: run-mode
+    default: true
+    default_variations:
+      loadgen-scenario: offline
+    deps:
+      # Detect CUDA
+      - names:
+          - cuda
+        tags: get,cuda,_cudnn
+
+      # Detect Tensorrt
+      - names:
+          - tensorrt
+        tags: get,tensorrt
+
+      # Build nvidia inference server
+      - names:
+          - nvidia-inference-server
+        tags: build,nvidia,inference,server
+      - tags: reproduce,mlperf,inference,nvidia,harness,_build_engine
+        inherit_variation_tags: true
+        names:
+          - build-engine
+        skip_inherit_variation_groups:
+          - run-mode
+          - gpu-name
+          - num-gpus
+          - device-memory
+        force_cache: true
+
+      - tags: reproduce,mlperf,inference,nvidia,harness,_preprocess_data
+        inherit_variation_tags: true
+        skip_inherit_variation_groups:
+          - run-mode
+          - loadgen-scenario
+          - num-gpus
+          - device-memory
+          - power-mode
+          - gpu-name
+          - batch-size
+          - triton
+          - build-engine-options
+        force_cache: true
+        skip_if_env:
+          CM_MODEL:
+            - dlrm-v2-99
+            - dlrm-v2-99.9
+      - tags: reproduce,mlperf,inference,nvidia,harness,_download_model
+        inherit_variation_tags: true
+        skip_inherit_variation_groups:
+          - run-mode
+          - loadgen-scenario
+          - device-memory
+          - gpu-name
+          - num-gpus
+          - power-mode
+          - batch-size
+          - build-engine-options
+        force_cache: true
+        skip_if_env:
+          CM_MODEL:
+            - retinanet
+            - resnet50
+            - bert-99
+            - bert-99.9
+            - dlrm-v2-99
+            - dlrm-v2-99.9
+    env:
+      CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: run_harness
+      MLPERF_NVIDIA_RUN_COMMAND: run_harness
+      CM_CALL_MLPERF_RUNNER: 'yes'
+    new_env_keys:
+      - CM_MLPERF_*
+      - CM_DATASET_*
+      - CM_ML_MODEL_*
+      - CM_HW_NAME
+      - CM_MAX_EXAMPLES
+    new_state_keys:
+      - 
mlperf-inference-implementation + - CM_SUT_* + + build_engine_options.#: + group: build-engine-options + env: + CM_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS: "#" + + gpu_memory.16: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "16" + gpu_memory.24: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "24" + gpu_memory.8: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "8" + gpu_memory.32: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "32" + gpu_memory.40: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "40" + gpu_memory.48: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "48" + gpu_memory.80: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "80" + + singlestream,resnet50: + env: + SKIP_POLICIES: '1' + + multistream,resnet50: + env: + SKIP_POLICIES: '1' + + singlestream,run_harness: + default_variations: + batch-size: batch_size.1 + + gptj_,run_harness: + deps: + - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1 + - tags: get,cmake + version_min: "3.25.0" + env: + CM_MLPERF_NVIDIA_HARNESS_USE_FP8: 'True' + CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT: 'True' + CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS: '2' + CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS: True + + gpu_memory.16,gptj_,offline,run_harness: + default_variations: + batch-size: batch_size.4 + + gpu_memory.24,gptj_,offline,run_harness: + default_variations: + batch-size: batch_size.7 + + gpu_memory.32,gptj_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.48,gptj_,offline,run_harness: + default_variations: + batch-size: batch_size.14 + + gpu_memory.40,gptj_,offline,run_harness: + default_variations: + batch-size: batch_size.10 + + gpu_memory.80,gptj_,offline,run_harness: + default_variations: + batch-size: batch_size.32 + + gpu_memory.16,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + gpu_memory.24,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + gpu_memory.32,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + gpu_memory.48,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.1024 + + gpu_memory.40,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + gpu_memory.80,bert_,server,run_harness: + default_variations: + batch-size: batch_size.64 + + gpu_memory.16,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.1024 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + + gpu_memory.40,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.24,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.64 + + gpu_memory.32,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.48,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.80,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + num-gpus.#: + group: num-gpus + env: + CM_NVIDIA_NUM_GPUS: "#" + + num-gpus.1: + group: num-gpus + default: true + env: + CM_NVIDIA_NUM_GPUS: "1" + + resnet50,server,run_harness: + default_variations: + batch-size: batch_size.64 + + resnet50,multistream,run_harness,num-gpus.1: + default_variations: + batch-size: batch_size.8 + + resnet50,multistream,run_harness,num-gpus.2: + default_variations: + batch-size: batch_size.4 + + retinanet,multistream,run_harness: + default_variations: + batch-size: batch_size.2 + + 
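+  # Illustrative note: a comma-joined key such as
+  # "gpu_memory.24,retinanet,offline,run_harness" only takes effect when all of
+  # the listed variations are active at once; the entries below use this to set
+  # a per-GPU default batch size (e.g. batch_size.2 plus two copy and two
+  # inference streams for retinanet offline on a 24 GB GPU).
+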
gpu_memory.16,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.2 + + gpu_memory.40,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.4 + + gpu_memory.32,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.4 + + gpu_memory.48,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.4 + + gpu_memory.24,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.2 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + + gpu_memory.80,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + retinanet,server,run_harness: + default_variations: + batch-size: batch_size.8 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + + gpu_memory.16,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.1024 + + gpu_memory.40,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.24,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.32,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.48,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.80,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.16,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.4 + + gpu_memory.40,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.24,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.80,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.32,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.48,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.16,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + + gpu_memory.40,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + + gpu_memory.24,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + + gpu_memory.32,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + + gpu_memory.48,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + + gpu_memory.80,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + + orin: + group: gpu-name + env: + CM_NVIDIA_CUSTOM_GPU: "yes" + CM_MODEL_BATCH_SIZE: "" #we pick from nvidia config + CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "<<>>" + + orin,rnnt,singlestream,run_harness: + env: + CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: "1" + + rtx_4090: + group: gpu-name + env: + CM_NVIDIA_CUSTOM_GPU: "yes" + + rtx_4090,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.64 + + rtx_4090,resnet50,server,run_harness: + default_variations: + batch-size: batch_size.32 + + rtx_4090,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.2 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + + rtx_4090,retinanet,server,run_harness: + default_variations: + batch-size: batch_size.2 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + + 
rtx_4090,bert_,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.256
+
+  rtx_4090,bert_,server,run_harness:
+    default_variations:
+      batch-size: batch_size.256
+
+  rtx_4090,3d-unet_,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.8
+
+  rtx_4090,3d-unet_,server,run_harness:
+    default_variations:
+      batch-size: batch_size.8
+
+  rtx_4090,rnnt,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.2048
+
+  rtx_4090,rnnt,server,run_harness:
+    default_variations:
+      batch-size: batch_size.2048
+
+  rtx_4090,gptj_,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.7
+
+  rtx_4090,gptj_,server,run_harness:
+    default_variations:
+      batch-size: batch_size.7
+
+  rtx_4090,dlrm_,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.1400
+    env:
+      CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.30"
+
+  rtx_a6000:
+    group: gpu-name
+    env:
+      CM_NVIDIA_CUSTOM_GPU: "yes"
+
+  rtx_a6000,resnet50,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.64
+
+  rtx_a6000,resnet50,server,run_harness:
+    default_variations:
+      batch-size: batch_size.32
+
+  rtx_a6000,retinanet,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.2
+
+  rtx_a6000,retinanet,server,run_harness:
+    default_variations:
+      batch-size: batch_size.2
+
+  rtx_a6000,bert_,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.256
+
+  rtx_a6000,bert_,server,run_harness:
+    default_variations:
+      batch-size: batch_size.256
+
+  rtx_a6000,3d-unet_,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.8
+
+  rtx_a6000,3d-unet_,server,run_harness:
+    default_variations:
+      batch-size: batch_size.8
+
+  rtx_a6000,rnnt,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.2048
+
+  rtx_a6000,rnnt,server,run_harness:
+    default_variations:
+      batch-size: batch_size.512
+
+  rtx_a6000,dlrm_,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.1400
+
+  rtx_6000_ada:
+    group: gpu-name
+    env:
+      CM_NVIDIA_CUSTOM_GPU: "yes"
+
+  rtx_6000_ada,resnet50,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.64
+
+  rtx_6000_ada,resnet50,server,run_harness:
+    default_variations:
+      batch-size: batch_size.32
+
+  rtx_6000_ada,retinanet,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.2
+
+  rtx_6000_ada,retinanet,server,run_harness:
+    default_variations:
+      batch-size: batch_size.2
+
+  rtx_6000_ada,bert_,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.256
+
+  rtx_6000_ada,bert_,server,run_harness:
+    default_variations:
+      batch-size: batch_size.256
+
+  rtx_6000_ada,3d-unet_,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.8
+
+  rtx_6000_ada,3d-unet_,server,run_harness:
+    default_variations:
+      batch-size: batch_size.8
+
+  rtx_6000_ada,rnnt,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.512
+
+  rtx_6000_ada,rnnt,server,run_harness:
+    default_variations:
+      batch-size: batch_size.512
+
+  rtx_6000_ada,dlrm_,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.1400
+
+  l4:
+    group: gpu-name
+    env:
+      CM_NVIDIA_CUSTOM_GPU: "yes"
+
+  l4,resnet50:
+    default_env:
+      CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 10500
+      CM_MLPERF_LOADGEN_SERVER_TARGET_QPS: 9000
+      CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY: 0.35
+      CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY: 1
+
+  l4,resnet50,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.32
+    env:
+      CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
+
CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "1" + CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + + l4,resnet50,server,run_harness: + default_variations: + batch-size: batch_size.16 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "9" + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True' + CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000 + CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE: 'True' + + l4,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.2 + + l4,retinanet,server,run_harness: + default_variations: + batch-size: batch_size.2 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True' + CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 30000 + CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: 20000000000 + + l4,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.16 + + l4,bert_,server,run_harness: + default_variations: + batch-size: batch_size.16 + env: + CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN: "200" + CM_MLPERF_NVIDIA_HARNESS_SERVER_NUM_ISSUE_QUERY_THREADS: "1" + CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP: "1.0" + CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN: "True" + + l4,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.1 + + l4,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.512 + + l4,rnnt,server,run_harness: + default_variations: + batch-size: batch_size.512 + env: + CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "64" + CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES: "1024" + CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: "1024" + + l4,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + t4: + group: gpu-name + env: + CM_NVIDIA_CUSTOM_GPU: "yes" + + t4,resnet50: + default_env: + CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 4900 + CM_MLPERF_LOADGEN_SERVER_TARGET_QPS: 4000 + CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY: 0.6 + CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY: 2 + + t4,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.256 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + + t4,resnet50,server,run_harness: + default_variations: + batch-size: batch_size.26 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: True + CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000 + CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP: "0.993" + + t4,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.4 + + t4,retinanet,server,run_harness: + default_variations: + batch-size: batch_size.2 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True' + CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 20000 + CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: 20000000000 + + t4,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + t4,bert_,server,run_harness: + default_variations: + batch-size: batch_size.4 + env: + CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN: "240" + CM_MLPERF_NVIDIA_HARNESS_SERVER_NUM_ISSUE_QUERY_THREADS: "0" + CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN: "no" + + t4,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + t4,rnnt,offline,run_harness: + default_variations: + 
batch-size: batch_size.2048
+    env:
+      CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4"
+      CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
+      CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "128"
+      CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN: "True"
+
+  t4,rnnt,server,run_harness:
+    default_variations:
+      batch-size: batch_size.2048
+    env:
+      CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4"
+      CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
+      CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "128"
+      CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN: "True"
+
+  t4,dlrm_,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.1400
+
+  pcie:
+    group: gpu-connection
+
+  sxm:
+    group: gpu-connection
+
+  custom:
+    group: gpu-name
+    env:
+      CM_NVIDIA_CUSTOM_GPU: "yes"
+      CM_MODEL_BATCH_SIZE: "" #we pick from nvidia config
+      CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "<<>>"
+
+  a100:
+    default_variations:
+      gpu-connection: sxm
+    group: gpu-name
+    env:
+      CM_NVIDIA_CUSTOM_GPU: "yes"
+
+  a100,sxm,resnet50,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.2048
+    env:
+      CM_MLPERF_PERFORMANCE_SAMPLE_COUNT: "2048"
+
+  a100,sxm,retinanet,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.32
+    env:
+      CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
+      CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
+      CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: "300000000000"
+
+  a100,sxm,bert_,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.256
+
+  a100,sxm,3d-unet_,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.8
+
+  a100,sxm,rnnt,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.2048
+
+  a100,sxm,dlrm_,offline,run_harness:
+    default_variations:
+      batch-size: batch_size.1400
+
+docker:
+  docker_real_run: False
diff --git a/script/app-mlperf-inference-nvidia/customize.py b/script/app-mlperf-inference-nvidia/customize.py
new file mode 100644
index 0000000000..2d995af9c4
--- /dev/null
+++ b/script/app-mlperf-inference-nvidia/customize.py
@@ -0,0 +1,428 @@
+from cmind import utils
+import os
+import shutil
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    if os_info['platform'] == 'windows':
+        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+    env = i['env']
+
+    if env.get('CM_MODEL', '') == '':
+        return {'return': 1, 'error': 'Please select a variation specifying the model to run'}
+
+    make_command = env['MLPERF_NVIDIA_RUN_COMMAND']
+
+    if env.get('CM_MLPERF_DEVICE', '') == '':
+        return {'return': 1, 'error': 'Please select a variation specifying the device to run on'}
+
+    if env.get('CM_MLPERF_SKIP_RUN', '') == "yes" and make_command == "run_harness":
+        return {'return': 0}
+
+    env['MLPERF_SCRATCH_PATH'] = env['CM_NVIDIA_MLPERF_SCRATCH_PATH']
+
+    cmds = []
+    scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
+    mode = env['CM_MLPERF_LOADGEN_MODE']
+
+    if make_command == "prebuild":
+        cmds.append(f"make prebuild NETWORK_NODE=SUT")
+
+    if env['CM_MODEL'] == "resnet50":
+        target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'imagenet')
+        if not os.path.exists(target_data_path):
+            cmds.append(f"ln -sf {env['CM_DATASET_IMAGENET_PATH']} {target_data_path}")
+
+        model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'ResNet50', 'resnet50_v1.onnx')
+
+        if not os.path.exists(os.path.dirname(model_path)):
+            cmds.append(f"mkdir -p {os.path.dirname(model_path)}")
+
+        if not os.path.exists(model_path):
+            cmds.append(f"ln -sf {env['CM_ML_MODEL_FILE_WITH_PATH']} 
{model_path}") + model_name = "resnet50" + + elif "bert" in env['CM_MODEL']: + target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'squad') + if not os.path.exists(target_data_path): + cmds.append("make download_data BENCHMARKS='bert'") + + fp32_model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'bert', 'bert_large_v1_1.onnx') + int8_model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'bert', 'bert_large_v1_1_fake_quant.onnx') + vocab_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'bert', 'vocab.txt') + + if not os.path.exists(os.path.dirname(fp32_model_path)): + cmds.append(f"mkdir -p {os.path.dirname(fp32_model_path)}") + + if not os.path.exists(fp32_model_path): + cmds.append(f"ln -sf {env['CM_ML_MODEL_BERT_LARGE_FP32_PATH']} {fp32_model_path}") + if not os.path.exists(int8_model_path): + cmds.append(f"ln -sf {env['CM_ML_MODEL_BERT_LARGE_INT8_PATH']} {int8_model_path}") + if not os.path.exists(vocab_path): + cmds.append(f"ln -sf {env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH']} {vocab_path}") + model_name = "bert" + model_path = fp32_model_path + + elif "3d-unet" in env['CM_MODEL']: + target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'KiTS19', 'kits19', 'data') + target_data_path_base_dir = os.path.dirname(target_data_path) + if not os.path.exists(target_data_path_base_dir): + cmds.append(f"mkdir -p {target_data_path_base_dir}") + + if not os.path.exists(target_data_path): + #cmds.append(f"ln -sf {env['CM_DATASET_PATH']} {target_data_path}") + cmds.append("make download_data BENCHMARKS='3d-unet'") + + model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', '3d-unet-kits19', '3dUNetKiTS19.onnx') + model_name = "3d-unet" + + elif "rnnt" in env['CM_MODEL']: + target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'LibriSpeech', 'dev-clean') + target_data_path_base_dir = os.path.dirname(target_data_path) + if not os.path.exists(target_data_path_base_dir): + cmds.append(f"mkdir -p {target_data_path_base_dir}") + if not os.path.exists(target_data_path): + #cmds.append(f"ln -sf {env['CM_DATASET_LIBRISPEECH_PATH']} {target_data_path}") + cmds.append("make download_data BENCHMARKS='rnnt'") + + model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'rnn-t', 'DistributedDataParallel_1576581068.9962234-epoch-100.pt') + model_name = "rnnt" + + elif "pdlrm" in env['CM_MODEL']: + target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'criteo') + if not os.path.exists(target_data_path): + cmds.append(f"ln -sf {env['CM_DATASET_PREPROCESSED_PATH']} {target_data_path}") + + model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'dlrm', 'tb00_40M.pt') + if not os.path.exists(os.path.dirname(model_path)): + cmds.append(f"mkdir -p {os.path.dirname(model_path)}") + + if not os.path.exists(model_path): + cmds.append(f"ln -sf {env['CM_ML_MODEL_FILE_WITH_PATH']} {model_path}") + model_name = "dlrm" + + elif "dlrm-v2" in env['CM_MODEL']: + model_name = "dlrm-v2" + + elif env['CM_MODEL'] == "retinanet": + #print(env) + dataset_path = env['CM_DATASET_PATH'] + #return {'return': 1, 'error': 'error'} + + annotations_path = env['CM_DATASET_ANNOTATIONS_DIR_PATH'] + target_data_path_dir = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'open-images-v6-mlperf') + if not os.path.exists(target_data_path_dir): + cmds.append(f"mkdir -p {target_data_path_dir}") + target_data_path = os.path.join(target_data_path_dir, 'annotations') + if not os.path.exists(target_data_path): + cmds.append(f"ln -sf 
{annotations_path} {target_data_path}") + + target_data_path_dir = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'open-images-v6-mlperf', 'validation') + if not os.path.exists(target_data_path_dir): + cmds.append(f"mkdir -p {target_data_path_dir}") + target_data_path = os.path.join(target_data_path_dir, 'data') + if not os.path.exists(target_data_path): + cmds.append(f"ln -sf {dataset_path} {target_data_path}") + + calibration_dataset_path=env['CM_CALIBRATION_DATASET_PATH'] + target_data_path_dir = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'open-images-v6-mlperf','calibration', 'train') + if not os.path.exists(target_data_path_dir): + cmds.append(f"mkdir -p {target_data_path_dir}") + target_data_path = os.path.join(target_data_path_dir, 'data') + if not os.path.exists(target_data_path): + cmds.append(f"ln -sf {calibration_dataset_path} {target_data_path}") + + preprocessed_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data') + target_model_path_dir = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'retinanet-resnext50-32x4d') + if not os.path.exists(target_model_path_dir): + cmds.append(f"mkdir -p {target_model_path_dir}") + model_path = os.path.join(target_model_path_dir, 'retinanet-fpn-torch2.1-postprocessed.onnx') + alt_model_path = os.path.join(target_model_path_dir, 'retinanet-fpn-torch2.2-postprocessed.onnx') + if not os.path.exists(model_path) and os.path.exists(alt_model_path): + cmds.append(f"ln -s {alt_model_path} {model_path}") + + model_name = "retinanet" + + elif "gptj" in env['CM_MODEL']: + target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'cnn-daily-mail', 'cnn_eval.json') + if not os.path.exists(target_data_path): + cmds.append("make download_data BENCHMARKS='gptj'") + + fp32_model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'GPTJ-6B', 'checkpoint-final') + fp8_model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'GPTJ-6B', 'fp8-quantized-ammo', 'GPTJ-07142023.pth') + vocab_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'bert', 'vocab.txt') + + if not os.path.exists(os.path.dirname(fp32_model_path)): + cmds.append(f"mkdir -p {os.path.dirname(fp32_model_path)}") + if not os.path.exists(os.path.dirname(fp8_model_path)): + cmds.append(f"mkdir -p {os.path.dirname(fp8_model_path)}") + + if not os.path.exists(fp32_model_path): + env['CM_REQUIRE_GPTJ_MODEL_DOWNLOAD'] = 'yes' # download via prehook_deps + cmds.append(f"cp -r $CM_ML_MODEL_FILE_WITH_PATH {fp32_model_path}") + + model_name = "gptj" + model_path = fp8_model_path + #cmds.append(f"make prebuild") + if make_command == "download_model": + if not os.path.exists(model_path): + cmds.append(f"make download_model BENCHMARKS='{model_name}'") + else: + return {'return':0} + + elif make_command == "preprocess_data": + if env['CM_MODEL'] == "rnnt": + cmds.append(f"rm -rf {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'rnnt_dev_clean_500_raw')}") + cmds.append(f"rm -rf {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'rnnt_train_clean_512_wav')}") + cmds.append(f"make preprocess_data BENCHMARKS='{model_name}'") + + else: + scenario=env['CM_MLPERF_LOADGEN_SCENARIO'].lower() + + if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy": + test_mode = "AccuracyOnly" + elif env['CM_MLPERF_LOADGEN_MODE'] == "performance": + test_mode = "PerformanceOnly" + elif env['CM_MLPERF_LOADGEN_MODE'] == "compliance": + test_mode = "" + test_name = env.get('CM_MLPERF_LOADGEN_COMPLIANCE_TEST', 'test01').lower() + 
env['CM_MLPERF_NVIDIA_RUN_COMMAND'] = "run_audit_{}_once".format(test_name) + make_command = "run_audit_{}_once".format(test_name) + else: + return {'return': 1, 'error': 'Unsupported mode: {}'.format(env['CM_MLPERF_LOADGEN_MODE'])} + + run_config = '' + + target_qps = env.get('CM_MLPERF_LOADGEN_TARGET_QPS') + offline_target_qps = env.get('CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS') + server_target_qps = env.get('CM_MLPERF_LOADGEN_SERVER_TARGET_QPS') + if target_qps: + target_qps = int(float(target_qps)) + if scenario == "offline" and not offline_target_qps: + run_config += f" --offline_expected_qps={target_qps}" + elif scenario == "server" and not server_target_qps: + run_config += f" --server_target_qps={target_qps}" + + if offline_target_qps: + offline_target_qps = int(float(offline_target_qps)) + run_config += f" --offline_expected_qps={offline_target_qps}" + if server_target_qps: + server_target_qps = int(float(server_target_qps)) + run_config += f" --server_target_qps={server_target_qps}" + + target_latency = env.get('CM_MLPERF_LOADGEN_TARGET_LATENCY') + singlestream_target_latency = env.get('CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY') + multistream_target_latency = env.get('CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY') + if target_latency: + target_latency_ns = int(float(target_latency) * 1000000) + if scenario == "singlestream" and not singlestream_target_latency: + run_config += f" --single_stream_expected_latency_ns={target_latency_ns}" + elif scenario == "multistream" and not multistream_target_latency: + run_config += f" --multi_stream_expected_latency_ns={target_latency_ns}" + + if singlestream_target_latency: + singlestream_target_latency_ns = int(float(singlestream_target_latency) * 1000000) + run_config += f" --single_stream_expected_latency_ns={singlestream_target_latency_ns}" + if multistream_target_latency: + multistream_target_latency_ns = int(float(multistream_target_latency) * 1000000) + run_config += f" --multi_stream_expected_latency_ns={multistream_target_latency_ns}" + + high_accuracy = "99.9" in env['CM_MODEL'] + + config_ver_list = [] + + use_lon = env.get('CM_MLPERF_NVIDIA_HARNESS_LON') + if use_lon: + config_ver_list.append( "lon_node") + #run_config += " --lon_node" + + maxq = env.get('CM_MLPERF_NVIDIA_HARNESS_MAXQ') + if maxq: + config_ver_list.append( "maxq") + + if high_accuracy: + config_ver_list.append( "high_accuracy") + + use_triton = env.get('CM_MLPERF_NVIDIA_HARNESS_USE_TRITON') + if use_triton: + run_config += " --use_triton " + config_ver_list.append( "triton") + + if config_ver_list: + run_config += f" --config_ver={'_'.join(config_ver_list)}" + + user_conf_path = env.get('CM_MLPERF_USER_CONF') + if user_conf_path and env['CM_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness": + run_config += f" --user_conf_path={user_conf_path}" + + mlperf_conf_path = env.get('CM_MLPERF_INFERENCE_CONF_PATH') + if mlperf_conf_path and env['CM_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness": + run_config += f" --mlperf_conf_path={mlperf_conf_path}" + + power_setting = env.get('CM_MLPERF_NVIDIA_HARNESS_POWER_SETTING') + if power_setting and env['CM_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness": + run_config += f" --power_setting={power_setting}" + + gpu_copy_streams = env.get('CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS') + if gpu_copy_streams: + run_config += f" --gpu_copy_streams={gpu_copy_streams}" + + gpu_inference_streams = env.get('CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS') + if gpu_inference_streams: + run_config += f" 
--gpu_inference_streams={gpu_inference_streams}" + + dla_copy_streams = env.get('CM_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS') + if dla_copy_streams: + run_config += f" --dla_copy_streams={dla_copy_streams}" + + dla_inference_streams = env.get('CM_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS') + if dla_inference_streams: + run_config += f" --dla_inference_streams={dla_inference_streams}" + + gpu_batch_size = env.get('CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE') + if gpu_batch_size: + run_config += f" --gpu_batch_size={gpu_batch_size}" + + dla_batch_size = env.get('CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE') + if dla_batch_size: + run_config += f" --dla_batch_size={dla_batch_size}" + + input_format = env.get('CM_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT') + if input_format: + run_config += f" --input_format={input_format}" + + performance_sample_count = env.get('CM_MLPERF_PERFORMANCE_SAMPLE_COUNT') + if performance_sample_count: + run_config += f" --performance_sample_count={performance_sample_count}" + + devices = env.get('CM_MLPERF_NVIDIA_HARNESS_DEVICES') + if devices: + run_config += f" --devices={devices}" + + audio_batch_size = env.get('CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE') + if audio_batch_size: + run_config += f" --audio_batch_size={audio_batch_size}" + + disable_encoder_plugin = env.get('CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN') + if disable_encoder_plugin and disable_encoder_plugin.lower() not in [ "no", "false" ]: + run_config += " --disable_encoder_plugin" + + workspace_size = env.get('CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE') + if workspace_size: + run_config += f" --workspace_size={workspace_size}" + + if env.get('CM_MLPERF_LOADGEN_LOGS_DIR'): + env['MLPERF_LOADGEN_LOGS_DIR'] = env['CM_MLPERF_LOADGEN_LOGS_DIR'] + + log_dir = env.get('CM_MLPERF_NVIDIA_HARNESS_LOG_DIR') + if log_dir: + run_config += f" --log_dir={log_dir}" + + use_graphs = env.get('CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS') + if use_graphs and use_graphs.lower() not in [ "no", "false" ]: + run_config += " --use_graphs" + + use_deque_limit = env.get('CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT') + if use_deque_limit and use_deque_limit.lower() not in [ "no", "false" ]: + run_config += " --use_deque_limit" + + deque_timeout_usec = env.get('CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC') + if deque_timeout_usec: + run_config += f" --deque_timeout_usec={deque_timeout_usec}" + + use_cuda_thread_per_device = env.get('CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE') + if use_cuda_thread_per_device and use_cuda_thread_per_device.lower() not in [ "no", "false" ]: + run_config += " --use_cuda_thread_per_device" + + run_infer_on_copy_streams = env.get('CM_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS') + if run_infer_on_copy_streams and run_infer_on_copy_streams.lower() not in [ "no", "false" ]: + run_config += " --run_infer_on_copy_streams" + + start_from_device = env.get('CM_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE') + if start_from_device and start_from_device.lower() not in [ "no", "false" ]: + run_config += " --start_from_device" + + end_on_device = env.get('CM_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE') + if end_on_device and end_on_device.lower() not in [ "no", "false" ]: + run_config += " --end_on_device" + + max_dlas = env.get('CM_MLPERF_NVIDIA_HARNESS_MAX_DLAS') + if max_dlas: + run_config += f" --max_dlas={max_dlas}" + + graphs_max_seqlen = env.get('CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN') + if graphs_max_seqlen: + run_config += f" --graphs_max_seqlen={graphs_max_seqlen}" + + num_issue_query_threads = 
env.get('CM_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS') + if num_issue_query_threads: + run_config += f" --num_issue_query_threads={num_issue_query_threads}" + + soft_drop = env.get('CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP') + if soft_drop: + run_config += f" --soft_drop={soft_drop}" + + use_small_tile_gemm_plugin = env.get('CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN') + if use_small_tile_gemm_plugin and use_small_tile_gemm_plugin.lower() not in [ "no", "false" ]: + run_config += f" --use_small_tile_gemm_plugin" + + audio_buffer_num_lines = env.get('CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES') + if audio_buffer_num_lines: + run_config += f" --audio_buffer_num_lines={audio_buffer_num_lines}" + + use_fp8 = env.get('CM_MLPERF_NVIDIA_HARNESS_USE_FP8') + if use_fp8 and use_fp8.lower() not in [ "no", "false" ]: + run_config += f" --use_fp8" + + enable_sort = env.get('CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT') + if enable_sort and enable_sort.lower() not in [ "no", "false" ]: + run_config += f" --enable_sort" + + num_sort_segments = env.get('CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS') + if num_sort_segments: + run_config += f" --num_sort_segments={num_sort_segments}" + + embedding_weights_on_gpu_part = env.get('CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART', '') + if embedding_weights_on_gpu_part != '': + run_config += f" --embedding_weights_on_gpu_part={embedding_weights_on_gpu_part}" + + num_warmups = env.get('CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS', '') + if num_warmups != '': + run_config += f" --num_warmups={num_warmups}" + + skip_postprocess = env.get('CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS') + if skip_postprocess and skip_postprocess.lower() not in [ "no", "false" ]: + run_config += f" --skip_postprocess" + + if test_mode: + test_mode_string = " --test_mode={}".format(test_mode) + else: + test_mode_string = "" + + extra_build_engine_options_string = env.get('CM_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS', '') + + extra_run_options_string = env.get('CM_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS', '') #will be ignored during build engine + + run_config += " --no_audit_verify" + + cmds.append(f"make {make_command} RUN_ARGS=' --benchmarks={model_name} --scenarios={scenario} {test_mode_string} {run_config} {extra_build_engine_options_string} {extra_run_options_string}'") + + run_cmd = " && ".join(cmds) + env['CM_MLPERF_RUN_CMD'] = run_cmd + env['CM_RUN_CMD'] = run_cmd + env['CM_RUN_DIR'] = env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] + +# print(env) + + return {'return':0} + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return':0} diff --git a/script/app-mlperf-inference-nvidia/run.sh b/script/app-mlperf-inference-nvidia/run.sh new file mode 100644 index 0000000000..ddcd0b5504 --- /dev/null +++ b/script/app-mlperf-inference-nvidia/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${CM_RUN_DIR} + cmd=${CM_RUN_CMD} + echo "${cmd}" + eval "${cmd}" + test $? -eq 0 || exit $? 
+fi
diff --git a/script/app-mlperf-inference-qualcomm/README.md b/script/app-mlperf-inference-qualcomm/README.md
new file mode 100644
index 0000000000..c070d9d4df
--- /dev/null
+++ b/script/app-mlperf-inference-qualcomm/README.md
@@ -0,0 +1,746 @@
+Automatically generated README for this automation recipe: **app-mlperf-inference-qualcomm**
+
+Category: **Modular MLPerf benchmarks**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-mlperf-inference-qualcomm,eef1aca5d7c0470e) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-qualcomm)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see in above meta description): *reproduce,mlcommons,mlperf,inference,harness,qualcomm-harness,qualcomm,kilt-harness,kilt*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,qualcomm-harness,qualcomm,kilt-harness,kilt`
+
+`cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,qualcomm-harness,qualcomm,kilt-harness,kilt[,variations] [--input_flags]`
+
+*or*
+
+`cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt"`
+
+`cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'reproduce,mlcommons,mlperf,inference,harness,qualcomm-harness,qualcomm,kilt-harness,kilt',
+                  'out': 'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return'] > 0:
+    print(r['error'])
+
+```
+
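+A minimal sketch of the same call that also selects a model variation and passes
+an input flag (the `_bert-99` tag and the `scenario` key below are illustrative
+choices, not defaults):
+
+```python
+import cmind
+
+# Illustrative only: variation tags are appended to 'tags' with a leading
+# underscore; extra keys such as 'scenario' are passed through as script inputs.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'reproduce,mlcommons,mlperf,inference,harness,qualcomm-harness,qualcomm,kilt-harness,kilt,_bert-99',
+                  'scenario': 'Offline',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+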
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="reproduce,mlcommons,mlperf,inference,harness,qualcomm-harness,qualcomm,kilt-harness,kilt"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=reproduce,mlcommons,mlperf,inference,harness,qualcomm-harness,qualcomm,kilt-harness,kilt) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *Internal group (variations should not be selected manually)* +
+ Click here to expand this section. + + * `_bert_` + - Environment variables: + - *CM_BENCHMARK*: `STANDALONE_BERT` + - *kilt_model_name*: `bert` + - *kilt_model_seq_length*: `384` + - *kilt_model_bert_variant*: `BERT_PACKED` + - *kilt_input_format*: `INT64,1,384:INT64,1,8:INT64,1,384:INT64,1,384` + - *kilt_output_format*: `FLOAT32,1,384:FLOAT32,1,384` + - *dataset_squad_tokenized_max_seq_length*: `384` + - *loadgen_buffer_size*: `10833` + - *loadgen_dataset_size*: `10833` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_transformers + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_safetensors + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnx + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
+ + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_activation-count.#` + - Environment variables: + - *CM_MLPERF_QAIC_ACTIVATION_COUNT*: `#` + - Workflow: + * `_bert-99,offline` + - Workflow: + * `_bert-99,qaic` + - Environment variables: + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int32` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int8,fp16` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * compile,qaic,model,_bert-99,_pc.99.9980 + * `if (CM_MLPERF_SKIP_RUN != True)` + * CM names: `--adr.['qaic-model-compiler', 'bert-99-compiler']...` + - CM script: [compile-model-for.qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/compile-model-for.qaic) + * `_bert-99.9,offline` + - Workflow: + * `_bert-99.9,qaic` + - Environment variables: + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int32` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp16` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * compile,qaic,model,_bert-99.9 + * `if (CM_MLPERF_SKIP_RUN != True)` + * CM names: `--adr.['qaic-model-compiler', 'bert-99.9-compiler']...` + - CM script: [compile-model-for.qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/compile-model-for.qaic) + * `_bert_,network-client` + - Environment variables: + - *CM_BENCHMARK*: `NETWORK_BERT_CLIENT` + - Workflow: + * `_bert_,network-server` + - Environment variables: + - *CM_BENCHMARK*: `NETWORK_BERT_SERVER` + - Workflow: + * `_bert_,qaic` + - Environment variables: + - *kilt_model_batch_size*: `1` + - *kilt_input_format*: `UINT32,1,384:UINT32,1,8:UINT32,1,384:UINT32,1,384` + - *kilt_input_formata*: `UINT32,1,384:UINT32,1,384:UINT32,1,384` + - *kilt_output_formatia*: `UINT8,1,384:UINT8,1,384` + - *kilt_device_qaic_skip_stage*: `convert` + - Workflow: + * `_bert_,singlestream` + - Environment variables: + - *kilt_model_batch_size*: `1` + - Workflow: + * `_dl2q.24xlarge,bert-99,offline` + - Environment variables: + - *qaic_activation_count*: `14` + - Workflow: + * `_dl2q.24xlarge,bert-99.9,offline` + - Environment variables: + - *qaic_activation_count*: `7` + - Workflow: + * `_dl2q.24xlarge,bert-99.9,server` + - Environment variables: + - *qaic_activation_count*: `7` + - Workflow: + * `_dl2q.24xlarge,resnet50,multistream` + - Environment variables: + - *qaic_activation_count*: `1` + - Workflow: + * `_dl2q.24xlarge,resnet50,offline` + - Environment variables: + - *qaic_activation_count*: `3` + - Workflow: + * `_dl2q.24xlarge,resnet50,server` + - Environment variables: + - *qaic_activation_count*: `3` + - Workflow: + * `_dl2q.24xlarge,retinanet,offline` + - Environment variables: + - *qaic_activation_count*: `14` + - Workflow: + * `_dl2q.24xlarge,retinanet,server` + - Environment variables: + - *qaic_activation_count*: `14` + - Workflow: + * `_dl2q.24xlarge,singlestream` + - Environment variables: + - *CM_QAIC_DEVICES*: `0` + - *qaic_activation_count*: `1` + - Workflow: + * `_num-devices.4` + - Environment variables: + - *CM_QAIC_DEVICES*: `0,1,2,3` + - Workflow: + * `_pro` + - Environment variables: + - *qaic_queue_length*: `10` + - Workflow: + * `_pro,num-devices.4,bert-99,offline` + - Environment variables: + - *qaic_activation_count*: `16` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * set,device,qaic,_vc.15 + - CM script: [set-device-settings-qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-device-settings-qaic) + * `_pro,num-devices.4,bert-99,server` + - Environment variables: + - *qaic_activation_count*: `16` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * set,device,qaic,_vc.13 + - CM script: [set-device-settings-qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-device-settings-qaic) + * `_pro,num-devices.4,bert-99.9,offline` + - Environment variables: + - *qaic_activation_count*: `8` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * set,device,qaic,_vc.13 + - CM script: [set-device-settings-qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-device-settings-qaic) + * `_pro,num-devices.4,bert-99.9,server` + - Environment variables: + - *qaic_activation_count*: `8` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * set,device,qaic,_vc.13 + - CM script: [set-device-settings-qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-device-settings-qaic) + * `_pro,num-devices.4,resnet50,offline` + - Environment variables: + - *qaic_activation_count*: `4` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * set,device,qaic,_vc.16 + - CM script: [set-device-settings-qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-device-settings-qaic) + * `_pro,num-devices.4,resnet50,server` + - Environment variables: + - *qaic_activation_count*: `4` + - Workflow: + * `_pro,num-devices.4,retinanet,offline` + - Environment variables: + - *qaic_activation_count*: `16` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * set,device,qaic,_vc.17 + - CM script: [set-device-settings-qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-device-settings-qaic) + * `_pro,num-devices.4,retinanet,server` + - Environment variables: + - *qaic_activation_count*: `16` + - Workflow: + * `_pro,num-devices.4,singlestream` + - Environment variables: + - *CM_QAIC_DEVICES*: `0` + - *qaic_activation_count*: `1` + - Workflow: + * `_rb6,bert-99,offline` + - Environment variables: + - *qaic_activation_count*: `9` + - Workflow: + * `_rb6,resnet50,multistream` + - Environment variables: + - *qaic_activation_count*: `2` + - Workflow: + * `_rb6,resnet50,offline` + - Environment variables: + - *qaic_activation_count*: `2` + - Workflow: + * `_rb6,retinanet,multistream` + - Environment variables: + - *qaic_activation_count*: `8` + - Workflow: + * `_rb6,retinanet,offline` + - Environment variables: + - *qaic_activation_count*: `9` + - Workflow: + * `_rb6,singlestream` + - Environment variables: + - *qaic_activation_count*: `1` + - Workflow: + * `_resnet50,uint8` + - Environment variables: + - *kilt_input_format*: `UINT8,-1,224,224,3` + - *kilt_device_qaic_skip_stage*: `convert` + - *CM_IMAGENET_ACCURACY_DTYPE*: `int8` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int8` + - Workflow: + * `_retinanet,qaic,uint8` + - Environment variables: + - *kilt_device_qaic_skip_stage*: `convert` + - *kilt_input_format*: `UINT8,1,3,800,800` + - *kilt_output_format*: `INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,4,1000:INT8,14,1000:INT8,1,4,1000:INT8,1,4,1000:INT8,1,4,1000` + - 
*CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int8` + - Workflow: + * `_singlestream,resnet50` + - Workflow: + * `_singlestream,retinanet` + - Workflow: + +
+ + + * Group "**batch-size**" +
+ Click here to expand this section. + + * `_bs.#` + - Environment variables: + - *kilt_model_batch_size*: `#` + - Workflow: + * `_bs.0` + - Environment variables: + - *kilt_model_batch_size*: `1` + - Workflow: + +
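+The `#` in a wildcard variation such as `_bs.#` is substituted verbatim into the mapped environment variable, so `_bs.8` sets `kilt_model_batch_size=8`. A minimal, hedged Python sketch of selecting such a variation through the CM API follows; the batch size is an illustrative assumption, not a tuned value:
+
+```python
+import cmind
+
+# Hedged sketch: '_bs.8' resolves the batch-size wildcard above,
+# so kilt_model_batch_size is set to 8 before the harness is built.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'reproduce,mlcommons,mlperf,inference,harness,'
+                          'qualcomm-harness,qualcomm,kilt-harness,kilt,_bs.8',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```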
+ + + * Group "**device**" +
+ Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `cpu` + - *kilt_backend_type*: `cpu` + - Workflow: + * `_cuda` + - Environment variables: + - *CM_MLPERF_DEVICE*: `gpu` + - *CM_MLPERF_DEVICE_LIB_NAMESPEC*: `cudart` + - *kilt_backend_type*: `gpu` + - Workflow: + * `_qaic` + - Environment variables: + - *CM_MLPERF_DEVICE*: `qaic` + - *CM_MLPERF_DEVICE_LIB_NAMESPEC*: `QAic` + - *kilt_backend_type*: `qaic` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,qaic,platform,sdk + * `if (CM_MLPERF_SKIP_RUN != True)` + - CM script: [get-qaic-platform-sdk](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-qaic-platform-sdk) + * get,lib,protobuf,_tag.v3.11.4 + * `if (CM_MLPERF_SKIP_RUN != True)` + - CM script: [get-lib-protobuf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-lib-protobuf) + * set,device,mode,qaic + * `if (CM_QAIC_VC in on)` + - CM script: [set-device-settings-qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-device-settings-qaic) + * set,device,mode,qaic,_ecc + * `if (CM_QAIC_ECC in yes)` + - CM script: [set-device-settings-qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-device-settings-qaic) + +
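+Note that the last two `_qaic` dependencies above are conditional: the `set,device,mode,qaic` steps only trigger when `CM_QAIC_VC` or `CM_QAIC_ECC` is set in the environment. A hedged sketch of enabling ECC mode through the `env` dictionary (the key and value are taken from the condition above; whether your device supports ECC is an assumption):
+
+```python
+import cmind
+
+# Hedged sketch: passing CM_QAIC_ECC=yes should activate the
+# conditional 'set,device,mode,qaic,_ecc' dependency listed above.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'reproduce,mlcommons,mlperf,inference,harness,'
+                          'qualcomm-harness,qualcomm,kilt-harness,kilt,_qaic',
+                  'env': {'CM_QAIC_ECC': 'yes'},
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```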
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * `_glow` + - Environment variables: + - *device*: `qaic` + - *CM_MLPERF_BACKEND*: `glow` + - *CM_MLPERF_BACKEND_LIB_NAMESPEC*: `QAic` + - Workflow: + * **`_onnxruntime`** (default) + - Environment variables: + - *device*: `onnxrt` + - *CM_MLPERF_BACKEND*: `onnxruntime` + - *CM_MLPERF_BACKEND_LIB_NAMESPEC*: `onnxruntime` + - Workflow: + * `_tensorrt` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tensorrt` + - *device*: `tensorrt` + - *CM_MLPERF_BACKEND_NAME*: `TensorRT` + - Workflow: + +
+ + + * Group "**loadgen-batch-size**" +
+ Click here to expand this section. + + * `_loadgen-batch-size.#` + - Environment variables: + - *CM_MLPERF_LOADGEN_BATCH_SIZE*: `#` + - Workflow: + +
+ + + * Group "**loadgen-scenario**" +
+ Click here to expand this section. + + * `_multistream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `MultiStream` + - Workflow: + * `_offline` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Offline` + - Workflow: + * `_server` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Server` + - Workflow: + * `_singlestream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `SingleStream` + - Workflow: + +
+ + + * Group "**model**" +
+ Click here to expand this section. + + * `_bert-99` + - Environment variables: + - *CM_MODEL*: `bert-99` + - *CM_SQUAD_ACCURACY_DTYPE*: `float32` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx` + - Workflow: + * `_bert-99.9` + - Environment variables: + - *CM_MODEL*: `bert-99.9` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/3733910/files/model.onnx` + - Workflow: + * **`_resnet50`** (default) + - Environment variables: + - *CM_MODEL*: `resnet50` + - *kilt_model_name*: `resnet50` + - *kilt_input_count*: `1` + - *kilt_output_count*: `1` + - *kilt_input_format*: `FLOAT32,-1,224,224,3` + - *kilt_output_format*: `INT64,-1` + - *dataset_imagenet_preprocessed_input_square_side*: `224` + - *ml_model_has_background_class*: `YES` + - *ml_model_image_height*: `224` + - *loadgen_buffer_size*: `1024` + - *loadgen_dataset_size*: `50000` + - *CM_BENCHMARK*: `STANDALONE_CLASSIFICATION` + - Workflow: + * `_retinanet` + - Environment variables: + - *CM_MODEL*: `retinanet` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth` + - *kilt_model_name*: `retinanet` + - *kilt_input_count*: `1` + - *kilt_model_max_detections*: `600` + - *kilt_output_count*: `1` + - *kilt_input_format*: `FLOAT32,-1,3,800,800` + - *kilt_output_format*: `INT64,-1` + - *dataset_imagenet_preprocessed_input_square_side*: `224` + - *ml_model_image_height*: `800` + - *ml_model_image_width*: `800` + - *loadgen_buffer_size*: `64` + - *loadgen_dataset_size*: `24576` + - *CM_BENCHMARK*: `STANDALONE_OBJECT_DETECTION` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_Pillow + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torch + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torchvision + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_opencv-python + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_pycocotools + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
+ + + * Group "**nsp**" +
+ Click here to expand this section. + + * `_nsp.#` + - Workflow: + * `_nsp.14` + - Workflow: + * `_nsp.16` + - Workflow: + +
+ + + * Group "**power-mode**" +
+ Click here to expand this section. + + * `_maxn` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_MAXN*: `True` + - Workflow: + * `_maxq` + - Environment variables: + - *CM_MLPERF_NVIDIA_HARNESS_MAXQ*: `True` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * `_fp16` + - Workflow: + * `_fp32` + - Environment variables: + - *CM_IMAGENET_ACCURACY_DTYPE*: `float32` + - Workflow: + * `_uint8` + - Workflow: + +
+ + + * Group "**run-mode**" +
+ Click here to expand this section. + + * `_network-client` + - Environment variables: + - *CM_RUN_MODE*: `network-client` + - Workflow: + * `_network-server` + - Environment variables: + - *CM_RUN_MODE*: `network-server` + - Workflow: + * **`_standalone`** (default) + - Environment variables: + - *CM_RUN_MODE*: `standalone` + - Workflow: + +
+ + + * Group "**sut**" +
+ Click here to expand this section. + + * `_dl2q.24xlarge` + - Environment variables: + - *CM_QAIC_DEVICES*: `0,1,2,3,4,5,6,7` + - *qaic_queue_length*: `4` + - Workflow: + * `_rb6` + - Environment variables: + - *CM_QAIC_DEVICES*: `0` + - *qaic_queue_length*: `6` + - Workflow: + +
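+One variation can be picked from each group and combined in a single tag string; the combination entries listed earlier (for example `_dl2q.24xlarge,resnet50,offline`) are then matched automatically. A hedged end-to-end sketch (this particular combination is illustrative and assumes a QAIC-equipped SUT):
+
+```python
+import cmind
+
+# Hedged sketch: one variation per group (device, model, scenario,
+# precision) plus the SUT, matching the _dl2q.24xlarge,resnet50,offline
+# combination documented above.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'reproduce,mlcommons,mlperf,inference,harness,'
+                          'qualcomm-harness,qualcomm,kilt-harness,kilt,'
+                          '_qaic,_resnet50,_offline,_uint8,_dl2q.24xlarge',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```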
+ + +#### Default variations + +`_cpu,_onnxruntime,_resnet50,_standalone` + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` +* `--devices=value` → `CM_QAIC_DEVICES=value` +* `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` +* `--mlperf_conf=value` → `CM_MLPERF_CONF=value` +* `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` +* `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` +* `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` +* `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` +* `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` +* `--rerun=value` → `CM_RERUN=value` +* `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` +* `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` +* `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` +* `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value` +* `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value` +* `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` +* `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` +* `--user_conf=value` → `CM_MLPERF_USER_CONF=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "count":...}) +``` + +
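+For completeness, here is a hedged, self-contained version of the call sketched above; the flag values are illustrative assumptions, not recommended settings:
+
+```python
+import cmind as cm
+
+# Hedged sketch: input flags map to the environment variables listed
+# above (e.g. 'count' -> CM_MLPERF_LOADGEN_QUERY_COUNT).
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'reproduce,mlcommons,mlperf,inference,harness,'
+                       'qualcomm-harness,qualcomm,kilt-harness,kilt',
+               'count': 100,            # illustrative query count
+               'scenario': 'Offline',
+               'mode': 'performance',
+               'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```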
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_BATCH_COUNT: `1` +* CM_BATCH_SIZE: `1` +* CM_FAST_COMPILATION: `yes` +* CM_MLPERF_LOADGEN_SCENARIO: `Offline` +* CM_MLPERF_LOADGEN_MODE: `performance` +* CM_SKIP_PREPROCESS_DATASET: `no` +* CM_SKIP_MODEL_DOWNLOAD: `no` +* CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `kilt` +* CM_MLPERF_SKIP_RUN: `no` +* CM_KILT_REPO_URL: `https://github.com/GATEOverflow/kilt-mlperf` +* CM_QAIC_DEVICES: `0` +* kilt_max_wait_abs: `10000` +* verbosity: `0` +* loadgen_trigger_cold_run: `0` + +
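+As a hedged example of overriding one of these defaults through the `env` dictionary (the key comes from the list above; the value is illustrative):
+
+```python
+import cmind
+
+# Hedged sketch: override the default CM_MLPERF_LOADGEN_MODE
+# ('performance') with 'accuracy' for this run only.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'reproduce,mlcommons,mlperf,inference,harness,'
+                          'qualcomm-harness,qualcomm,kilt-harness,kilt',
+                  'env': {'CM_MLPERF_LOADGEN_MODE': 'accuracy'},
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```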
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-qualcomm/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,git,repo + * CM names: `--adr.['kilt-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + * get,mlcommons,inference,src + * CM names: `--adr.['inference-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + * get,mlcommons,inference,loadgen + * CM names: `--adr.['inference-loadgen']...` + - CM script: [get-mlperf-inference-loadgen](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-loadgen) + * generate,user-conf,mlperf,inference + * CM names: `--adr.['user-conf-generator']...` + - CM script: [generate-mlperf-inference-user-conf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/generate-mlperf-inference-user-conf) + * get,generic-python-lib,_mlperf_logging + * CM names: `--adr.['mlperf-logging']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,ml-model,resnet50,_fp32,_onnx,_from-tf + * `if (CM_MODEL == resnet50) AND (CM_MLPERF_DEVICE != qaic)` + * CM names: `--adr.['resnet50-model', 'ml-model']...` + - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50) + * compile,qaic,model,_resnet50 + * `if (CM_MODEL == resnet50 AND CM_MLPERF_DEVICE == qaic) AND (CM_MLPERF_SKIP_RUN != True)` + * CM names: `--adr.['qaic-model-compiler', 'resnet50-compiler']...` + - CM script: [compile-model-for.qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/compile-model-for.qaic) + * get,dataset,imagenet,preprocessed,_for.resnet50,_NHWC,_full + * `if (CM_MODEL == resnet50) AND (CM_MLPERF_SKIP_RUN != True)` + * CM names: `--adr.['imagenet-preprocessed', 'dataset-preprocessed']...` + - CM script: [get-preprocessed-dataset-imagenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-imagenet) + * get,squad-vocab + * `if (CM_MODEL in ['bert-99', 'bert-99.9']) AND (CM_MLPERF_SKIP_RUN != True)` + * CM names: `--adr.['bert-vocab']...` + - CM script: [get-dataset-squad-vocab](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-squad-vocab) + * get,dataset,tokenized,squad,_raw + * `if (CM_MODEL in ['bert-99', 'bert-99.9']) AND (CM_MLPERF_SKIP_RUN != True)` + * CM names: `--adr.['squad-tokenized']...` + - CM script: [get-preprocessed-dataset-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-squad) + * compile,qaic,model,_retinanet + * `if (CM_MODEL == retinanet AND CM_MLPERF_DEVICE == qaic) AND (CM_MLPERF_SKIP_RUN != True)` + * CM names: `--adr.['qaic-model-compiler', 'retinanet-compiler']...` + - CM script: [compile-model-for.qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/compile-model-for.qaic) + * get,dataset,preprocessed,openimages,_for.retinanet.onnx,_NCHW,_validation,_custom-annotations + * `if 
(CM_MODEL == retinanet) AND (CM_MLPERF_SKIP_RUN != True)` + * CM names: `--adr.['openimages-preprocessed', 'dataset-preprocessed']...` + - CM script: [get-preprocessed-dataset-openimages](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-openimages) + * get,lib,onnxruntime,lang-cpp,_cpu + * `if (CM_MLPERF_BACKEND == onnxruntime AND CM_MLPERF_DEVICE == cpu)` + - CM script: [get-onnxruntime-prebuilt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-onnxruntime-prebuilt) + * get,lib,onnxruntime,lang-cpp,_cuda + * `if (CM_MLPERF_BACKEND == onnxruntime AND CM_MLPERF_DEVICE == gpu)` + - CM script: [get-onnxruntime-prebuilt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-onnxruntime-prebuilt) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-qualcomm/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-qualcomm/_cm.yaml) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-qualcomm/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-qualcomm/_cm.yaml) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-qualcomm/customize.py)*** + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference-qualcomm/_cm.yaml)*** + * compile,cpp-program + * `if (CM_MLPERF_SKIP_RUN != True)` + * CM names: `--adr.['compile-program']...` + - CM script: [compile-program](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/compile-program) + * benchmark-mlperf + * `if (CM_MLPERF_SKIP_RUN not in ['yes', True])` + * CM names: `--adr.['runner', 'mlperf-runner']...` + - CM script: [benchmark-program-mlperf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-program-mlperf) + * save,mlperf,inference,state + * CM names: `--adr.['save-mlperf-inference-state']...` + - CM script: [save-mlperf-inference-implementation-state](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/save-mlperf-inference-implementation-state) + +___ +### Script output +`cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +* `CM_HW_NAME` +* `CM_IMAGENET_ACCURACY_DTYPE` +* `CM_MAX_EXAMPLES` +* `CM_MLPERF_*` +* `CM_ML_MODEL_*` +* `CM_SQUAD_ACCURACY_DTYPE` +#### New environment keys auto-detected from customize + +* `CM_DATASET_LIST` +* `CM_MLPERF_CONF` +* `CM_MLPERF_DEVICE` +* `CM_MLPERF_USER_CONF` \ No newline at end of file diff --git a/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md b/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md new file mode 100644 index 0000000000..7dde066479 --- /dev/null +++ b/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md @@ -0,0 +1,97 @@ +# MLPerf Inference Benchmarking on AWS dl2q.24xlarge instance using 8 QAIC Cloud AI 100 accelerators + +The `dl2q.24xlarge` instance is available in `us-west-2d` and has 96 vCPUs and 768 GB of memory. 
+ +The [Deep Learning Base Qualcomm AMI (Amazon Linux 2) 20240110, ami-0799a42a111b1b87a](https://us-west-2.console.aws.amazon.com/ec2/home?region=us-west-2#LaunchInstances:ami=ami-0799a42a111b1b87a) +image from the Community AMIs is the recommended OS image, as it comes with the QAIC SDKs (both Apps and Platform) preinstalled. + +* A 300 GB root disk is recommended + + +## System setup +``` +sudo yum install -y python38-devel git +python3.8 -m pip install cmind +cm pull repo mlcommons@ck +cm run script --tags=get,python --version_min=3.8.1 +``` + +## Bert-99 + +### Quick performance run +``` +cm run script --tags=generate-run-cmds,inference,_performance-only --device=qaic \ +--backend=glow --scenario=Offline --implementation=kilt --model=bert-99 \ +--test_query_count=40000 --precision=uint8 --rerun --quiet \ +--adr.mlperf-inference-implementation.tags=_loadgen-batch-size.4096,_dl2q.24xlarge \ +--adr.compiler.tags=gcc --execution-mode=test +``` + +### Full valid run +``` +cm run script --tags=generate-run-cmds,inference,_submission --device=qaic \ +--backend=glow --scenario=Offline --implementation=kilt --model=bert-99 --precision=uint8 \ +--adr.mlperf-inference-implementation.tags=_loadgen-batch-size.4096,_dl2q.24xlarge \ +--rerun --quiet --execution-mode=valid +``` + +The expected performance is ~5700 QPS. +The expected accuracy is ~90 (F1 score). +* Use `--scenario=Server --server_target_qps=5200` to run the server scenario + + +## ResNet50 + +(Optional) +If you already have the ImageNet 2012 validation dataset downloaded, you can register it in CM as follows. This optional step avoids the download from the public URL, which can be slow at times. +``` +cm run script --tags=get,dataset,imagenet,original,_full --env.IMAGENET_PATH=`pwd`/imagenet-2012-val +``` + +### Quick performance run + +``` +cm run script --tags=generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ +--scenario=Offline --implementation=kilt --model=resnet50 \ +--test_query_count=400000 --precision=uint8 --rerun --adr.compiler.tags=gcc \ +--adr.mlperf-inference-implementation.tags=_bs.8,_dl2q.24xlarge --execution-mode=test --quiet +``` + +### Full valid run + +``` +cm run script --tags=generate-run-cmds,inference,_submission --device=qaic --backend=glow \ +--scenario=Offline --implementation=kilt --model=resnet50 \ +--precision=uint8 --rerun --adr.compiler.tags=gcc \ +--adr.mlperf-inference-implementation.tags=_bs.8,_dl2q.24xlarge --execution-mode=valid --quiet +``` +The expected performance is ~157500 QPS. +The expected accuracy is 75.936%. + +* Use `--scenario=Server --server_target_qps=152000` to run the server scenario + + +## RetinaNet + +### Quick performance run + +``` +cm run script --tags=generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ +--scenario=Offline --implementation=kilt --model=retinanet --test_query_count=40000 --precision=uint8 \ +--rerun --quiet --adr.mlperf-inference-implementation.tags=_loadgen-batch-size.1,_dl2q.24xlarge,_bs.1 \ +--adr.compiler.tags=gcc --execution-mode=test +``` + +### Full valid run + +``` +cm run script --tags=generate-run-cmds,inference,_submission --device=qaic --backend=glow \ +--scenario=Offline --implementation=kilt --model=retinanet \ +--precision=uint8 --rerun --adr.compiler.tags=gcc --adr.dataset-preprocessed.tags=_custom-annotations \ +--adr.mlperf-inference-implementation.tags=_bs.1,_dl2q.24xlarge --execution-mode=valid --quiet +``` +The expected performance is ~2200 QPS. +The expected accuracy is 37.234 mAP. + +* Use `--scenario=Server
--server_target_qps=2050` to run the server scenario + diff --git a/script/app-mlperf-inference-qualcomm/_cm.yaml b/script/app-mlperf-inference-qualcomm/_cm.yaml new file mode 100644 index 0000000000..721373a411 --- /dev/null +++ b/script/app-mlperf-inference-qualcomm/_cm.yaml @@ -0,0 +1,775 @@ +# Identification of this CM script +alias: app-mlperf-inference-qualcomm +uid: eef1aca5d7c0470e +cache: false +can_force_cache: true + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf benchmarks" + + +# User-friendly tags to find this CM script +tags: + - reproduce + - mlcommons + - mlperf + - inference + - harness + - qualcomm-harness + - qualcomm + - kilt-harness + - kilt + +# Default environment +default_env: + CM_BATCH_COUNT: '1' + CM_BATCH_SIZE: '1' + CM_FAST_COMPILATION: 'yes' + CM_MLPERF_LOADGEN_SCENARIO: Offline + CM_MLPERF_LOADGEN_MODE: performance + CM_SKIP_PREPROCESS_DATASET: 'no' + CM_SKIP_MODEL_DOWNLOAD: 'no' + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: kilt + CM_MLPERF_SKIP_RUN: 'no' + CM_KILT_REPO_URL: https://github.com/GATEOverflow/kilt-mlperf + CM_QAIC_DEVICES: "0" + kilt_max_wait_abs: 10000 + verbosity: 0 + loadgen_trigger_cold_run: 0 + +env: + CM_CALL_MLPERF_RUNNER: 'no' + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: CM_MLPERF_CONF + mode: CM_MLPERF_LOADGEN_MODE + output_dir: CM_MLPERF_OUTPUT_DIR + performance_sample_count: CM_MLPERF_PERFORMANCE_SAMPLE_COUNT + scenario: CM_MLPERF_LOADGEN_SCENARIO + user_conf: CM_MLPERF_USER_CONF + devices: CM_QAIC_DEVICES + skip_preprocess: CM_SKIP_PREPROCESS_DATASET + skip_preprocessing: CM_SKIP_PREPROCESS_DATASET + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + rerun: CM_RERUN + +new_state_keys: + - mlperf-inference-implementation + - CM_SUT_* + +# Env keys which are exposed to higher level scripts +new_env_keys: + - CM_MLPERF_* + - CM_DATASET_* + - CM_HW_NAME + - CM_ML_MODEL_* + - CM_MAX_EXAMPLES + - CM_IMAGENET_ACCURACY_DTYPE + - CM_SQUAD_ACCURACY_DTYPE + + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + - tags: get,git,repo + names: + - kilt-repo + update_tags_from_env_with_prefix: + _repo.: + - CM_KILT_REPO_URL + extra_cache_tags: kilt,kilt-repo + env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_KILT_CHECKOUT_PATH + + ######################################################################## + # Install MLPerf inference dependencies + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + # Download MLPerf inference loadgen + - tags: get,mlcommons,inference,loadgen + names: + - inference-loadgen + + # Creates user conf for given SUT + - tags: generate,user-conf,mlperf,inference + names: + - user-conf-generator + + # Get MLPerf logging library + - tags: get,generic-python-lib,_mlperf_logging + names: + - mlperf-logging + + 
######################################################################## + # Install ResNet50 model (ONNX) and ImageNet + + - enable_if_env: + CM_MODEL: + - resnet50 + skip_if_env: + CM_MLPERF_DEVICE: + - qaic + names: + - resnet50-model + - ml-model + tags: get,ml-model,resnet50,_fp32,_onnx,_from-tf + + - enable_if_env: + CM_MODEL: + - resnet50 + CM_MLPERF_DEVICE: + - qaic + tags: compile,qaic,model,_resnet50 + names: + - qaic-model-compiler + - resnet50-compiler + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + + - enable_if_env: + CM_MODEL: + - resnet50 + names: + - imagenet-preprocessed + - dataset-preprocessed + tags: get,dataset,imagenet,preprocessed,_for.resnet50,_NHWC,_full + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + + + + ######################################################################## + # Install bert dependencies + + - enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + names: + - bert-vocab + tags: get,squad-vocab + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + + - enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + names: + - squad-tokenized + tags: get,dataset,tokenized,squad,_raw + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + + ######################################################################## + # Install OpenImages + + - enable_if_env: + CM_MODEL: + - retinanet + CM_MLPERF_DEVICE: + - qaic + tags: compile,qaic,model,_retinanet + names: + - qaic-model-compiler + - retinanet-compiler + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + + - enable_if_env: + CM_MODEL: + - retinanet + names: + - openimages-preprocessed + - dataset-preprocessed + tags: get,dataset,preprocessed,openimages,_for.retinanet.onnx,_NCHW,_validation,_custom-annotations + update_tags_from_env_with_prefix1: #disabling now to prevent unnecessary preprocessing + _quant-scale.: + - CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET + _quant-offset.: + - CM_QAIC_MODEL_RETINANET_IMAGE_SCALE + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + + + + +######################################################################## + # Install ML engines via CM + - enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + CM_MLPERF_DEVICE: + - cpu + tags: get,lib,onnxruntime,lang-cpp,_cpu + + - enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + CM_MLPERF_DEVICE: + - gpu + tags: get,lib,onnxruntime,lang-cpp,_cuda + + +# Post dependencies to run this app including for power measurement +post_deps: + + - names: + - compile-program + tags: compile,cpp-program + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + + - names: + - runner + - mlperf-runner + skip_if_env: + CM_MLPERF_SKIP_RUN: + - 'yes' + - yes + tags: benchmark-mlperf + + - tags: save,mlperf,inference,state + names: + - save-mlperf-inference-state + +# Variations to customize dependencies +variations: + # Target devices + cpu: + group: device + default: true + env: + CM_MLPERF_DEVICE: cpu + kilt_backend_type: cpu + cuda: + group: device + env: + CM_MLPERF_DEVICE: gpu + CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + kilt_backend_type: gpu + qaic: + group: device + env: + CM_MLPERF_DEVICE: qaic + CM_MLPERF_DEVICE_LIB_NAMESPEC: QAic + kilt_backend_type: qaic + deps: + - tags: get,qaic,platform,sdk + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + - tags: get,lib,protobuf,_tag.v3.11.4 + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + - tags: set,device,mode,qaic + enable_if_env: + CM_QAIC_VC: + "on" + update_tags_from_env_with_prefix": + _vc.: + - CM_QAIC_VC + - tags: set,device,mode,qaic,_ecc + enable_if_env: + CM_QAIC_ECC: + "yes" + + tensorrt: + group: framework + env: + CM_MLPERF_BACKEND: 
tensorrt + device: tensorrt + CM_MLPERF_BACKEND_NAME: TensorRT + + # ML engine + onnxruntime: + group: framework + default: true + env: + device: onnxrt + CM_MLPERF_BACKEND: onnxruntime + CM_MLPERF_BACKEND_LIB_NAMESPEC: onnxruntime + + glow: + group: framework + env: + device: qaic + CM_MLPERF_BACKEND: glow + CM_MLPERF_BACKEND_LIB_NAMESPEC: QAic + + bs.#: + group: batch-size + env: + kilt_model_batch_size: "#" + adr: + qaic-model-compiler: + tags: "_bs.#" + + bs.0: + group: batch-size + env: + kilt_model_batch_size: "1" + + # Reference MLPerf models + resnet50: + group: model + default: true + env: + CM_MODEL: resnet50 + kilt_model_name: resnet50 + kilt_input_count: 1 + kilt_output_count: 1 + kilt_input_format: "FLOAT32,-1,224,224,3" + kilt_output_format: "INT64,-1" + dataset_imagenet_preprocessed_input_square_side: 224 + ml_model_has_background_class: "YES" + ml_model_image_height: 224 + loadgen_buffer_size: 1024 + loadgen_dataset_size: 50000 + CM_BENCHMARK: STANDALONE_CLASSIFICATION + + resnet50,uint8: + env: + kilt_input_format: "UINT8,-1,224,224,3" + kilt_device_qaic_skip_stage: convert + CM_IMAGENET_ACCURACY_DTYPE: int8 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + + bert-99,qaic: + deps: + - tags: compile,qaic,model,_bert-99,_pc.99.9980 + names: + - qaic-model-compiler + - bert-99-compiler + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + env: + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int32 + CM_ML_MODEL_INPUTS_DATA_TYPE: int8,fp16 + + bert-99.9,qaic: + deps: + - tags: compile,qaic,model,_bert-99.9 + names: + - qaic-model-compiler + - bert-99.9-compiler + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + env: + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int32 + CM_ML_MODEL_INPUTS_DATA_TYPE: fp16 + + retinanet: + group: model + base: + - bs.1 + env: + CM_MODEL: retinanet + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth" + kilt_model_name: retinanet + kilt_input_count: 1 + #kilt_model_disable_nms: '' + kilt_model_max_detections: 600 + kilt_output_count: 1 + kilt_input_format: "FLOAT32,-1,3,800,800" + kilt_output_format: "INT64,-1" + dataset_imagenet_preprocessed_input_square_side: 224 + ml_model_image_height: 800 + ml_model_image_width: 800 + loadgen_buffer_size: 64 + loadgen_dataset_size: 24576 + CM_BENCHMARK: STANDALONE_OBJECT_DETECTION + + deps: + - tags: get,generic-python-lib,_Pillow + - tags: get,generic-python-lib,_torch + - tags: get,generic-python-lib,_torchvision + - tags: get,generic-python-lib,_opencv-python + - tags: get,generic-python-lib,_numpy + - tags: get,generic-python-lib,_pycocotools + + retinanet,qaic,uint8: + env: + kilt_device_qaic_skip_stage: 'convert' + kilt_input_format: "UINT8,1,3,800,800" + kilt_output_format: "INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,4,1000:INT8,14,1000:INT8,1,4,1000:INT8,1,4,1000:INT8,1,4,1000" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + + + 
bert_: + deps: + - tags: get,generic-python-lib,_transformers + - tags: get,generic-python-lib,_safetensors + - tags: get,generic-python-lib,_onnx + env: + CM_BENCHMARK: STANDALONE_BERT + kilt_model_name: bert + kilt_model_seq_length: 384 + kilt_model_bert_variant: BERT_PACKED + kilt_input_format: "INT64,1,384:INT64,1,8:INT64,1,384:INT64,1,384" + kilt_output_format: "FLOAT32,1,384:FLOAT32,1,384" + dataset_squad_tokenized_max_seq_length: 384 + loadgen_buffer_size: 10833 + loadgen_dataset_size: 10833 + + bert_,qaic: + default_variations: + batch-size: bs.0 + env: + kilt_model_batch_size: 1 + kilt_input_format: "UINT32,1,384:UINT32,1,8:UINT32,1,384:UINT32,1,384" + kilt_input_formata: "UINT32,1,384:UINT32,1,384:UINT32,1,384" + kilt_output_formatia: "UINT8,1,384:UINT8,1,384" + kilt_device_qaic_skip_stage: 'convert' + + standalone: + group: run-mode + default: true + env: + CM_RUN_MODE: standalone + + network-server: + group: run-mode + env: + CM_RUN_MODE: network-server + + network-client: + group: run-mode + env: + CM_RUN_MODE: network-client + + bert_,network-server: + env: + CM_BENCHMARK: NETWORK_BERT_SERVER + + bert_,network-client: + env: + CM_BENCHMARK: NETWORK_BERT_CLIENT + + bert_,singlestream: + env: + kilt_model_batch_size: 1 + + bert-99: + group: model + base: + - bert_ + env: + CM_MODEL: bert-99 + CM_SQUAD_ACCURACY_DTYPE: float32 + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx" + + bert-99.9: + group: model + base: + - bert_ + env: + CM_MODEL: bert-99.9 + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" + + loadgen-batch-size.#: + group: loadgen-batch-size + env: + CM_MLPERF_LOADGEN_BATCH_SIZE: "#" + + bert-99,offline: + default_variations: + loadgen-batch-size: loadgen-batch-size.4096 + + bert-99.9,offline: + default_variations: + loadgen-batch-size: loadgen-batch-size.4096 + + activation-count.#: + env: + CM_MLPERF_QAIC_ACTIVATION_COUNT: "#" + #CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: "activation_count.#" + + maxq: + group: power-mode + env: + CM_MLPERF_NVIDIA_HARNESS_MAXQ: yes + + maxn: + group: power-mode + env: + CM_MLPERF_NVIDIA_HARNESS_MAXN: yes + + singlestream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: SingleStream + adr: + qaic-model-compiler: + tags: _singlestream + singlestream,resnet50: + default_variations: + batch-size: bs.1 + + singlestream,retinanet: + default_variations: + batch-size: bs.1 + + multistream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: MultiStream + adr: + qaic-model-compiler: + tags: _multistream + offline: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Offline + adr: + qaic-model-compiler: + tags: _offline + server: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Server + adr: + qaic-model-compiler: + tags: _server + + uint8: + group: precision + adr: + dataset-preprocessed: + tags: _uint8,_rgb8 + fp16: + group: precision + fp32: + group: precision + adr: + dataset-preprocessed: + tags: _float32,_rgb32 + env: + CM_IMAGENET_ACCURACY_DTYPE: float32 + + nsp.14: + group: nsp + adr: + qaic-model-compiler: + tags: _nsp.14 + + nsp.16: + group: nsp + base: + - pro + adr: + qaic-model-compiler: + tags: _nsp.14 + + nsp.#: + group: nsp + adr: + qaic-model-compiler: + tags: _nsp.# + + + dl2q.24xlarge: + group: sut + base: + - nsp.14 + env: + CM_QAIC_DEVICES: "0,1,2,3,4,5,6,7" + qaic_queue_length: 4 + + dl2q.24xlarge,singlestream: + env: + CM_QAIC_DEVICES: 0 + qaic_activation_count: "1" 
+ + dl2q.24xlarge,resnet50,offline: + default_variations: + batch-size: bs.8 + env: + qaic_activation_count: "3" + + dl2q.24xlarge,bert-99.9,offline: + env: + qaic_activation_count: "7" + + dl2q.24xlarge,bert-99,offline: + env: + qaic_activation_count: "14" + + dl2q.24xlarge,retinanet,offline: + env: + qaic_activation_count: "14" + + dl2q.24xlarge,resnet50,server: + default_variations: + batch-size: bs.8 + env: + qaic_activation_count: "3" + + dl2q.24xlarge,bert-99.9,server: + env: + qaic_activation_count: "7" + + dl2q.24xlarge,retinanet,server: + default_variations: + batch-size: bs.1 + env: + qaic_activation_count: "14" + + dl2q.24xlarge,resnet50,multistream: + default_variations: + batch-size: bs.1 + env: + qaic_activation_count: "1" + + pro: + env: + qaic_queue_length: 10 + + num-devices.4: + env: + CM_QAIC_DEVICES: "0,1,2,3" + + pro,num-devices.4,singlestream: + env: + CM_QAIC_DEVICES: "0" + qaic_activation_count: "1" + + pro,num-devices.4,resnet50,offline: + default_variations: + batch-size: bs.8 + env: + qaic_activation_count: "4" + deps: + - tags: set,device,qaic,_vc.16 + + pro,num-devices.4,bert-99,offline: + default_variations: + loadgen-batch-size: loadgen-batch-size.4096 + env: + qaic_activation_count: "16" + deps: + - tags: set,device,qaic,_vc.15 + + pro,num-devices.4,bert-99.9,offline: + default_variations: + loadgen-batch-size: loadgen-batch-size.4096 + env: + qaic_activation_count: "8" + deps: + - tags: set,device,qaic,_vc.13 + + pro,num-devices.4,bert-99,server: + default_variations: + loadgen-batch-size: loadgen-batch-size.1024 + env: + qaic_activation_count: "16" + deps: + - tags: set,device,qaic,_vc.13 + + pro,num-devices.4,bert-99.9,server: + default_variations: + loadgen-batch-size: loadgen-batch-size.1024 + env: + qaic_activation_count: "8" + deps: + - tags: set,device,qaic,_vc.13 + + pro,num-devices.4,retinanet,offline: + default_variations: + batch-size: bs.1 + env: + qaic_activation_count: "16" + deps: + - tags: set,device,qaic,_vc.17 + + pro,num-devices.4,resnet50,server: + default_variations: + batch-size: bs.8 + env: + qaic_activation_count: "4" + + pro,num-devices.4,retinanet,server: + default_variations: + batch-size: bs.1 + env: + qaic_activation_count: "16" + + rb6: + group: sut + base: + - nsp.9 + env: + CM_QAIC_DEVICES: "0" + qaic_queue_length: 6 + + rb6,singlestream: + env: + qaic_activation_count: "1" + + rb6,resnet50,offline: + default_variations: + batch-size: bs.8 + env: + qaic_activation_count: "2" + + rb6,resnet50,multistream: + default_variations: + batch-size: bs.4 + env: + qaic_activation_count: "2" + + rb6,bert-99,offline: + env: + qaic_activation_count: "9" + + rb6,retinanet,offline: + env: + qaic_activation_count: "9" + + rb6,retinanet,multistream: + env: + qaic_activation_count: "8" + +docker: + docker_real_run: False diff --git a/script/app-mlperf-inference-qualcomm/customize.py b/script/app-mlperf-inference-qualcomm/customize.py new file mode 100644 index 0000000000..fc858d9539 --- /dev/null +++ b/script/app-mlperf-inference-qualcomm/customize.py @@ -0,0 +1,189 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + env = i['env'] + + if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + return {'return':0} + + if 'CM_MODEL' not in env: + return {'return': 1, 'error': 'Please select a variation specifying the model to run'} + if 'CM_MLPERF_BACKEND' not in env: + return {'return': 
1, 'error': 'Please select a variation specifying the backend'} + if 'CM_MLPERF_DEVICE' not in env: + return {'return': 1, 'error': 'Please select a variation specifying the device to run on'} + + kilt_root = env['CM_KILT_CHECKOUT_PATH'] + + print(f"Harness Root: {kilt_root}") + + source_files = [] + env['CM_SOURCE_FOLDER_PATH'] = env['CM_KILT_CHECKOUT_PATH'] + + env['kilt_model_root'] = env.get('CM_ML_MODEL_FILE_WITH_PATH') + + if env.get('CM_MLPERF_LOADGEN_BATCH_SIZE', '') != '': + env['kilt_model_batch_size'] = env['CM_MLPERF_LOADGEN_BATCH_SIZE'] + + if env.get('CM_QAIC_DEVICES', '') != '': + env['kilt_device_ids'] = env['CM_QAIC_DEVICES'] + + if '+ CXXFLAGS' not in env: + env['+ CXXFLAGS'] = [] + + if '+CPLUS_INCLUDE_PATH' not in env: + env['+CPLUS_INCLUDE_PATH'] = [] + + if env['CM_MLPERF_DEVICE'] == "qaic": + env['kilt_model_root'] = os.path.dirname(env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH']) + + if env.get('CM_MODEL') == "resnet50": + env['dataset_imagenet_preprocessed_subset_fof'] = env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'] + env['dataset_imagenet_preprocessed_dir'] = env['CM_DATASET_PREPROCESSED_PATH'] + + elif "bert" in env.get('CM_MODEL'): + env['dataset_squad_tokenized_max_seq_length'] = env['CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH'] + env['dataset_squad_tokenized_root'] = env['CM_DATASET_SQUAD_TOKENIZED_ROOT'] + env['dataset_squad_tokenized_input_ids'] = os.path.basename(env['CM_DATASET_SQUAD_TOKENIZED_INPUT_IDS']) + env['dataset_squad_tokenized_input_mask'] = os.path.basename(env['CM_DATASET_SQUAD_TOKENIZED_INPUT_MASK']) + env['dataset_squad_tokenized_segment_ids'] = os.path.basename(env['CM_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS']) + + elif "retinanet" in env.get('CM_MODEL'): + env['kilt_prior_bin_path'] = os.path.join(kilt_root, "plugins", "nms-abp", "data") + env['kilt_object_detection_preprocessed_subset_fof'] = os.path.basename(env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST']) + env['kilt_object_detection_preprocessed_dir'] = env['CM_DATASET_PREPROCESSED_PATH'] + env['+ CXXFLAGS'].append("-DMODEL_RX50") + env['+ CXXFLAGS'].append("-DSDK_1_11_X") + + loc_offset = env.get('CM_QAIC_MODEL_RETINANET_LOC_OFFSET') + if loc_offset: + env['+ CXXFLAGS'].append("-DMODEL_RX50") + + keys = [ 'LOC_OFFSET', 'LOC_SCALE', 'CONF_OFFSET', 'CONF_SCALE' ] + + if env.get('CM_RETINANET_USE_MULTIPLE_SCALES_OFFSETS', '') == 'yes': + env['+ CXXFLAGS'].append("-DUSE_MULTIPLE_SCALES_OFFSETS=1") + for j in range(0,4): + keys.append(f'LOC_OFFSET{j}') + keys.append(f'LOC_SCALE{j}') + keys.append(f'CONF_OFFSET{j}') + keys.append(f'CONF_SCALE{j}') + + for key in keys: + value = env.get('CM_QAIC_MODEL_RETINANET_'+key, '') + if value != '': + env['+ CXXFLAGS'].append(f" -D{key}_={value} ") + + if env.get('CM_BENCHMARK', '') == 'NETWORK_BERT_SERVER': + source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "server", "pack.cpp")) + source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "server", "server.cpp")) + env['+ CXXFLAGS'].append("-DNETWORK_DIVISION=1") + elif env.get('CM_BENCHMARK', '') == 'NETWORK_BERT_CLIENT': + #source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "client", "pack.cpp")) + #env['+CPLUS_INCLUDE_PATH'].append(kilt_root) + #source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "client", "client.cpp")) + env['+ CXXFLAGS'].append("-DNETWORK_DIVISION") + elif env.get('CM_BENCHMARK', '') == 'STANDALONE_BERT': + source_files.append(os.path.join(kilt_root, "benchmarks", "standalone", "bert", 
"pack.cpp")) + + script_path = i['run_script_input']['path'] + if env['CM_MODEL'] == "retinanet": + env['CM_DATASET_LIST'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + + + for file in os.listdir(env['CM_SOURCE_FOLDER_PATH']): + if file.endswith(".c") or file.endswith(".cpp"): + source_files.append(file) + + if 'SERVER' not in env.get('CM_BENCHMARK', ''): + source_files.append(os.path.join(kilt_root, "benchmarks", "harness", "harness.cpp")) + + #source_files.append(env['CM_QAIC_API_SRC_FILE']) + + env['+CPLUS_INCLUDE_PATH'].append(kilt_root) + env['+C_INCLUDE_PATH'].append(kilt_root) + + if env['CM_MLPERF_DEVICE'] == 'gpu': + env['+C_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) + env['+CPLUS_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) + env['+LD_LIBRARY_PATH'].append(env['CM_CUDA_PATH_LIB']) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) + + elif env['CM_MLPERF_DEVICE'] == 'qaic': + source_files.append(os.path.join(kilt_root, "devices", "qaic", "api", "master", "QAicInfApi.cpp")) + + print(f"Compiling the source files: {source_files}") + env['CM_CXX_SOURCE_FILES'] = ";".join(source_files) + + env['+ CXXFLAGS'].append("-std=c++17") + env['+ CXXFLAGS'].append("-fpermissive") + + env['+ CXXFLAGS'].append("-DKILT_CONFIG_FROM_ENV") + env['+ CXXFLAGS'].append("-DKILT_CONFIG_TRANSLATE_X") + env['+ CXXFLAGS'].append("-DKILT_BENCHMARK_" + env['CM_BENCHMARK']) + env['+ CXXFLAGS'].append("-DKILT_DEVICE_" + env['device'].upper()) + + # add preprocessor flag like "#define CM_MODEL_RESNET50" + #env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper()) + # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME" + env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' + env['CM_MLPERF_BACKEND'].upper()) + # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU" + env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' + env['CM_MLPERF_DEVICE'].upper()) + + if '+ LDCXXFLAGS' not in env: + env['+ LDCXXFLAGS'] = [ ] + + env['+ LDCXXFLAGS'] += [ + "-lmlperf_loadgen", + "-lpthread", + "-ldl" + ] + # e.g. -lonnxruntime + if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env: + env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_BACKEND_LIB_NAMESPEC']) + # e.g. 
-lcudart + if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env: + env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC']) + + if '-DPRINT_NETWORK_DESCRIPTOR' in env['+ CXXFLAGS']: + env['+ LDCXXFLAGS'].append('-lprotobuf') + + env['CM_LINKER_LANG'] = 'CXX' + env['CM_RUN_DIR'] = env.get('CM_MLPERF_OUTPUT_DIR', os.getcwd()) + + if 'CM_MLPERF_CONF' not in env: + env['CM_MLPERF_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'CM_MLPERF_USER_CONF' not in env: + env['CM_MLPERF_USER_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + + + env['loadgen_mlperf_conf_path'] = env['CM_MLPERF_CONF']# to LOADGEN_MLPERF_CONF + env['loadgen_user_conf_path'] = env['CM_MLPERF_USER_CONF']# to LOADGEN_USER_CONF + env['loadgen_scenario'] = env['CM_MLPERF_LOADGEN_SCENARIO'] + + loadgen_mode = env['CM_MLPERF_LOADGEN_MODE'] + if loadgen_mode == 'performance': + kilt_loadgen_mode = 'PerformanceOnly' + elif loadgen_mode == 'accuracy': + kilt_loadgen_mode = 'AccuracyOnly' + elif loadgen_mode == 'compliance': + kilt_loadgen_mode = 'PerformanceOnly' + else: + return {'return':1, 'error': 'Unknown loadgen mode'} + env['loadgen_mode'] = kilt_loadgen_mode + + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/app-mlperf-inference-qualcomm/run.sh b/script/app-mlperf-inference-qualcomm/run.sh new file mode 100644 index 0000000000..ddcd0b5504 --- /dev/null +++ b/script/app-mlperf-inference-qualcomm/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${CM_RUN_DIR} + cmd=${CM_RUN_CMD} + echo "${cmd}" + eval "${cmd}" + test $? -eq 0 || exit $? +fi diff --git a/script/app-mlperf-inference/README-about.md b/script/app-mlperf-inference/README-about.md new file mode 100644 index 0000000000..987c4e79cb --- /dev/null +++ b/script/app-mlperf-inference/README-about.md @@ -0,0 +1,22 @@ +This CM script provides a unified interface to prepare and run a modular version of the [MLPerf inference benchmark](https://arxiv.org/abs/1911.02549) +across diverse ML models, data sets, frameworks, libraries, run-time systems and platforms +using the [cross-platform automation meta-framework (MLCommons CM)](https://github.com/mlcommons/ck). + +It is assembled from reusable and interoperable [CM scripts for DevOps and MLOps](../list_of_scripts.md) +being developed by the [open MLCommons taskforce on automation and reproducibility](../mlperf-education-workgroup.md). + +It is a higher-level wrapper to several other CM scripts modularizing the MLPerf inference benchmark: +* [Reference Python implementation](../app-mlperf-inference-reference) +* [Universal C++ implementation](../app-mlperf-inference-cpp) +* [TFLite C++ implementation](../app-mlperf-inference-tflite-cpp) +* [NVidia optimized implementation](app-mlperf-inference-nvidia) + +See [this SCC'23 tutorial](https://github.com/mlcommons/ck/blob/master/docs/tutorials/sc22-scc-mlperf.md) +to use this script to run a reference (unoptimized) Python implementation of the MLPerf object detection benchmark +with RetinaNet model, Open Images dataset, ONNX runtime and CPU target. + +See this [CM script](../run-mlperf-inference-app) to automate and validate your MLPerf inference submission. 
+ +Get in touch with the [open taskforce on automation and reproducibility at MLCommons](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md) +if you need help with your submission or if you would like to participate in further modularization of MLPerf +and collaborative design space exploration and optimization of ML Systems. diff --git a/script/app-mlperf-inference/README-extra.md b/script/app-mlperf-inference/README-extra.md new file mode 100644 index 0000000000..bd1acdbecb --- /dev/null +++ b/script/app-mlperf-inference/README-extra.md @@ -0,0 +1,131 @@ +# Examples + +## MLPerf object detection with python, RetinaNet, Open Images, ONNX runtime (CPU), Ubuntu + +This example shows how to use this CM script to run the reference Python implementation +of the MLPerf inference benchmark for object detection with RetinaNet, the ONNX run-time (CPU) and Ubuntu. + +Install the MLCommons CM automation meta-framework as described [here](https://github.com/mlcommons/ck/blob/master/cm/docs/installation.md). + +Here is the typical installation on Ubuntu 20.04: + +```bash +sudo apt install python3 python3-pip git wget +python3 -m pip install cmind +source .profile +``` + +Next you need to install a CM repository with [cross-platform CM scripts](https://github.com/mlcommons/ck/tree/master/cm-mlops/script) for ML Systems: + +```bash +cm pull repo mlcommons@ck +``` + +Note that you can fork [this repository](https://github.com/mlcommons/ck) and use it instead of mlcommons@ck +to add CM scripts for your own public and private ML models, data sets, software and hardware. +In that case, just change mlcommons@ck to your own fork in the above command. + +You can find the location of this repository on your system as follows: +```bash +cm find repo mlcommons@ck +``` + +We now suggest setting up a virtual Python environment via CM to avoid interfering with your native Python installation: +```bash +cm run script "install python-venv" --name=mlperf +``` + +If you need a specific Python version, use this command: +```bash +cm run script "install python-venv" --name=mlperf --version=3.10.7 +``` + +You can now test the MLPerf inference benchmark with RetinaNet and the ONNX runtime on CPU using just one CM command: + +```bash +cm run script "app mlperf inference generic reference _python _retinanet _onnxruntime _cpu" \ + --adr.python.name=mlperf \ + --adr.compiler.tags=gcc \ + --scenario=Offline \ + --mode=accuracy \ + --test_query_count=10 \ + --quiet +``` + +The first run of this CM script takes around 25 minutes on a GCP instance with 16 cores and 64 GB of memory because +CM will automatically detect, install and cache all the necessary ML components +while adapting them to your system using [portable CM scripts](https://github.com/mlcommons/ck/tree/master/cm-mlops/script). + +These dependencies are described using [this simple YAML file](https://github.com/octoml/ck/blob/master/cm-mlops/script/app-mlperf-inference-reference/_cm.yaml#L57) +and can be turned on or off using different environment variables passed to this CM script using `--env.KEY=VALUE`. 
+ +At the end, you should see the following output: +```txt + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.654 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.827 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.654 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.657 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.566 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.705 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.735 + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.738 + +mAP=65.417% + +``` + +Any other run will automatically pick up all dependencies from the CM cache while setting up all environment variables and files +to launch the prepared MLPerf inference benchmark. For example, you can run this benchmark in performance mode as follows: + +```bash +cm run script "app mlperf inference generic reference _python _retinanet _onnxruntime _cpu" \ + --adr.python.name=mlperf \ + --adr.compiler.tags=gcc \ + --scenario=Offline \ + --mode=performance \ + --test_query_count=10 \ + --rerun +``` + +You should see the following output: +```txt +TestScenario.Offline qps=0.89, mean=8.6960, time=11.180, acc=31.661%, mAP=65.417%, queries=10, tiles=50.0:8.8280,80.0:9.0455,90.0:9.1450,95.0:9.2375,99.0:9.3114,99.9:9.3281 +``` + + + +### Using Docker + +Please check the prototype of Docker containers with the CM automation meta-framework +for modular MLPerf [here](https://github.com/mlcommons/ck/tree/master/docker) +(ongoing work). + +```bash +docker build -f dockerfiles/resnet50/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile -t resnet50_onnxruntime:ubuntu20.04 . +``` + +```bash +docker run -it --rm resnet50_onnxruntime:ubuntu20.04 -c "cm run script --tags=app,mlperf,inference,reference,_python,_resnet50,_onnxruntime,_cpu --scenario=Offline --mode=accuracy" +``` + + + + +# Future work + +* See the current coverage of different models, devices and backends [here](README-extra.md#current-coverage). + +* See the development roadmap [here](https://github.com/mlcommons/ck/issues/536). + +* See extension projects to enable collaborative benchmarking, design space exploration and optimization of ML and AI Systems [here](https://github.com/mlcommons/ck/issues/627). + + +# Developers + +[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), +[Grigori Fursin](https://cKnowledge.org/gfursin) +and [individual contributors](https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md). 
diff --git a/script/app-mlperf-inference/README.md b/script/app-mlperf-inference/README.md
new file mode 100644
index 0000000000..81bbd528cd
--- /dev/null
+++ b/script/app-mlperf-inference/README.md
@@ -0,0 +1,777 @@
+Automatically generated README for this automation recipe: **app-mlperf-inference**
+
+Category: **Modular MLPerf inference benchmark pipeline**
+
+License: **Apache 2.0**
+
+Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-mlperf-inference,d775cac873ee4231) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+
+This CM script provides a unified interface to prepare and run a modular version of the [MLPerf inference benchmark](https://arxiv.org/abs/1911.02549)
+across diverse ML models, data sets, frameworks, libraries, run-time systems and platforms
+using the [cross-platform automation meta-framework (MLCommons CM)](https://github.com/mlcommons/ck).
+
+It is assembled from reusable and interoperable [CM scripts for DevOps and MLOps](../list_of_scripts.md)
+being developed by the [open MLCommons taskforce on automation and reproducibility](../mlperf-education-workgroup.md).
+
+It is a higher-level wrapper around several other CM scripts modularizing the MLPerf inference benchmark:
+* [Reference Python implementation](../app-mlperf-inference-reference)
+* [Universal C++ implementation](../app-mlperf-inference-cpp)
+* [TFLite C++ implementation](../app-mlperf-inference-tflite-cpp)
+* [NVIDIA optimized implementation](../app-mlperf-inference-nvidia)
+
+See [this SCC'23 tutorial](https://github.com/mlcommons/ck/blob/master/docs/tutorials/sc22-scc-mlperf.md)
+on using this script to run the reference (unoptimized) Python implementation of the MLPerf object detection benchmark
+with the RetinaNet model, the Open Images dataset, ONNX runtime and a CPU target.
+
+See this [CM script](../run-mlperf-inference-app) to automate and validate your MLPerf inference submission.
+
+Get in touch with the [open taskforce on automation and reproducibility at MLCommons](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md)
+if you need help with your submission or if you would like to participate in further modularization of MLPerf
+and collaborative design space exploration and optimization of ML Systems.
+
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see in above meta description): *app,vision,language,mlcommons,mlperf,inference,generic*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+```cmr "app vision language mlcommons mlperf inference generic" --help```
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=app,vision,language,mlcommons,mlperf,inference,generic`
+
+`cm run script --tags=app,vision,language,mlcommons,mlperf,inference,generic[,variations] [--input_flags]`
+
+*or*
+
+`cmr "app vision language mlcommons mlperf inference generic"`
+
+`cmr "app vision language mlcommons mlperf inference generic [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+
+#### Input Flags
+
+* --**scenario**=MLPerf inference scenario {Offline,Server,SingleStream,MultiStream} (*Offline*)
+* --**mode**=MLPerf inference mode {performance,accuracy} (*accuracy*)
+* --**test_query_count**=Specifies the number of samples to be processed during a test run
+* --**target_qps**=Target QPS
+* --**target_latency**=Target Latency
+* --**max_batchsize**=Maximum batchsize to be used
+* --**num_threads**=Number of CPU threads to launch the application with
+* --**hw_name**=Valid value - any system description which has a config file (under same name) defined [here](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-configs-sut-mlperf-inference/configs)
+* --**output_dir**=Location where the outputs are produced
+* --**rerun**=Redo the run even if previous run files exist (*True*)
+* --**regenerate_files**=Regenerates measurement files including accuracy.txt files even if a previous run exists. This option is redundant if `--rerun` is used
+* --**adr.python.name**=Python virtual environment name (optional) (*mlperf*)
+* --**adr.python.version_min**=Minimal Python version (*3.8*)
+* --**adr.python.version**=Force Python version (must have all system deps)
+* --**adr.compiler.tags**=Compiler for loadgen (*gcc*)
+* --**adr.inference-src-loadgen.env.CM_GIT_URL**=Git URL for MLPerf inference sources to build LoadGen (to enable non-reference implementations)
+* --**adr.inference-src.env.CM_GIT_URL**=Git URL for MLPerf inference sources to run benchmarks (to enable non-reference implementations)
+* --**quiet**=Quiet run (select default values for all questions) (*False*)
+* --**readme**=Generate README with the reproducibility report
+* --**debug**=Debug MLPerf script
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "scenario":...})
+```
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'app,vision,language,mlcommons,mlperf,inference,generic',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="app,vision,language,mlcommons,mlperf,inference,generic"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=app,vision,language,mlcommons,mlperf,inference,generic) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "app vision language mlcommons mlperf inference generic[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * Group "**implementation**" +
+ Click here to expand this section. + + * `_cpp` + - Aliases: `_mil,_mlcommons-cpp` + - Environment variables: + - *CM_MLPERF_CPP*: `yes` + - *CM_MLPERF_IMPLEMENTATION*: `mlcommons_cpp` + - *CM_IMAGENET_ACCURACY_DTYPE*: `float32` + - *CM_OPENIMAGES_ACCURACY_DTYPE*: `float32` + - Workflow: + 1. ***Read "prehook_deps" on other CM scripts*** + * app,mlperf,cpp,inference + * `if (CM_SKIP_RUN != True)` + * CM names: `--adr.['cpp-mlperf-inference', 'mlperf-inference-implementation']...` + - CM script: [app-mlperf-inference-mlcommons-cpp](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-mlcommons-cpp) + * `_intel-original` + - Aliases: `_intel` + - Environment variables: + - *CM_MLPERF_IMPLEMENTATION*: `intel` + - Workflow: + 1. ***Read "prehook_deps" on other CM scripts*** + * reproduce,mlperf,inference,intel + * `if (CM_SKIP_RUN != True)` + * CM names: `--adr.['intel', 'intel-harness', 'mlperf-inference-implementation']...` + - CM script: [app-mlperf-inference-intel](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-intel) + * `_kilt` + - Aliases: `_qualcomm` + - Environment variables: + - *CM_MLPERF_IMPLEMENTATION*: `qualcomm` + - Workflow: + 1. ***Read "prehook_deps" on other CM scripts*** + * reproduce,mlperf,inference,kilt + * `if (CM_SKIP_RUN != True)` + * CM names: `--adr.['kilt', 'kilt-harness', 'mlperf-inference-implementation']...` + - CM script: [app-mlperf-inference-qualcomm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-qualcomm) + * `_nvidia-original` + - Aliases: `_nvidia` + - Environment variables: + - *CM_MLPERF_IMPLEMENTATION*: `nvidia` + - *CM_SQUAD_ACCURACY_DTYPE*: `float16` + - *CM_IMAGENET_ACCURACY_DTYPE*: `int32` + - *CM_CNNDM_ACCURACY_DTYPE*: `int32` + - *CM_LIBRISPEECH_ACCURACY_DTYPE*: `int8` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda-devices + * `if (CM_CUDA_DEVICE_PROP_GLOBAL_MEMORY not in ['yes', 'on'])` + - CM script: [get-cuda-devices](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda-devices) + 1. ***Read "prehook_deps" on other CM scripts*** + * reproduce,mlperf,nvidia,inference,_run_harness + * `if (CM_SKIP_RUN != True)` + * CM names: `--adr.['nvidia-original-mlperf-inference', 'nvidia-harness', 'mlperf-inference-implementation']...` + - CM script: [app-mlperf-inference-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-nvidia) + * **`_reference`** (default) + - Aliases: `_mlcommons-python,_python` + - Environment variables: + - *CM_MLPERF_PYTHON*: `yes` + - *CM_MLPERF_IMPLEMENTATION*: `mlcommons_python` + - *CM_SQUAD_ACCURACY_DTYPE*: `float32` + - *CM_IMAGENET_ACCURACY_DTYPE*: `float32` + - *CM_OPENIMAGES_ACCURACY_DTYPE*: `float32` + - *CM_LIBRISPEECH_ACCURACY_DTYPE*: `float32` + - Workflow: + 1. ***Read "prehook_deps" on other CM scripts*** + * app,mlperf,reference,inference + * `if (CM_SKIP_RUN != True)` + * CM names: `--adr.['python-reference-mlperf-inference', 'mlperf-inference-implementation']...` + - CM script: [app-mlperf-inference-mlcommons-python](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-mlcommons-python) + * `_tflite-cpp` + - Aliases: `_ctuning-cpp-tflite` + - Environment variables: + - *CM_MLPERF_TFLITE_CPP*: `yes` + - *CM_MLPERF_CPP*: `yes` + - *CM_MLPERF_IMPLEMENTATION*: `ctuning_cpp_tflite` + - *CM_IMAGENET_ACCURACY_DTYPE*: `float32` + - Workflow: + 1. 
***Read "prehook_deps" on other CM scripts*** + * app,mlperf,tflite-cpp,inference + * `if (CM_SKIP_RUN != True)` + * CM names: `--adr.['tflite-cpp-mlperf-inference', 'mlperf-inference-implementation']...` + - CM script: [app-mlperf-inference-ctuning-cpp-tflite](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-ctuning-cpp-tflite) + +
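+
+For illustration, the sketch below runs the benchmark with two different implementation variations by appending their tags. This is a hedged example: it assumes both implementations are usable on the host, which for `_nvidia-original` requires a working NVIDIA GPU stack.
+
+```python
+import cmind
+
+# _reference is the default implementation; _nvidia-original pulls in the NVIDIA harness instead
+for impl in ['_reference', '_nvidia-original']:
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'app,vision,language,mlcommons,mlperf,inference,generic,' + impl,
+                      'mode': 'performance',
+                      'quiet': True,
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(impl, 'failed:', r['error'])
+```
+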
+ + + * Group "**backend**" +
+ Click here to expand this section. + + * `_deepsparse` + - Environment variables: + - *CM_MLPERF_BACKEND*: `deepsparse` + - Workflow: + * `_glow` + - Environment variables: + - *CM_MLPERF_BACKEND*: `glow` + - Workflow: + * `_ncnn` + - Environment variables: + - *CM_MLPERF_BACKEND*: `ncnn` + - Workflow: + * `_onnxruntime` + - Environment variables: + - *CM_MLPERF_BACKEND*: `onnxruntime` + - Workflow: + * `_pytorch` + - Environment variables: + - *CM_MLPERF_BACKEND*: `pytorch` + - Workflow: + * `_ray` + - Environment variables: + - *CM_MLPERF_BACKEND*: `ray` + - Workflow: + * `_tensorrt` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tensorrt` + - Workflow: + * `_tf` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tf` + - Workflow: + * `_tflite` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tflite` + - Workflow: + * `_tvm-onnx` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tvm-onnx` + - Workflow: + * `_tvm-pytorch` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tvm-pytorch` + - Workflow: + * `_tvm-tflite` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tvm-tflite` + - Workflow: + +
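+
+Each backend variation simply sets `CM_MLPERF_BACKEND` and steers the chosen implementation. Below is a rough sketch of sweeping a few backends for the default `_resnet50` model on CPU; note that some combinations are invalid (e.g. `_resnet50,_pytorch`), as listed under "Unsupported or invalid variation combinations" below.
+
+```python
+import cmind
+
+# Sweep backends that are valid for the default _resnet50 model on CPU
+for backend in ['_onnxruntime', '_tf']:
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'app,vision,language,mlcommons,mlperf,inference,generic,_resnet50,_cpu,' + backend,
+                      'mode': 'accuracy',
+                      'test_query_count': '10',
+                      'quiet': True,
+                      'out': 'con'})
+    print(backend, 'ok' if r['return'] == 0 else 'failed')
+```
+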
+ + + * Group "**device**" +
+ Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `cpu` + - Workflow: + * `_cuda` + - Environment variables: + - *CM_MLPERF_DEVICE*: `gpu` + - Workflow: + * `_qaic` + - Environment variables: + - *CM_MLPERF_DEVICE*: `qaic` + - Workflow: + * `_rocm` + - Environment variables: + - *CM_MLPERF_DEVICE*: `rocm` + - Workflow: + * `_tpu` + - Environment variables: + - *CM_MLPERF_DEVICE*: `tpu` + - Workflow: + +
+ + + * Group "**model**" +
+ Click here to expand this section. + + * `_3d-unet-99` + - Environment variables: + - *CM_MODEL*: `3d-unet-99` + - Workflow: + * `_3d-unet-99.9` + - Environment variables: + - *CM_MODEL*: `3d-unet-99.9` + - Workflow: + * `_bert-99` + - Environment variables: + - *CM_MODEL*: `bert-99` + - Workflow: + * `_bert-99.9` + - Environment variables: + - *CM_MODEL*: `bert-99.9` + - Workflow: + * `_dlrm-v2-99` + - Environment variables: + - *CM_MODEL*: `dlrm-v2-99` + - Workflow: + * `_dlrm-v2-99.9` + - Environment variables: + - *CM_MODEL*: `dlrm-v2-99.9` + - Workflow: + * `_efficientnet` + - Environment variables: + - *CM_MODEL*: `efficientnet` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,dataset-aux,imagenet-aux + - CM script: [get-dataset-imagenet-aux](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-aux) + 1. ***Read "posthook_deps" on other CM scripts*** + * run,accuracy,mlperf,_imagenet + * `if (CM_MLPERF_LOADGEN_MODE in ['accuracy', 'all'] AND CM_MLPERF_ACCURACY_RESULTS_DIR == on)` + * CM names: `--adr.['mlperf-accuracy-script', 'imagenet-accuracy-script']...` + - CM script: [process-mlperf-accuracy](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/process-mlperf-accuracy) + * `_gptj-99` + - Environment variables: + - *CM_MODEL*: `gptj-99` + - Workflow: + * `_gptj-99.9` + - Environment variables: + - *CM_MODEL*: `gptj-99.9` + - Workflow: + * `_llama2-70b-99` + - Environment variables: + - *CM_MODEL*: `llama2-70b-99` + - Workflow: + * `_llama2-70b-99.9` + - Environment variables: + - *CM_MODEL*: `llama2-70b-99.9` + - Workflow: + * `_mobilenet` + - Environment variables: + - *CM_MODEL*: `mobilenet` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,dataset-aux,imagenet-aux + - CM script: [get-dataset-imagenet-aux](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-aux) + 1. ***Read "posthook_deps" on other CM scripts*** + * run,accuracy,mlperf,_imagenet + * `if (CM_MLPERF_LOADGEN_MODE in ['accuracy', 'all'] AND CM_MLPERF_ACCURACY_RESULTS_DIR == on)` + * CM names: `--adr.['mlperf-accuracy-script', 'imagenet-accuracy-script']...` + - CM script: [process-mlperf-accuracy](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/process-mlperf-accuracy) + * **`_resnet50`** (default) + - Environment variables: + - *CM_MODEL*: `resnet50` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,dataset-aux,imagenet-aux + - CM script: [get-dataset-imagenet-aux](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-aux) + 1. ***Read "posthook_deps" on other CM scripts*** + * run,accuracy,mlperf,_imagenet + * `if (CM_MLPERF_LOADGEN_MODE in ['accuracy', 'all'] AND CM_MLPERF_ACCURACY_RESULTS_DIR == on)` + * CM names: `--adr.['mlperf-accuracy-script', 'imagenet-accuracy-script']...` + - CM script: [process-mlperf-accuracy](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/process-mlperf-accuracy) + * `_retinanet` + - Environment variables: + - *CM_MODEL*: `retinanet` + - Workflow: + 1. ***Read "posthook_deps" on other CM scripts*** + * run,accuracy,mlperf,_openimages + * `if (CM_MLPERF_LOADGEN_MODE in ['accuracy', 'all'] AND CM_MLPERF_ACCURACY_RESULTS_DIR == on)` + * CM names: `--adr.['mlperf-accuracy-script', 'openimages-accuracy-script']...` + - CM script: [process-mlperf-accuracy](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/process-mlperf-accuracy) + * `_rnnt` + - Environment variables: + - *CM_MODEL*: `rnnt` + - Workflow: + 1. 
***Read "posthook_deps" on other CM scripts*** + * run,accuracy,mlperf,_librispeech + * `if (CM_MLPERF_LOADGEN_MODE in ['accuracy', 'all'] AND CM_MLPERF_ACCURACY_RESULTS_DIR == on) AND (CM_MLPERF_IMPLEMENTATION != nvidia)` + * CM names: `--adr.['mlperf-accuracy-script', 'librispeech-accuracy-script']...` + - CM script: [process-mlperf-accuracy](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/process-mlperf-accuracy) + * `_sdxl` + - Environment variables: + - *CM_MODEL*: `stable-diffusion-xl` + - Workflow: + 1. ***Read "posthook_deps" on other CM scripts*** + * run,accuracy,mlperf,_coco2014 + * `if (CM_MLPERF_LOADGEN_MODE in ['accuracy', 'all'] AND CM_MLPERF_ACCURACY_RESULTS_DIR == on) AND (CM_MLPERF_IMPLEMENTATION != nvidia)` + * CM names: `--adr.['mlperf-accuracy-script', 'coco2014-accuracy-script']...` + - CM script: [process-mlperf-accuracy](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/process-mlperf-accuracy) + +
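+
+The accuracy post-processing steps listed above are guarded by `enable_if_env` and `skip_if_env` conditions on the environment. The following is a rough sketch of their semantics (a simplified model for illustration, not the actual CM implementation):
+
+```python
+def dep_is_active(env, enable_if_env=None, skip_if_env=None):
+    # enable_if_env: every listed key must hold one of the listed values
+    for key, values in (enable_if_env or {}).items():
+        if env.get(key, '') not in values:
+            return False
+    # skip_if_env: if any listed key holds one of the listed values, the dep is skipped
+    for key, values in (skip_if_env or {}).items():
+        if env.get(key, '') in values:
+            return False
+    return True
+
+# E.g. the _retinanet accuracy script only runs in accuracy/all mode:
+env = {'CM_MLPERF_LOADGEN_MODE': 'accuracy', 'CM_MLPERF_ACCURACY_RESULTS_DIR': 'on'}
+assert dep_is_active(env,
+                     enable_if_env={'CM_MLPERF_LOADGEN_MODE': ['accuracy', 'all'],
+                                    'CM_MLPERF_ACCURACY_RESULTS_DIR': ['on']})
+```
+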
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * `_bfloat16` + - Environment variables: + - *CM_MLPERF_QUANTIZATION*: `False` + - *CM_MLPERF_MODEL_PRECISION*: `float32` + - Workflow: + * `_float16` + - Environment variables: + - *CM_MLPERF_QUANTIZATION*: `False` + - *CM_MLPERF_MODEL_PRECISION*: `float32` + - Workflow: + * **`_float32`** (default) + - Aliases: `_fp32` + - Environment variables: + - *CM_MLPERF_QUANTIZATION*: `False` + - *CM_MLPERF_MODEL_PRECISION*: `float32` + - Workflow: + * `_int4` + - Environment variables: + - *CM_MLPERF_QUANTIZATION*: `True` + - *CM_MLPERF_MODEL_PRECISION*: `int4` + - Workflow: + * `_int8` + - Aliases: `_quantized` + - Environment variables: + - *CM_MLPERF_QUANTIZATION*: `True` + - *CM_MLPERF_MODEL_PRECISION*: `int8` + - Workflow: + * `_uint8` + - Environment variables: + - *CM_MLPERF_QUANTIZATION*: `True` + - *CM_MLPERF_MODEL_PRECISION*: `uint8` + - Workflow: + +
+ + + * Group "**execution-mode**" +
+ Click here to expand this section. + + * `_fast` + - Environment variables: + - *CM_FAST_FACTOR*: `5` + - *CM_OUTPUT_FOLDER_NAME*: `fast_results` + - *CM_MLPERF_RUN_STYLE*: `fast` + - Workflow: + * **`_test`** (default) + - Environment variables: + - *CM_OUTPUT_FOLDER_NAME*: `test_results` + - *CM_MLPERF_RUN_STYLE*: `test` + - Workflow: + * `_valid` + - Environment variables: + - *CM_OUTPUT_FOLDER_NAME*: `valid_results` + - *CM_MLPERF_RUN_STYLE*: `valid` + - Workflow: + +
+ + + * Group "**reproducibility**" +
+ Click here to expand this section. + + * `_r2.1_default` + - Environment variables: + - *CM_SKIP_SYS_UTILS*: `yes` + - *CM_TEST_QUERY_COUNT*: `100` + - Workflow: + * `_r3.0_default` + - Environment variables: + - *CM_SKIP_SYS_UTILS*: `yes` + - Workflow: + * `_r3.1_default` + - Workflow: + * `_r4.0_default` + - Workflow: + +
+ + + * *Internal group (variations should not be selected manually)* +
+ Click here to expand this section. + + * `_3d-unet_` + - Environment variables: + - *CM_MLPERF_MODEL_EQUAL_ISSUE_MODE*: `yes` + - Workflow: + 1. ***Read "posthook_deps" on other CM scripts*** + * run,accuracy,mlperf,_kits19,_int8 + * `if (CM_MLPERF_LOADGEN_MODE in ['accuracy', 'all'] AND CM_MLPERF_ACCURACY_RESULTS_DIR == on) AND (CM_MLPERF_IMPLEMENTATION != nvidia)` + * CM names: `--adr.['mlperf-accuracy-script', '3d-unet-accuracy-script']...` + - CM script: [process-mlperf-accuracy](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/process-mlperf-accuracy) + * `_bert_` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,dataset,squad,language-processing + * `if (CM_DATASET_SQUAD_VAL_PATH not in on)` + - CM script: [get-dataset-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-squad) + * get,dataset-aux,squad-vocab + * `if (CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH not in on)` + - CM script: [get-dataset-squad-vocab](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-squad-vocab) + 1. ***Read "posthook_deps" on other CM scripts*** + * run,accuracy,mlperf,_squad + * `if (CM_MLPERF_LOADGEN_MODE in ['accuracy', 'all'] AND CM_MLPERF_ACCURACY_RESULTS_DIR == on)` + * CM names: `--adr.['squad-accuracy-script', 'mlperf-accuracy-script']...` + - CM script: [process-mlperf-accuracy](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/process-mlperf-accuracy) + * `_dlrm_` + - Workflow: + 1. ***Read "posthook_deps" on other CM scripts*** + * run,accuracy,mlperf,_terabyte,_float32 + * `if (CM_MLPERF_LOADGEN_MODE in ['accuracy', 'all'] AND CM_MLPERF_ACCURACY_RESULTS_DIR == on)` + * CM names: `--adr.['terabyte-accuracy-script', 'mlperf-accuracy-script']...` + - CM script: [process-mlperf-accuracy](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/process-mlperf-accuracy) + * `_gptj_` + - Aliases: `_gptj` + - Environment variables: + - *CM_MLPERF_MODEL_EQUAL_ISSUE_MODE*: `yes` + - Workflow: + 1. ***Read "posthook_deps" on other CM scripts*** + * run,accuracy,mlperf,_cnndm + * `if (CM_MLPERF_LOADGEN_MODE in ['accuracy', 'all'] AND CM_MLPERF_ACCURACY_RESULTS_DIR == on) AND (CM_MLPERF_IMPLEMENTATION != intel)` + * CM names: `--adr.['cnndm-accuracy-script', 'mlperf-accuracy-script']...` + - CM script: [process-mlperf-accuracy](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/process-mlperf-accuracy) + * `_intel-original,gptj_` + - Workflow: + * `_llama2-70b_` + - Environment variables: + - *CM_MLPERF_MODEL_EQUAL_ISSUE_MODE*: `yes` + - Workflow: + 1. ***Read "posthook_deps" on other CM scripts*** + * run,accuracy,mlperf,_open-orca,_int32 + * `if (CM_MLPERF_LOADGEN_MODE in ['accuracy', 'all'] AND CM_MLPERF_ACCURACY_RESULTS_DIR == on) AND (CM_MLPERF_IMPLEMENTATION != nvidia)` + * CM names: `--adr.['mlperf-accuracy-script', 'open-orca-accuracy-script']...` + - CM script: [process-mlperf-accuracy](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/process-mlperf-accuracy) + * `_reference,bert_` + - Workflow: + * `_reference,dlrm-v2_` + - Workflow: + * `_reference,gptj_` + - Workflow: + * `_reference,llama2-70b_` + - Workflow: + * `_reference,sdxl_` + - Workflow: + +
+ + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_intel-original,bert-99` + - Workflow: + * `_intel-original,bert-99.9` + - Workflow: + * `_intel-original,gptj-99` + - Workflow: + * `_intel-original,gptj-99.9` + - Workflow: + * `_intel-original,gptj_,build-harness` + - Workflow: + * `_intel-original,resnet50` + - Workflow: + * `_intel-original,retinanet` + - Workflow: + * `_kilt,qaic,bert-99` + - Workflow: + * `_kilt,qaic,bert-99.9` + - Workflow: + * `_kilt,qaic,resnet50` + - Workflow: + * `_kilt,qaic,retinanet` + - Workflow: + * `_power` + - Environment variables: + - *CM_MLPERF_POWER*: `yes` + - *CM_SYSTEM_POWER*: `yes` + - Workflow: + * `_reference,resnet50` + - Workflow: + * `_reference,retinanet` + - Workflow: + * `_rnnt,reference` + - Environment variables: + - *CM_MLPERF_PRINT_SUMMARY*: `no` + - Workflow: + * `_valid,retinanet` + - Workflow: + +
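+
+For instance, the `_power` variation is typically combined with the power-related input flags listed under "Script flags mapped to environment" below. A hedged sketch via the Python API (the server address, amps and volts are placeholder values for illustration):
+
+```python
+import cmind
+
+# Performance run with power measurement enabled via the _power variation
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'app,vision,language,mlcommons,mlperf,inference,generic,_power',
+                  'mode': 'performance',
+                  'power_server': '192.168.0.15',  # placeholder address of the power server
+                  'max_amps': '2.5',               # placeholder limits for the power analyzer
+                  'max_volts': '250',
+                  'quiet': True,
+                  'out': 'con'})
+```
+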
+ + + * Group "**batch_size**" +
+ Click here to expand this section. + + * `_batch_size.#` + - Environment variables: + - *CM_MLPERF_LOADGEN_MAX_BATCHSIZE*: `#` + - Workflow: + +
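+
+The `#` in `_batch_size.#` is a wildcard substituted by the value you supply: `_batch_size.32` sets `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=32`, which is equivalent to passing `--max_batchsize=32`. A minimal sketch:
+
+```python
+import cmind
+
+# _batch_size.32 expands the wildcard variation and caps the loadgen batch size at 32
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'app,vision,language,mlcommons,mlperf,inference,generic,_batch_size.32',
+                  'quiet': True,
+                  'out': 'con'})
+```
+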
+ + + * Group "**loadgen-scenario**" +
+ Click here to expand this section. + + * `_multistream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `MultiStream` + - Workflow: + * **`_offline`** (default) + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Offline` + - Workflow: + * `_server` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Server` + - Workflow: + * `_singlestream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `SingleStream` + - Workflow: + +
+ + +#### Unsupported or invalid variation combinations + + + +* `_resnet50,_pytorch` +* `_retinanet,_tf` +* `_nvidia-original,_tf` +* `_nvidia-original,_onnxruntime` +* `_nvidia-original,_pytorch` +* `_nvidia,_tf` +* `_nvidia,_onnxruntime` +* `_nvidia,_pytorch` +* `_gptj,_tf` + +#### Default variations + +`_cpu,_float32,_offline,_reference,_resnet50,_test` + +#### Script flags mapped to environment +
+
+Click here to expand this section.
+
+* `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value`
+* `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+* `--debug=value` → `CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM=value`
+* `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value`
+* `--gpu_name=value` → `CM_NVIDIA_GPU_NAME=value`
+* `--hw_name=value` → `CM_HW_NAME=value`
+* `--imagenet_path=value` → `IMAGENET_PATH=value`
+* `--max_amps=value` → `CM_MLPERF_POWER_MAX_AMPS=value`
+* `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+* `--max_volts=value` → `CM_MLPERF_POWER_MAX_VOLTS=value`
+* `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+* `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+* `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value`
+* `--num_threads=value` → `CM_NUM_THREADS=value`
+* `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+* `--output_dir=value` → `OUTPUT_BASE_DIR=value`
+* `--power=value` → `CM_MLPERF_POWER=value`
+* `--power_server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value`
+* `--readme=value` → `CM_MLPERF_README=value`
+* `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value`
+* `--rerun=value` → `CM_RERUN=value`
+* `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+* `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+* `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+* `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+* `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+* `--test_query_count=value` → `CM_TEST_QUERY_COUNT=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "clean":...})
+```
+
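+
+A rough sketch of the mechanics behind this table (a simplified model for illustration, not the actual CM implementation; the dictionary below copies a few entries from the list above):
+
+```python
+# Subset of the input_mapping table from this script's _cm.yaml
+input_mapping = {
+    'scenario': 'CM_MLPERF_LOADGEN_SCENARIO',
+    'mode': 'CM_MLPERF_LOADGEN_MODE',
+    'max_batchsize': 'CM_MLPERF_LOADGEN_MAX_BATCHSIZE',
+}
+
+def flags_to_env(flags):
+    # Each recognized CLI flag is copied into the corresponding CM_* env variable
+    return {input_mapping[k]: str(v) for k, v in flags.items() if k in input_mapping}
+
+print(flags_to_env({'scenario': 'Offline', 'mode': 'performance', 'max_batchsize': 8}))
+# {'CM_MLPERF_LOADGEN_SCENARIO': 'Offline', 'CM_MLPERF_LOADGEN_MODE': 'performance', 'CM_MLPERF_LOADGEN_MAX_BATCHSIZE': '8'}
+```
+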
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MLPERF_LOADGEN_MODE: `accuracy` +* CM_MLPERF_LOADGEN_SCENARIO: `Offline` +* CM_OUTPUT_FOLDER_NAME: `test_results` +* CM_MLPERF_RUN_STYLE: `test` +* CM_TEST_QUERY_COUNT: `10` +* CM_MLPERF_QUANTIZATION: `False` + +
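+
+Since these defaults are plain environment keys, they can be overridden without selecting any variation. A minimal sketch via the Python API (the chosen key and value are only an example):
+
+```python
+import cmind
+
+# Raise the test query count from the default 10 to 50 via an env override
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'app,vision,language,mlcommons,mlperf,inference,generic',
+                  'env': {'CM_TEST_QUERY_COUNT': '50'},
+                  'quiet': True,
+                  'out': 'con'})
+```
+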
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference/_cm.yaml)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * get,sys-utils-cm
+       - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm)
+     * get,python
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,mlcommons,inference,src
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,mlperf,inference,utils
+       - CM script: [get-mlperf-inference-utils](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-utils)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference/_cm.yaml)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference/run.sh)
+  1. ***Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference/_cm.yaml)***
+     * get,mlperf,sut,description
+       - CM script: [get-mlperf-inference-sut-description](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-sut-description)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference/customize.py)***
+  1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-inference/_cm.yaml) + +___ +### Script output +`cmr "app vision language mlcommons mlperf inference generic [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_MLPERF_*` +#### New environment keys auto-detected from customize + +* `CM_MLPERF_LOADGEN_COMPLIANCE_TEST` \ No newline at end of file diff --git a/script/app-mlperf-inference/_cm.yaml b/script/app-mlperf-inference/_cm.yaml new file mode 100644 index 0000000000..a6dee6fba1 --- /dev/null +++ b/script/app-mlperf-inference/_cm.yaml @@ -0,0 +1,1258 @@ +# Identification of this CM script +alias: app-mlperf-inference +uid: d775cac873ee4231 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf inference benchmark pipeline" + +developers: "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin)" + +# User-friendly tags to find this CM script +tags: + - app + - vision + - language + - mlcommons + - mlperf + - inference + - generic + +# Default environment +default_env: + CM_MLPERF_LOADGEN_MODE: accuracy + CM_MLPERF_LOADGEN_SCENARIO: Offline + CM_OUTPUT_FOLDER_NAME: test_results + CM_MLPERF_RUN_STYLE: test + CM_TEST_QUERY_COUNT: '10' + CM_MLPERF_QUANTIZATION: off + +env: + CM_MLPERF_PRINT_SUMMARY: "no" + CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'no' + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + docker: CM_RUN_DOCKER_CONTAINER + hw_name: CM_HW_NAME + imagenet_path: IMAGENET_PATH + max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + mode: CM_MLPERF_LOADGEN_MODE + num_threads: CM_NUM_THREADS + output_dir: OUTPUT_BASE_DIR + power: CM_MLPERF_POWER + power_server: CM_MLPERF_POWER_SERVER_ADDRESS + ntp_server: CM_MLPERF_POWER_NTP_SERVER + max_amps: CM_MLPERF_POWER_MAX_AMPS + max_volts: CM_MLPERF_POWER_MAX_VOLTS + regenerate_files: CM_REGENERATE_MEASURE_FILES + rerun: CM_RERUN + scenario: CM_MLPERF_LOADGEN_SCENARIO + test_query_count: CM_TEST_QUERY_COUNT + clean: CM_MLPERF_CLEAN_SUBMISSION_DIR + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + readme: CM_MLPERF_README + debug: CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM + gpu_name: CM_NVIDIA_GPU_NAME + +# Duplicate CM environment variables to the ones used in native apps +env_key_mappings: + CM_HOST_: HOST_ + CM_ML_: ML_ + CM_MLPERF_TVM: MLPERF_TVM + +# Env keys which are exposed to higher level scripts +new_env_keys: + - CM_MLPERF_* + +new_state_keys: + - app_mlperf_inference_* + - cm-mlperf-inference-results* + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect/install python + - tags: get,python + names: + - python + - python3 + + + ######################################################################## + # Install MLPerf inference dependencies + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + - tags: get,mlperf,inference,utils + +posthook_deps: + - tags: 
get,mlperf,sut,description #populate system meta information like framework + +# Order of variations for documentation +variation_groups_order: + - implementation + - backend + - device + - model + - precision + - execution-mode + - reproducibility + +# Variations to customize dependencies +variations: + # Implementation (cpp, reference/python, nvidia, tflite-cpp) + cpp: + group: + implementation + add_deps_recursive: + imagenet-accuracy-script: + tags: _int64 + env: + CM_MLPERF_CPP: 'yes' + CM_MLPERF_IMPLEMENTATION: mlcommons_cpp + CM_IMAGENET_ACCURACY_DTYPE: float32 + CM_OPENIMAGES_ACCURACY_DTYPE: float32 + prehook_deps: + - names: + - cpp-mlperf-inference + - mlperf-inference-implementation + tags: app,mlperf,cpp,inference + skip_if_env: + CM_SKIP_RUN: + - yes + + mil: + alias: cpp + + mlcommons-cpp: + alias: cpp + + ctuning-cpp-tflite: + alias: tflite-cpp + + tflite-cpp: + default_variations: + backend: tflite + device: cpu + group: + implementation + add_deps_recursive: + imagenet-accuracy-script: + tags: _float32 + env: + CM_MLPERF_TFLITE_CPP: 'yes' + CM_MLPERF_CPP: 'yes' + CM_MLPERF_IMPLEMENTATION: ctuning_cpp_tflite + CM_IMAGENET_ACCURACY_DTYPE: float32 + prehook_deps: + - names: + - tflite-cpp-mlperf-inference + - mlperf-inference-implementation + tags: app,mlperf,tflite-cpp,inference + skip_if_env: + CM_SKIP_RUN: + - yes + + reference: + group: + implementation + default: + true + add_deps_recursive: + imagenet-accuracy-script: + tags: _float32 + squad-accuracy-script: + tags: _float32 + librispeech-accuracy-script: + tags: _int32 + env: + CM_MLPERF_PYTHON: 'yes' + CM_MLPERF_IMPLEMENTATION: mlcommons_python + CM_SQUAD_ACCURACY_DTYPE: float32 + CM_IMAGENET_ACCURACY_DTYPE: float32 + CM_OPENIMAGES_ACCURACY_DTYPE: float32 + CM_LIBRISPEECH_ACCURACY_DTYPE: float32 + prehook_deps: + - names: + - python-reference-mlperf-inference + - mlperf-inference-implementation + tags: app,mlperf,reference,inference + skip_if_env: + CM_SKIP_RUN: + - yes + + python: + alias: reference + + nvidia: + alias: nvidia-original + + mlcommons-python: + alias: reference + + reference,gptj_: + default_variations: + backend: pytorch + + reference,sdxl_: + default_variations: + backend: pytorch + + reference,dlrm-v2_: + default_variations: + backend: pytorch + + reference,llama2-70b_: + default_variations: + backend: pytorch + + reference,resnet50: + default_variations: + backend: onnxruntime + + reference,retinanet: + default_variations: + backend: onnxruntime + + reference,bert_: + default_variations: + backend: onnxruntime + + nvidia-original: + docker: + interactive: True + extra_run_args: ' --runtime=nvidia --ulimit memlock=-1 --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined' + base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v3.1-cuda12.2-cudnn8.9-x86_64-ubuntu20.04-l4-public + docker:os_version: "20.04" + deps: + - tags: get,mlperf,inference,nvidia,scratch,space + - tags: get,nvidia-docker + mounts: + - "${{ CM_CUDNN_TAR_FILE_PATH }}:${{ CM_CUDNN_TAR_FILE_PATH }}" + - "${{ CM_TENSORRT_TAR_FILE_PATH }}:${{ CM_TENSORRT_TAR_FILE_PATH }}" + - "${{ CUDA_RUN_FILE_LOCAL_PATH }}:${{ CUDA_RUN_FILE_LOCAL_PATH }}" + - "${{ MLPERF_SCRATCH_PATH }}:${{ MLPERF_SCRATCH_PATH }}" + default_variations: + backend: tensorrt + device: cuda + group: + implementation + add_deps_recursive: + imagenet-accuracy-script: + tags: _int32 + squad-accuracy-script: + tags: _float16 + librispeech-accuracy-script: + tags: _int8 + cnndm-accuracy-script: + tags: _int32 + env: 
+ CM_MLPERF_IMPLEMENTATION: nvidia + CM_SQUAD_ACCURACY_DTYPE: float16 + CM_IMAGENET_ACCURACY_DTYPE: int32 + CM_CNNDM_ACCURACY_DTYPE: int32 + CM_LIBRISPEECH_ACCURACY_DTYPE: int8 + deps: + - tags: get,cuda-devices + skip_if_env: + CM_CUDA_DEVICE_PROP_GLOBAL_MEMORY: + - "yes" + - "on" + prehook_deps: + - names: + - nvidia-original-mlperf-inference + - nvidia-harness + - mlperf-inference-implementation + tags: reproduce,mlperf,nvidia,inference,_run_harness + skip_if_env: + CM_SKIP_RUN: + - yes + update_tags_from_env_with_prefix: + "_gpu_memory." : + - CM_NVIDIA_GPU_MEMORY + update_tags_from_env: + - CM_NVIDIA_HARNESS_GPU_VARIATION + + intel: + alias: intel-original + + intel-original: + group: + implementation + docker: + interactive: True + extra_run_args: ' --privileged' + mounts: + - "${{ CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}:${{ CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}" + - "${{ GPTJ_CHECKPOINT_PATH }}:${{ GPTJ_CHECKPOINT_PATH }}" + skip_run_cmd: 'no' + shm_size: '32gb' + docker_os: ubuntu + docker_real_run: false + run: true + docker_input_mapping: + imagenet_path: IMAGENET_PATH + gptj_checkpoint_path: GPTJ_CHECKPOINT_PATH + criteo_preprocessed_path: CRITEO_PREPROCESSED_PATH + dlrm_data_path: DLRM_DATA_PATH + intel_gptj_int8_model_path: CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH + default_variations: + device: cpu + backend: pytorch + prehook_deps: + - names: + - intel + - intel-harness + - mlperf-inference-implementation + tags: reproduce,mlperf,inference,intel + skip_if_env: + CM_SKIP_RUN: + - yes + env: + CM_MLPERF_IMPLEMENTATION: intel + + intel-original,gptj_: + docker: + deps: + - tags: get,ml-model,gptj + + intel-original,gptj_,build-harness: + docker: + run: false + + qualcomm: + alias: kilt + + kilt: + group: + implementation + default_variations: + device: qaic + backend: glow + prehook_deps: + - names: + - kilt + - kilt-harness + - mlperf-inference-implementation + tags: reproduce,mlperf,inference,kilt + skip_if_env: + CM_SKIP_RUN: + - yes + env: + CM_MLPERF_IMPLEMENTATION: qualcomm + docker: + interactive: True + + kilt,qaic,resnet50: + default_variations: + precision: uint8 + + kilt,qaic,retinanet: + default_variations: + precision: uint8 + + kilt,qaic,bert-99: + default_variations: + precision: uint8 + + kilt,qaic,bert-99.9: + default_variations: + precision: float16 + + intel-original,resnet50: + default_variations: + precision: int8 + + intel-original,retinanet: + default_variations: + precision: int8 + + intel-original,bert-99: + default_variations: + precision: int8 + + intel-original,bert-99.9: + default_variations: + precision: int8 + + intel-original,gptj-99: + default_variations: + precision: int4 + + intel-original,gptj-99.9: + default_variations: + precision: bfloat16 + + resnet50: + group: + model + default: + true + env: + CM_MODEL: + resnet50 + deps: + - tags: get,dataset-aux,imagenet-aux + add_deps_recursive: + mlperf-inference-implementation: + tags: _resnet50 + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - mlperf-accuracy-script + - imagenet-accuracy-script + tags: run,accuracy,mlperf,_imagenet + docker: + deps: + - tags: get,dataset,imagenet,original + names: + - imagenet-original + - dataset-original + + retinanet: + group: + model + env: + CM_MODEL: + retinanet + add_deps_recursive: + mlperf-inference-implementation: + tags: _retinanet + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + 
CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - mlperf-accuracy-script + - openimages-accuracy-script + tags: run,accuracy,mlperf,_openimages + + 3d-unet-99: + group: + model + base: + - 3d-unet_ + env: + CM_MODEL: + 3d-unet-99 + add_deps_recursive: + mlperf-inference-implementation: + tags: _3d-unet-99 + + 3d-unet-99.9: + group: + model + base: + - 3d-unet_ + env: + CM_MODEL: + 3d-unet-99.9 + add_deps_recursive: + mlperf-inference-implementation: + tags: _3d-unet-99.9 + + 3d-unet_: + env: + CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes' + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + skip_if_env: + CM_MLPERF_IMPLEMENTATION: + - nvidia + names: + - mlperf-accuracy-script + - 3d-unet-accuracy-script + tags: run,accuracy,mlperf,_kits19,_int8 + + sdxl: + group: + model + env: + CM_MODEL: + stable-diffusion-xl + default_variations: + precision: float16 + backend: pytorch + device: cuda + add_deps_recursive: + mlperf-inference-implementation: + tags: _sdxl + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + skip_if_env: + CM_MLPERF_IMPLEMENTATION: + - nvidia + names: + - mlperf-accuracy-script + - coco2014-accuracy-script + tags: run,accuracy,mlperf,_coco2014 + + llama2-70b_: + env: + CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes' + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + skip_if_env: + CM_MLPERF_IMPLEMENTATION: + - nvidia + names: + - mlperf-accuracy-script + - open-orca-accuracy-script + tags: run,accuracy,mlperf,_open-orca,_int32 + + llama2-70b-99: + group: + model + base: + - llama2-70b_ + env: + CM_MODEL: + llama2-70b-99 + add_deps_recursive: + mlperf-inference-implementation: + tags: _llama2-70b-99 + + llama2-70b-99.9: + group: + model + base: + - llama2-70b_ + env: + CM_MODEL: + llama2-70b-99.9 + add_deps_recursive: + mlperf-inference-implementation: + tags: _llama2-70b-99.9 + + rnnt: + group: + model + env: + CM_MODEL: + rnnt + add_deps_recursive: + mlperf-inference-implementation: + tags: _rnnt + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + skip_if_env: + CM_MLPERF_IMPLEMENTATION: + - nvidia + names: + - mlperf-accuracy-script + - librispeech-accuracy-script + tags: run,accuracy,mlperf,_librispeech + + rnnt,reference: + env: + CM_MLPERF_PRINT_SUMMARY: "no" + + gptj-99: + group: + model + base: + - gptj_ + env: + CM_MODEL: + gptj-99 + add_deps_recursive: + mlperf-inference-implementation: + tags: _gptj-99 + + gptj-99.9: + group: + model + base: + - gptj_ + env: + CM_MODEL: + gptj-99.9 + add_deps_recursive: + mlperf-inference-implementation: + tags: _gptj-99.9 + + gptj: + alias: gptj_ + + gptj_: + env: + CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes' + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + skip_if_env: + CM_MLPERF_IMPLEMENTATION: + - intel + names: + - cnndm-accuracy-script + - mlperf-accuracy-script + tags: run,accuracy,mlperf,_cnndm + + bert_: + deps: + - skip_if_env: + CM_DATASET_SQUAD_VAL_PATH: "on" + tags: get,dataset,squad,language-processing + - skip_if_env: + CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: "on" + tags: get,dataset-aux,squad-vocab + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - 
squad-accuracy-script + - mlperf-accuracy-script + tags: run,accuracy,mlperf,_squad + add_deps_recursive: + inference-src: + tags: _deeplearningexamples + + bert-99: + group: + model + base: + - bert_ + env: + CM_MODEL: + bert-99 + add_deps_recursive: + mlperf-inference-implementation: + tags: _bert-99 + + bert-99.9: + group: + model + base: + - bert_ + env: + CM_MODEL: + bert-99.9 + add_deps_recursive: + mlperf-inference-implementation: + tags: _bert-99.9 + + dlrm_: + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - terabyte-accuracy-script + - mlperf-accuracy-script + tags: run,accuracy,mlperf,_terabyte,_float32 + + dlrm-v2-99: + group: + model + base: + - dlrm_ + env: + CM_MODEL: + dlrm-v2-99 + add_deps_recursive: + mlperf-inference-implementation: + tags: _dlrm-v2-99 + + dlrm-v2-99.9: + group: + model + base: + - dlrm_ + env: + CM_MODEL: + dlrm-v2-99.9 + add_deps_recursive: + mlperf-inference-implementation: + tags: _dlrm-v2-99.9 + + mobilenet: + group: + model + env: + CM_MODEL: + mobilenet + add_deps_recursive: + mlperf-inference-implementation: + tags: _mobilenet + deps: + - tags: get,dataset-aux,imagenet-aux + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - mlperf-accuracy-script + - imagenet-accuracy-script + tags: run,accuracy,mlperf,_imagenet + + efficientnet: + group: + model + env: + CM_MODEL: + efficientnet + add_deps_recursive: + mlperf-inference-implementation: + tags: _efficientnet + deps: + - tags: get,dataset-aux,imagenet-aux + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - mlperf-accuracy-script + - imagenet-accuracy-script + tags: run,accuracy,mlperf,_imagenet + + onnxruntime: + group: backend + env: + CM_MLPERF_BACKEND: + onnxruntime + add_deps_recursive: + mlperf-inference-implementation: + tags: _onnxruntime + + tensorrt: + group: backend + env: + CM_MLPERF_BACKEND: + tensorrt + add_deps_recursive: + mlperf-inference-implementation: + tags: _tensorrt + + tf: + group: backend + env: + CM_MLPERF_BACKEND: + tf + add_deps_recursive: + mlperf-inference-implementation: + tags: _tf + + pytorch: + group: backend + env: + CM_MLPERF_BACKEND: + pytorch + add_deps_recursive: + mlperf-inference-implementation: + tags: _pytorch + + ncnn: + group: backend + env: + CM_MLPERF_BACKEND: + ncnn + add_deps_recursive: + mlperf-inference-implementation: + tags: _ncnn + + deepsparse: + group: backend + default_variations: + precision: int8 + env: + CM_MLPERF_BACKEND: + deepsparse + add_deps_recursive: + mlperf-inference-implementation: + tags: _deepsparse + + tflite: + group: backend + env: + CM_MLPERF_BACKEND: tflite + add_deps_recursive: + mlperf-inference-implementation: + tags: _tflite + + glow: + group: backend + env: + CM_MLPERF_BACKEND: glow + add_deps_recursive: + mlperf-inference-implementation: + tags: _glow + + tvm-onnx: + group: backend + base: + - batch_size.1 + env: + CM_MLPERF_BACKEND: tvm-onnx + add_deps_recursive: + mlperf-inference-implementation: + tags: _tvm-onnx + + tvm-pytorch: + group: backend + base: + - batch_size.1 + env: + CM_MLPERF_BACKEND: tvm-pytorch + add_deps_recursive: + mlperf-inference-implementation: + tags: _tvm-pytorch + + tvm-tflite: + group: backend + base: + - batch_size.1 + env: + CM_MLPERF_BACKEND: tvm-tflite + add_deps_recursive: + mlperf-inference-implementation: + tags: _tvm-tflite 
+ + ray: + group: backend + env: + CM_MLPERF_BACKEND: + ray + add_deps_recursive: + mlperf-inference-implementation: + tags: _ray + + cpu: + group: + device + default: + True + env: + CM_MLPERF_DEVICE: + cpu + add_deps_recursive: + mlperf-inference-implementation: + tags: _cpu + cuda: + docker: + all_gpus: 'yes' + group: + device + env: + CM_MLPERF_DEVICE: + gpu + add_deps_recursive: + mlperf-inference-implementation: + tags: _cuda + rocm: + docker: + all_gpus: 'yes' + group: + device + env: + CM_MLPERF_DEVICE: + rocm + add_deps_recursive: + mlperf-inference-implementation: + tags: _rocm + qaic: + group: + device + env: + CM_MLPERF_DEVICE: + qaic + add_deps_recursive: + mlperf-inference-implementation: + tags: _qaic + + tpu: + group: + device + env: + CM_MLPERF_DEVICE: + tpu + add_deps_recursive: + mlperf-inference-implementation: + tags: _tpu + + # Execution modes + fast: + group: execution-mode + env: + CM_FAST_FACTOR: '5' + CM_OUTPUT_FOLDER_NAME: fast_results + CM_MLPERF_RUN_STYLE: fast + + test: + group: execution-mode + default: true + env: + CM_OUTPUT_FOLDER_NAME: test_results + CM_MLPERF_RUN_STYLE: test + + valid,retinanet: + adr: + openimages-accuracy-script: + tags: _nvidia-pycocotools + + valid: + group: execution-mode + env: + CM_OUTPUT_FOLDER_NAME: valid_results + CM_MLPERF_RUN_STYLE: valid + + # Model precision + quantized: + alias: int8 + + fp32: + alias: float32 + + float32: + group: precision + default: true + env: + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_MODEL_PRECISION: float32 + add_deps_recursive: + python-reference-mlperf-inference: + tags: _fp32 + kilt-harness: + tags: _fp32 + + float16: + group: precision + env: + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_MODEL_PRECISION: float32 + add_deps_recursive: + python-reference-mlperf-inference: + tags: _float16 + kilt-harness: + tags: _fp16 + + bfloat16: + group: precision + env: + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_MODEL_PRECISION: float32 + add_deps_recursive: + python-reference-mlperf-inference: + tags: _bfloat16 + + int4: + group: precision + env: + CM_MLPERF_QUANTIZATION: on + CM_MLPERF_MODEL_PRECISION: int4 + add_deps_recursive: + mlperf-inference-implementation: + tags: _int4 + int8: + group: precision + env: + CM_MLPERF_QUANTIZATION: on + CM_MLPERF_MODEL_PRECISION: int8 + add_deps_recursive: + mlperf-inference-implementation: + tags: _int8 + kilt-harness: + tags: _int8 + + uint8: + group: precision + env: + CM_MLPERF_QUANTIZATION: on + CM_MLPERF_MODEL_PRECISION: uint8 + add_deps_recursive: + mlperf-inference-implementation: + tags: _uint8 + kilt-harness: + tags: _uint8 + + offline: + group: loadgen-scenario + default: true + env: + CM_MLPERF_LOADGEN_SCENARIO: Offline + add_deps_recursive: + mlperf-inference-implementation: + tags: _offline + multistream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: MultiStream + add_deps_recursive: + mlperf-inference-implementation: + tags: _multistream + singlestream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: SingleStream + add_deps_recursive: + mlperf-inference-implementation: + tags: _singlestream + server: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Server + add_deps_recursive: + mlperf-inference-implementation: + tags: _server + + power: + env: + CM_MLPERF_POWER: 'yes' + CM_SYSTEM_POWER: 'yes' + add_deps_recursive: + mlperf-runner: + tags: + _power + + batch_size.#: + group: batch_size + env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: '#' + add_deps_recursive: + mlperf-inference-implementation: + tags: _batch_size.# + + # 
Reproducibility (past submissions) + r2.1_default: + group: + reproducibility + add_deps_recursive: + compiler: + tags: llvm + inference-src: + tags: _octoml + loadgen: + version: r2.1 + nvidia-inference-common-code: + version: r2.1 + tags: _custom + nvidia-inference-server: + version: r2.1 + tags: _custom + env: + CM_SKIP_SYS_UTILS: 'yes' + CM_TEST_QUERY_COUNT: '100' + + r3.0_default: + group: + reproducibility + add_deps_recursive: + compiler: + tags: gcc + cuda: + version_max: "11.8" + nvidia-inference-common-code: + version: r2.1 + tags: _custom + nvidia-inference-server: + version: r2.1 + tags: _custom + env: + CM_SKIP_SYS_UTILS: 'yes' + + r3.1_default: + group: + reproducibility + add_deps_recursive: + nvidia-inference-common-code: + version: r3.0 + tags: _nvidia-only + nvidia-inference-server: + version: r3.0 + tags: _nvidia-only + default_env: + CM_SKIP_SYS_UTILS: 'yes' + CM_REGENERATE_MEASURE_FILES: 'yes' + + r4.0_default: + group: + reproducibility + add_deps_recursive: + nvidia-inference-common-code: + version: r3.1 + tags: _ctuning + nvidia-inference-server: + version: r3.1 + tags: _ctuning + default_env: + CM_SKIP_SYS_UTILS: 'yes' + CM_REGENERATE_MEASURE_FILES: 'yes' + +invalid_variation_combinations: + - + - resnet50 + - pytorch + - + - retinanet + - tf + - + - nvidia-original + - tf + - + - nvidia-original + - onnxruntime + - + - nvidia-original + - pytorch + - + - nvidia + - tf + - + - nvidia + - onnxruntime + - + - nvidia + - pytorch + - + - gptj + - tf + +input_description: + scenario: + desc: "MLPerf inference scenario" + choices: + - Offline + - Server + - SingleStream + - MultiStream + default: Offline + mode: + desc: "MLPerf inference mode" + choices: + - performance + - accuracy + default: accuracy + test_query_count: + desc: "Specifies the number of samples to be processed during a test run" + target_qps: + desc: "Target QPS" + target_latency: + desc: "Target Latency" + max_batchsize: + desc: "Maximum batchsize to be used" + num_threads: + desc: "Number of CPU threads to launch the application with" + hw_name: + desc: "Valid value - any system description which has a config file (under same name) defined [here](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-configs-sut-mlperf-inference/configs)" + output_dir: + desc: "Location where the outputs are produced" + rerun: + desc: "Redo the run even if previous run files exist" + boolean: true + default: true + regenerate_files: + desc: "Regenerates measurement files including accuracy.txt files even if a previous run exists. 
This option is redundant if `--rerun` is used" + boolean: true + adr.python.name: + desc: "Python virtual environment name (optional)" + default: mlperf + adr.python.version_min: + desc: "Minimal Python version" + default: "3.8" + adr.python.version: + desc: "Force Python version (must have all system deps)" + adr.compiler.tags: + desc: "Compiler for loadgen" + default: gcc + adr.inference-src-loadgen.env.CM_GIT_URL: + desc: "Git URL for MLPerf inference sources to build LoadGen (to enable non-reference implementations)" + adr.inference-src.env.CM_GIT_URL: + desc: "Git URL for MLPerf inference sources to run benchmarks (to enable non-reference implementations)" + quiet: + desc: "Quiet run (select default values for all questions)" + boolean: true + default: false + readme: + desc: "Generate README with the reproducibility report" + debug: + desc: "Debug MLPerf script" + +gui: + title: "CM GUI for the MLPerf inference benchmark" + +docker: + deps: + - tags: get,mlperf,inference,results,dir + - tags: get,mlperf,inference,submission,dir + pre_run_cmds: + - cm pull repo + mounts: + - "${{ CM_DATASET_IMAGENET_PATH }}:${{ CM_DATASET_IMAGENET_PATH }}" + - "${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}:${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}" + - "${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}" + - "${{ DLRM_DATA_PATH }}:/home/mlperf_inf_dlrmv2" + skip_run_cmd: 'no' + shm_size: '32gb' + extra_run_args: ' --ulimit memlock=-1 --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined' + docker_os: ubuntu + docker_real_run: False + docker_os_version: '22.04' + docker_input_mapping: + imagenet_path: IMAGENET_PATH + gptj_checkpoint_path: GPTJ_CHECKPOINT_PATH + criteo_preprocessed_path: CRITEO_PREPROCESSED_PATH + results_dir: RESULTS_DIR + submission_dir: SUBMISSION_DIR + dlrm_data_path: DLRM_DATA_PATH + intel_gptj_int8_model_path: CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH diff --git a/script/app-mlperf-inference/build_dockerfiles.py b/script/app-mlperf-inference/build_dockerfiles.py new file mode 100644 index 0000000000..10579d33ea --- /dev/null +++ b/script/app-mlperf-inference/build_dockerfiles.py @@ -0,0 +1,98 @@ +import cmind +import os +import pathlib +current_file_path = pathlib.Path(__file__).parent.resolve() +docker_os = { + "ubuntu": ["18.04","20.04","22.04"], + "rhel": ["9"] + } +dataset = { + "resnet50": "imagenet", + "retinanet": "openimages", + "bert-99.9": "squad" + } +variations = { + "resnet50": { + "tensorflow": { + "cpu": [ "python" ] + }, + "onnxruntime": { + "cpu": [ "python", "cpp" ] + }, + "pytorch": { + "cpu": [ ] + } + }, + "retinanet": { + "tensorflow": { + }, + "onnxruntime": { + "cpu": [ "python", "cpp" ] + }, + "pytorch": { + "cpu": [ "python" ] + } + }, + "bert-99.9": { + "tensorflow": { + "cpu": [ "python" ] + }, + "onnxruntime": { + "cpu": [ "python" ] + }, + "pytorch": { + "cpu": [] + } + } + } + +for _os in docker_os: + for version in docker_os[_os]: + for model in variations: + for backend in variations[model]: + for device in variations[model][backend]: + for implementation in variations[model][backend][device]: + variation_string=",_"+model+",_"+backend+",_"+device+",_"+implementation + file_name_ext = "_" + implementation + "_" + backend+"_"+device + dockerfile_path = os.path.join(current_file_path,'dockerfiles', model, _os +'_'+version+ file_name_ext +'.Dockerfile') + cm_input = {'action': 'run', + 'automation': 'script', + 'tags': 'app,mlperf,inference,generic'+variation_string, + 
'adr': {'compiler': + {'tags': 'gcc'}, + 'inference-src': + {'tags': '_octoml'}, + 'openimages-preprocessed': + {'tags': '_50'} + }, + 'print_deps': True, + 'quiet': True, + 'silent': True, + 'fake_run': True + } + r = cmind.access(cm_input) + print_deps = r['new_state']['print_deps'] + comments = [ "#RUN " + dep for dep in print_deps ] + comments.append("") + comments.append("# Run CM workflow for MLPerf inference") + cm_docker_input = {'action': 'run', + 'automation': 'script', + 'tags': 'build,dockerfile', + 'docker_os': _os, + 'docker_os_version': version, + 'file_path': dockerfile_path, + 'comments': comments, + 'run_cmd': 'cm run script --tags=app,mlperf,inference,generic'+variation_string+' --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml', + 'script_tags': 'app,mlperf,inference,generic', + 'quiet': True, + 'print_deps': True, + 'real_run': True + } + r = cmind.access(cm_docker_input) + if r['return'] > 0: + print(r) + exit(1) + + print ('') + print ("Dockerfile generated at " + dockerfile_path) + diff --git a/script/app-mlperf-inference/customize.py b/script/app-mlperf-inference/customize.py new file mode 100644 index 0000000000..23a7f75ce8 --- /dev/null +++ b/script/app-mlperf-inference/customize.py @@ -0,0 +1,496 @@ +from cmind import utils + +import os +import json +import shutil +import subprocess +import copy +import cmind as cm +import platform +import sys +import mlperf_utils + +def preprocess(i): + + env = i['env'] + state = i['state'] + + if env.get('CM_MLPERF_IMPLEMENTATION', '') == 'nvidia': + if env.get('CM_NVIDIA_GPU_NAME', '') in [ "rtx_4090", "a100", "t4", "l4", "orin", "custom" ]: + env['CM_NVIDIA_HARNESS_GPU_VARIATION'] = "_" + env['CM_NVIDIA_GPU_NAME'] + env['CM_NVIDIA_GPU_MEMORY'] = '' + else: + gpu_memory = i['state'].get('cm_cuda_device_prop','').get('Global memory') + gpu_memory_size = str(int((float(gpu_memory)/(1024*1024*1024) +7)/8) * 8) + env['CM_NVIDIA_GPU_MEMORY'] = gpu_memory_size + env['CM_NVIDIA_HARNESS_GPU_VARIATION'] = '' + + if 'cmd' in i['input']: + state['mlperf_inference_run_cmd'] = "cm run script " + " ".join(i['input']['cmd']) + + state['mlperf-inference-implementation'] = {} + + run_state = i['run_script_input']['run_state'] + state['mlperf-inference-implementation']['script_id'] = run_state['script_id']+":"+",".join(run_state['script_variation_tags']) + + return {'return':0} + +def postprocess(i): + + os_info = i['os_info'] + + xsep = '^' if os_info['platform'] == 'windows' else '\\' + + env = i['env'] + inp = i['input'] + env['CMD'] = '' + state = i['state'] + + if env.get('CM_MLPERF_USER_CONF', '') == '': + return {'return': 0} + + output_dir = env['CM_MLPERF_OUTPUT_DIR'] + mode = env['CM_MLPERF_LOADGEN_MODE'] + + if not os.path.exists(output_dir) or not os.path.exists(os.path.join(output_dir, "mlperf_log_summary.txt")): + # No output, fake_run? 
+        return {'return': 0}
+
+    # In power mode, copy the log files from the tmp_power directory
+    if env.get('CM_MLPERF_POWER', '') == "yes" and mode == "performance":
+        mlperf_power_logs_dir = os.path.join(env['CM_MLPERF_OUTPUT_DIR'], "..", "power")
+        mlperf_ranging_logs_dir = os.path.join(env['CM_MLPERF_OUTPUT_DIR'], "..", "ranging")
+
+        if os.path.exists(os.path.join(env['CM_MLPERF_POWER_LOG_DIR'], "power")):
+            if os.path.exists(mlperf_power_logs_dir):
+                shutil.rmtree(mlperf_power_logs_dir)
+            shutil.copytree(os.path.join(env['CM_MLPERF_POWER_LOG_DIR'], "power"), mlperf_power_logs_dir)
+
+        if os.path.exists(os.path.join(env['CM_MLPERF_POWER_LOG_DIR'], "ranging")):
+            if os.path.exists(mlperf_ranging_logs_dir):
+                shutil.rmtree(mlperf_ranging_logs_dir)
+            shutil.copytree(os.path.join(env['CM_MLPERF_POWER_LOG_DIR'], "ranging"), mlperf_ranging_logs_dir)
+
+        if os.path.exists(os.path.join(env['CM_MLPERF_POWER_LOG_DIR'], "run_1", "spl.txt")):
+            shutil.copyfile(os.path.join(env['CM_MLPERF_POWER_LOG_DIR'], "run_1", "spl.txt"), os.path.join(env['CM_MLPERF_OUTPUT_DIR'], "spl.txt"))
+
+    model = env['CM_MODEL']
+    model_full_name = env.get('CM_ML_MODEL_FULL_NAME', model)
+
+    # Parentheses make the intended precedence explicit ('and' binds tighter than 'or')
+    if mode == "accuracy" or (mode == "compliance" and env['CM_MLPERF_LOADGEN_COMPLIANCE_TEST'] == "TEST01"):
+        if model == "resnet50":
+            accuracy_filename = "accuracy-imagenet.py"
+            accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", \
+                    accuracy_filename)
+            dataset_args = " --imagenet-val-file " + \
+                    os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt")
+            accuracy_log_file_option_name = " --mlperf-accuracy-file "
+            datatype_option = " --dtype "+env['CM_IMAGENET_ACCURACY_DTYPE']
+
+        elif model == "retinanet":
+            accuracy_filename = "accuracy-openimages.py"
+            accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", \
+                    accuracy_filename)
+            dataset_args = " --openimages-dir " + os.getcwd() # just to make the script happy
+            accuracy_log_file_option_name = " --mlperf-accuracy-file "
+            datatype_option = ""
+
+        elif 'bert' in model:
+            accuracy_filename = "accuracy-squad.py"
+            accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_BERT_PATH'], accuracy_filename)
+            dataset_args = " --val_data '" + env['CM_DATASET_SQUAD_VAL_PATH'] + "' --vocab_file '" + env['CM_DATASET_SQUAD_VOCAB_PATH'] + "' --out_file predictions.json "
+            accuracy_log_file_option_name = " --log_file "
+            datatype_option = " --output_dtype "+env['CM_SQUAD_ACCURACY_DTYPE']
+
+        elif 'stable-diffusion-xl' in model:
+            pass # No compliance check for now
+        elif 'gpt' in model:
+            pass # No compliance check for now
+        elif 'llama2-70b' in model:
+            pass # No compliance check for now
+        else:
+            pass # Not giving an error for now, but the accuracy paths still need to be added for other benchmarks that may require the non-determinism test
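+            # Note: for models without an accuracy script above, accuracy_filepath,
+            # dataset_args, accuracy_log_file_option_name and datatype_option are
+            # left undefined, so the TEST01 non-determinism fallback further below
+            # would likely fail for them.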
+            #return {'return': 1, 'error': f'Accuracy paths not done for model {model}'}
+    scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
+
+    #if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes" and mode == "performance" and scenario != "Server":
+    if mode == "performance" and scenario != "Server":
+        os.chdir(output_dir)
+        if not os.path.exists("mlperf_log_summary.txt"):
+            return {'return': 0}
+
+        if scenario in [ "Offline", "Server" ]:
+            metric = "target_qps"
+        elif scenario.endswith("Stream"):
+            metric = "target_latency"
+        else:
+            return {'return': 1, 'error': 'Unsupported scenario: {}'.format(scenario)}
+
+        import re
+        import yaml
+        # Raw strings avoid invalid escape-sequence warnings in these regexes
+        pattern = {}
+        pattern["Offline"] = r"Samples per second: (.*)\n"
+        pattern["SingleStream"] = r"Mean latency \(ns\)\s*:(.*)"
+        pattern["MultiStream"] = r"Mean latency \(ns\)\s*:(.*)"
+        print("\n")
+        with open("mlperf_log_summary.txt", "r") as fp:
+            summary = fp.read()
+
+        result = re.findall(pattern[scenario], summary)
+
+        if not result:
+            return {'return': 1, 'error': f'No {metric} found in performance summary. Pattern checked "{pattern[scenario]}"'}
+
+        value = result[0].strip()
+        if r"\(ns\)" in pattern[scenario]:
+            value = str(float(value)/1000000) # convert nanoseconds to milliseconds
+
+        sut_name = state['CM_SUT_CONFIG_NAME']
+        sut_config = state['CM_SUT_CONFIG'][sut_name]
+        sut_config_path = state['CM_SUT_CONFIG_PATH'][sut_name]
+        if scenario not in sut_config[model_full_name]:
+            sut_config[model_full_name][scenario] = {}
+        sut_config[model_full_name][scenario][metric] = value
+
+        print(f"SUT: {sut_name}, model: {model_full_name}, scenario: {scenario}, {metric} updated as {value}")
+        print(f"New config stored in {sut_config_path}")
+        with open(sut_config_path, "w") as f:
+            yaml.dump(sut_config, f)
+
+
+    if mode in [ "performance", "accuracy" ]:
+        measurements = {}
+        measurements['starting_weights_filename'] = env.get('CM_ML_MODEL_STARTING_WEIGHTS_FILENAME', env.get('CM_ML_MODEL_FILE', ''))
+        measurements['retraining'] = env.get('CM_ML_MODEL_RETRAINING','no')
+        measurements['input_data_types'] = env.get('CM_ML_MODEL_INPUTS_DATA_TYPE', 'fp32')
+        measurements['weight_data_types'] = env.get('CM_ML_MODEL_WEIGHTS_DATA_TYPE', 'fp32')
+        measurements['weight_transformations'] = env.get('CM_ML_MODEL_WEIGHT_TRANSFORMATIONS', 'none')
+
+        os.chdir(output_dir)
+
+        if not os.path.exists("mlperf_log_summary.txt"):
+            return {'return': 0}
+
+        mlperf_log_summary = ''
+        if os.path.isfile("mlperf_log_summary.txt"):
+            with open("mlperf_log_summary.txt", "r") as fp:
+                mlperf_log_summary=fp.read()
+
+        if mlperf_log_summary!='':
+            state['app_mlperf_inference_log_summary']={}
+            for x in mlperf_log_summary.split('\n'):
+                y = x.split(': ')
+                if len(y)==2:
+                    state['app_mlperf_inference_log_summary'][y[0].strip().lower()]=y[1].strip()
+
+        if env.get("CM_MLPERF_PRINT_SUMMARY", "").lower() not in [ "no", "0", "false"]:
+            print("\n")
+            print(mlperf_log_summary)
+
+        with open("measurements.json", "w") as fp:
+            json.dump(measurements, fp, indent=2)
+
+        system_meta = state['CM_SUT_META']
+        with open("system_meta.json", "w") as fp:
+            json.dump(system_meta, fp, indent=2)
+
+        # Add to the state
+        state['app_mlperf_inference_measurements'] = copy.deepcopy(measurements)
+
+        if os.path.exists(env['CM_MLPERF_CONF']):
+            shutil.copy(env['CM_MLPERF_CONF'], 'mlperf.conf')
+
+        if os.path.exists(env['CM_MLPERF_USER_CONF']):
+            shutil.copy(env['CM_MLPERF_USER_CONF'], 'user.conf')
+
+        result, valid, power_result =
mlperf_utils.get_result_from_log(env['CM_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode) + power = None + power_efficiency = None + if power_result: + power_result_split = power_result.split(",") + if len(power_result_split) == 2: #power and power efficiency + power = power_result_split[0] + power_efficiency = power_result_split[1] + + if not state.get('cm-mlperf-inference-results'): + state['cm-mlperf-inference-results'] = {} + if not state.get('cm-mlperf-inference-results-last'): + state['cm-mlperf-inference-results-last'] = {} + if not state['cm-mlperf-inference-results'].get(state['CM_SUT_CONFIG_NAME']): + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']] = {} + if not state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']].get(model): + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model] = {} + if not state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model].get(scenario): + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario] = {} + + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario][mode] = result + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario][mode+'_valid'] = valid.get(mode, False) + + state['cm-mlperf-inference-results-last'][mode] = result + state['cm-mlperf-inference-results-last'][mode+'_valid'] = valid.get(mode, False) + + if power: + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario]['power'] = power + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario]['power_valid'] = valid['power'] + state['cm-mlperf-inference-results-last']['power'] = power + state['cm-mlperf-inference-results-last']['power_valid'] = valid['power'] + if power_efficiency: + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario]['power_efficiency'] = power_efficiency + state['cm-mlperf-inference-results-last']['power_efficiency'] = power_efficiency + + # Record basic host info + host_info = { + "os_version":platform.platform(), + "cpu_version":platform.processor(), + "python_version":sys.version, + "cm_version":cm.__version__ + } + + x = '' + if env.get('CM_HOST_OS_FLAVOR','')!='': x+=env['CM_HOST_OS_FLAVOR'] + if env.get('CM_HOST_OS_VERSION','')!='': x+=' '+env['CM_HOST_OS_VERSION'] + if x!='': host_info['os_version_sys'] = x + + if env.get('CM_HOST_SYSTEM_NAME','')!='': host_info['system_name']=env['CM_HOST_SYSTEM_NAME'] + + # Check CM automation repository + repo_name = 'mlcommons@ck' + repo_hash = '' + r = cm.access({'action':'find', 'automation':'repo', 'artifact':'mlcommons@ck,a4705959af8e447a'}) + if r['return']==0 and len(r['list'])==1: + repo_path = r['list'][0].path + if os.path.isdir(repo_path): + repo_name = os.path.basename(repo_path) + + # Check Grigori's dev + if repo_name == 'ck': repo_name = 'ctuning@mlcommons-ck' + + r = cm.access({'action':'system', + 'automation':'utils', + 'path':repo_path, + 'cmd':'git rev-parse HEAD'}) + if r['return'] == 0 and r['ret'] == 0: + repo_hash = r['stdout'] + + host_info['cm_repo_name'] = repo_name + host_info['cm_repo_git_hash'] = repo_hash + + # Check a few important MLCommons repos + xhashes = [] + md_xhashes = '' + + for x in [('get,git,inference', ['inference']), + ('get,git,mlperf,power', ['power-dev'])]: + xtags = x[0] + xdirs = x[1] + + rx = cm.access({'action':'find', 'automation':'cache', 'tags':xtags}) + if rx['return']>0: return rx + for cache in rx['list']: + xurl = '' + xhash = '' + + 
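+            # Walk the known subdirectories of this cache entry and record the
+            # git remote URL and commit hash of the MLCommons checkout inside,
+            # so that the exact sources used for this run can be reproduced.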
for xd in xdirs:
+                xpath = os.path.join(cache.path, xd)
+                if os.path.isdir(xpath):
+                    r = cm.access({'action':'system', 'automation':'utils', 'path':xpath, 'cmd':'git rev-parse HEAD'})
+                    if r['return'] == 0 and r['ret'] == 0:
+                        xhash = r['stdout']
+
+                        r = cm.access({'action':'system', 'automation':'utils', 'path':xpath, 'cmd':'git config --get remote.origin.url'})
+                        if r['return'] == 0 and r['ret'] == 0:
+                            xurl = r['stdout']
+
+                if xurl!='' and xhash!='':
+                    break
+
+            if xurl!='' and xhash!='':
+                # Skip this repository if the same URL/hash pair was already recorded
+                found = False
+
+                for xh in xhashes:
+                    if xh['mlcommons_git_url'] == xurl and xh['mlcommons_git_hash'] == xhash:
+                        found = True
+                        break
+
+                if not found:
+                    xhashes.append({'mlcommons_git_url': xurl,
+                                    'mlcommons_git_hash': xhash,
+                                    'cm_cache_tags':cache.meta['tags']})
+
+                    md_xhashes += '* MLCommons Git {} ({})\n'.format(xurl, xhash)
+
+    if len(xhashes)>0:
+        host_info['mlcommons_repos'] = xhashes
+
+    with open("cm-host-info.json", "w") as fp:
+        fp.write(json.dumps(host_info, indent=2)+'\n')
+
+    # Prepare README
+    if "cmd" in inp:
+        cmd = "cm run script \\\n\t"+" \\\n\t".join(inp['cmd'])
+        xcmd = "cm run script "+xsep+"\n\t" + (" "+xsep+"\n\t").join(inp['cmd'])
+    else:
+        cmd = ""
+        xcmd = ""
+
+    readme_init = "This experiment is generated using the [MLCommons Collective Mind automation framework (CM)](https://github.com/mlcommons/ck).\n\n"
+
+    readme_init += "*Check [CM MLPerf docs](https://github.com/mlcommons/ck/tree/master/docs/mlperf) for more details.*\n\n"
+
+    readme_body = "## Host platform\n\n* OS version: {}\n* CPU version: {}\n* Python version: {}\n* MLCommons CM version: {}\n{}\n\n".format(platform.platform(),
+        platform.processor(), sys.version, cm.__version__, md_xhashes)
+
+    x = repo_name
+    if repo_hash!='': x+=' --checkout='+str(repo_hash)
+
+    readme_body += "## CM Run Command\n\nSee [CM installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md).\n\n"+ \
+        "```bash\npip install -U cmind\n\ncm rm cache -f\n\ncm pull repo {}\n\n{}\n```".format(x, xcmd)
+
+    readme_body += "\n*Note that if you want to use the [latest automation recipes](https://access.cknowledge.org/playground/?action=scripts) for MLPerf (CM scripts),\n"+ \
+        " you should simply reload {} without checkout and clean CM cache as follows:*\n\n".format(repo_name) + \
+        "```bash\ncm rm repo {}\ncm pull repo {}\ncm rm cache -f\n\n```".format(repo_name, repo_name)
+
+    extra_readme_init = ''
+    extra_readme_body = ''
+    if env.get('CM_MLPERF_README', '') == "yes":
+        extra_readme_body += "\n## Dependent CM scripts\n\n"
+
+        script_tags = inp['tags']
+        script_adr = inp.get('adr', {})
+
+        cm_input = {'action': 'run',
+                    'automation': 'script',
+                    'tags': script_tags,
+                    'adr': script_adr,
+                    'print_deps': True,
+                    'env': env,
+                    'quiet': True,
+                    'silent': True,
+                    'fake_run': True
+                   }
+        r = cm.access(cm_input)
+        if r['return'] > 0:
+            return r
+
+        print_deps = r['new_state']['print_deps']
+        count = 1
+        for dep in print_deps:
+            extra_readme_body += "\n\n" + str(count) +". `" +dep+ "`\n"
+            count = count+1
+
+    if state.get('mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('print_deps'):
+
+        extra_readme_body += "\n## Dependent CM scripts for the MLPerf Inference Implementation\n"
+
+        print_deps = state['mlperf-inference-implementation']['print_deps']
+        count = 1
+        for dep in print_deps:
+            extra_readme_body += "\n\n" + str(count) +".
`" +dep+"`\n" + count = count+1 + + readme = readme_init + readme_body + extra_readme = extra_readme_init + extra_readme_body + + with open ("README.md", "w") as fp: + fp.write(readme) + with open ("README-extra.md", "w") as fp: + fp.write(extra_readme) + + elif mode == "compliance": + + test = env.get("CM_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01") + + RESULT_DIR = os.path.split(output_dir)[0] + COMPLIANCE_DIR = output_dir + OUTPUT_DIR = os.path.dirname(COMPLIANCE_DIR) + + SCRIPT_PATH = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test, "run_verification.py") + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " " + SCRIPT_PATH + " -r " + RESULT_DIR + " -c " + COMPLIANCE_DIR + " -o "+ OUTPUT_DIR + print(cmd) + os.system(cmd) + + if test == "TEST01": + + run_script_input = i['run_script_input'] + automation = i['automation'] + + SCRIPT_PATH = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test, + "create_accuracy_baseline.sh") + TEST01_DIR = os.path.join(OUTPUT_DIR, "TEST01") + OUTPUT_DIR = os.path.join(OUTPUT_DIR, "TEST01", "accuracy") + if not os.path.exists(OUTPUT_DIR): + os.makedirs(OUTPUT_DIR) + + ACCURACY_DIR = os.path.join(RESULT_DIR, "accuracy") + if not os.path.exists(ACCURACY_DIR): + print("Accuracy run not yet completed") + return {'return':1, 'error': 'TEST01 needs accuracy run to be completed first'} + + cmd = "cd " + TEST01_DIR + " && bash " + SCRIPT_PATH + " " + os.path.join(ACCURACY_DIR, "mlperf_log_accuracy.json") + " " + \ + os.path.join(COMPLIANCE_DIR, "mlperf_log_accuracy.json") + env['CMD'] = cmd + r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'verify_accuracy'}) + if r['return']>0: + return r + + verify_accuracy_file = os.path.join(TEST01_DIR, "verify_accuracy.txt") + with open(verify_accuracy_file, 'r') as file: + data = file.read().replace('\n', '\t') + + if 'TEST PASS' not in data: + print("\nDeterministic TEST01 failed... 
Trying with non-determinism.\n")
+                # The deterministic check failed; rerun the accuracy scripts to
+                # compare the baseline and compliance outputs while tolerating
+                # non-deterministic results
+
+                CMD = "cd "+ ACCURACY_DIR+" && "+ env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \
+                        os.path.join(TEST01_DIR, "mlperf_log_accuracy_baseline.json") + dataset_args + datatype_option + " > " + \
+                        os.path.join(OUTPUT_DIR, "baseline_accuracy.txt")
+
+                env['CMD'] = CMD
+                r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'verify_accuracy'})
+                if r['return']>0: return r
+
+                CMD = "cd " + ACCURACY_DIR + " && "+env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \
+                        os.path.join(TEST01_DIR, "mlperf_log_accuracy.json") + dataset_args + datatype_option + " > " + \
+                        os.path.join(OUTPUT_DIR, "compliance_accuracy.txt")
+
+                env['CMD'] = CMD
+                r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'verify_accuracy'})
+                if r['return']>0: return r
+            import submission_checker as checker
+            is_valid = checker.check_compliance_perf_dir(COMPLIANCE_DIR)
+            state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario][test] = "passed" if is_valid else "failed"
+
+        else:
+            print(test)
+
+
+    if state.get('mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('version_info'):
+        with open(os.path.join(output_dir, "cm-version-info.json"), "w") as f:
+            f.write(json.dumps(state['mlperf-inference-implementation']['version_info'], indent=2))
+
+    if env.get('CM_DUMP_SYSTEM_INFO', True):
+        dump_script_output("detect,os", env, state, 'new_env', os.path.join(output_dir, "os_info.json"))
+        dump_script_output("detect,cpu", env, state, 'new_env', os.path.join(output_dir, "cpu_info.json"))
+        env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join(env['CM_MLPERF_OUTPUT_DIR'], "pip_freeze.raw")
+        dump_script_output("dump,pip,freeze", env, state, 'new_state', os.path.join(output_dir, "pip_freeze.json"))
+
+    return {'return':0}
+
+def dump_script_output(script_tags, env, state, output_key, dump_file):
+
+    cm_input = {'action': 'run',
+                'automation': 'script',
+                'tags': script_tags,
+                'env': env,
+                'state': state,
+                'quiet': True,
+                'silent': True,
+               }
+    r = cm.access(cm_input)
+    if r['return'] > 0:
+        return r
+    with open(dump_file, "w") as f:
+        f.write(json.dumps(r[output_key], indent=2))
+
+    return {'return': 0}
diff --git a/script/app-mlperf-inference/dockerfiles/README.md b/script/app-mlperf-inference/dockerfiles/README.md
new file mode 100644
index 0000000000..9449b2e8c9
--- /dev/null
+++ b/script/app-mlperf-inference/dockerfiles/README.md
@@ -0,0 +1,2 @@
+# Example for running docker
+docker build
diff --git a/script/app-mlperf-inference/dockerfiles/bert-99.9/rhel_9_python_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/bert-99.9/rhel_9_python_onnxruntime_cpu.Dockerfile
new file mode 100644
index 0000000000..be579054d7
--- /dev/null
+++ b/script/app-mlperf-inference/dockerfiles/bert-99.9/rhel_9_python_onnxruntime_cpu.Dockerfile
@@ -0,0 +1,48 @@
+FROM registry.access.redhat.com/ubi9
+SHELL ["/bin/bash", "-c"]
+ARG CM_GH_TOKEN
+
+# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes
+# Install system dependencies
+RUN dnf update -y
+RUN dnf install -y python3 python-pip git wget sudo binutils
+
+# Install python packages
+RUN python3 -m pip install cmind requests
+
+# Setup docker environment
+ENTRYPOINT ["/bin/bash", "-c"]
+ENV TZ=US/Pacific
+RUN ln -snf /usr/share/zoneinfo/$TZ
/etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,generic-python-lib,_torch +#RUN cm run script --tags=get,generic-python-lib,_transformers +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_deeplearningexamples,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,squad,original +#RUN cm run script --tags=get,ml-model,language-processing,bert,_onnx-fp32 +#RUN cm run script --tags=get,generic-python-lib,_tokenization + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_bert-99.9,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_bert-99.9,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/bert-99.9/rhel_9_python_tensorflow_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/bert-99.9/rhel_9_python_tensorflow_cpu.Dockerfile new file mode 100644 index 0000000000..ead7e4d40e --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/bert-99.9/rhel_9_python_tensorflow_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM registry.access.redhat.com/ubi9 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN dnf update -y +RUN dnf install -y python3 python-pip git wget sudo binutils + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_torch +#RUN cm run script --tags=get,generic-python-lib,_transformers +#RUN cm run script --tags=get,generic-python-lib,_tensorflow +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_deeplearningexamples,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,squad,original +#RUN cm run script --tags=get,ml-model,language-processing,bert,_tf +#RUN cm run script --tags=get,generic-python-lib,_tokenization +#RUN cm run script --tags=get,generic-python-lib,_protobuf + +# Run CM workflow for MLPerf inference +RUN cm run script 
--tags=app,mlperf,inference,generic,_bert-99.9,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_bert-99.9,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_18.04_python_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_18.04_python_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..83c482fde3 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_18.04_python_onnxruntime_cpu.Dockerfile @@ -0,0 +1,48 @@ +FROM ubuntu:18.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,generic-python-lib,_torch +#RUN cm run script --tags=get,generic-python-lib,_transformers +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_deeplearningexamples,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,squad,original +#RUN cm run script --tags=get,ml-model,language-processing,bert,_onnx-fp32 +#RUN cm run script --tags=get,generic-python-lib,_tokenization + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_bert-99.9,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_bert-99.9,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_18.04_python_tensorflow_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_18.04_python_tensorflow_cpu.Dockerfile new file mode 100644 index 0000000000..ce6715d2f5 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_18.04_python_tensorflow_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:18.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser 
ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_torch +#RUN cm run script --tags=get,generic-python-lib,_transformers +#RUN cm run script --tags=get,generic-python-lib,_tensorflow +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_deeplearningexamples,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,squad,original +#RUN cm run script --tags=get,ml-model,language-processing,bert,_tf +#RUN cm run script --tags=get,generic-python-lib,_tokenization +#RUN cm run script --tags=get,generic-python-lib,_protobuf + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_bert-99.9,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_bert-99.9,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..452327ed3c --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile @@ -0,0 +1,48 @@ +FROM ubuntu:20.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,generic-python-lib,_torch +#RUN cm run script --tags=get,generic-python-lib,_transformers +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_deeplearningexamples,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,squad,original +#RUN cm run script --tags=get,ml-model,language-processing,bert,_onnx-fp32 +#RUN cm run script --tags=get,generic-python-lib,_tokenization + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_bert-99.9,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script 
--tags=app,mlperf,inference,generic,_bert-99.9,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_20.04_python_tensorflow_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_20.04_python_tensorflow_cpu.Dockerfile new file mode 100644 index 0000000000..10f77f28a6 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_20.04_python_tensorflow_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:20.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_torch +#RUN cm run script --tags=get,generic-python-lib,_transformers +#RUN cm run script --tags=get,generic-python-lib,_tensorflow +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_deeplearningexamples,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,squad,original +#RUN cm run script --tags=get,ml-model,language-processing,bert,_tf +#RUN cm run script --tags=get,generic-python-lib,_tokenization +#RUN cm run script --tags=get,generic-python-lib,_protobuf + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_bert-99.9,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_bert-99.9,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_22.04_python_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_22.04_python_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..dbc05168f1 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_22.04_python_onnxruntime_cpu.Dockerfile @@ -0,0 +1,48 @@ +FROM ubuntu:22.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for 
scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,generic-python-lib,_torch +#RUN cm run script --tags=get,generic-python-lib,_transformers +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_deeplearningexamples,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,squad,original +#RUN cm run script --tags=get,ml-model,language-processing,bert,_onnx-fp32 +#RUN cm run script --tags=get,generic-python-lib,_tokenization + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_bert-99.9,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_bert-99.9,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_22.04_python_tensorflow_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_22.04_python_tensorflow_cpu.Dockerfile new file mode 100644 index 0000000000..d94cc74d39 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/bert-99.9/ubuntu_22.04_python_tensorflow_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:22.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_torch +#RUN cm run script --tags=get,generic-python-lib,_transformers +#RUN cm run script --tags=get,generic-python-lib,_tensorflow +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_deeplearningexamples,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,squad,original +#RUN cm run script --tags=get,ml-model,language-processing,bert,_tf +#RUN cm run script --tags=get,generic-python-lib,_tokenization +#RUN cm run script --tags=get,generic-python-lib,_protobuf + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_bert-99.9,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_bert-99.9,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git 
a/script/app-mlperf-inference/dockerfiles/resnet50/_info.md b/script/app-mlperf-inference/dockerfiles/resnet50/_info.md new file mode 100644 index 0000000000..b956a7f990 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/_info.md @@ -0,0 +1,4 @@ +### 20240410 + +We moved onnxruntime-linux-aarch64-1.12.1.tgz to https://cKnowledge.org/ai/data/onnxruntime-linux-aarch64-1.12.1.tgz +to reduce the size of this repository. diff --git a/script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_cpp_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_cpp_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..544ce4904d --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_cpp_onnxruntime_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM registry.access.redhat.com/ubi9 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN dnf update -y +RUN dnf install -y python3 python-pip git wget sudo binutils + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,image-classification,imagenet,preprocessed,_NCHW +#RUN cm run script --tags=get,dataset-aux,image-classification,imagenet-aux +#RUN cm run script --tags=get,ml-model,image-classification,resnet50,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_python_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_python_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..e6e57260c2 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_python_onnxruntime_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM registry.access.redhat.com/ubi9 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN dnf update -y +RUN dnf install -y python3 python-pip git wget sudo binutils + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", 
"-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,image-classification,imagenet,preprocessed,_NCHW +#RUN cm run script --tags=get,dataset-aux,image-classification,imagenet-aux +#RUN cm run script --tags=get,ml-model,image-classification,resnet50,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_python_pytorch_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_python_pytorch_cpu.Dockerfile new file mode 100644 index 0000000000..64de129f68 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_python_pytorch_cpu.Dockerfile @@ -0,0 +1,42 @@ +FROM registry.access.redhat.com/ubi9 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN +ARG CM_MLPERF_LOADGEN_MODE=accuracy +ARG CM_MLPERF_LOADGEN_SCENARIO=Offline +ARG CM_TEST_QUERY_COUNT=10 + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN dnf update -y +RUN dnf install -y python3 python-pip git wget sudo binutils + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +# Install/customize individual CM components for MLPerf +#RUN cm run script --tags=get,generic-python-lib,_pytorch +#RUN cm run script --tags=get-ml-model,resnet50,_pytorch +#RUN cm run script --tags=get,dataset,preprocessed,imagenet + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,reference,_resnet50,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,reference,_resnet50,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git 
a/script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_python_tensorflow_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_python_tensorflow_cpu.Dockerfile new file mode 100644 index 0000000000..7dfc8e2faa --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/rhel_9_python_tensorflow_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM registry.access.redhat.com/ubi9 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN dnf update -y +RUN dnf install -y python3 python-pip git wget sudo binutils + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_tensorflow +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,image-classification,imagenet,preprocessed,_NHWC +#RUN cm run script --tags=get,dataset-aux,image-classification,imagenet-aux +#RUN cm run script --tags=get,ml-model,image-classification,resnet50,_tensorflow +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_cpp_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_cpp_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..74dded5051 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_cpp_onnxruntime_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:18.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet 
--tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,image-classification,imagenet,preprocessed,_NCHW +#RUN cm run script --tags=get,dataset-aux,image-classification,imagenet-aux +#RUN cm run script --tags=get,ml-model,image-classification,resnet50,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_python_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_python_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..8980352f1f --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_python_onnxruntime_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:18.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,image-classification,imagenet,preprocessed,_NCHW +#RUN cm run script --tags=get,dataset-aux,image-classification,imagenet-aux +#RUN cm run script --tags=get,ml-model,image-classification,resnet50,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff 
--git a/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_python_pytorch_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_python_pytorch_cpu.Dockerfile new file mode 100644 index 0000000000..14d371e911 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_python_pytorch_cpu.Dockerfile @@ -0,0 +1,42 @@ +FROM ubuntu:18.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN +ARG CM_MLPERF_LOADGEN_MODE=accuracy +ARG CM_MLPERF_LOADGEN_SCENARIO=Offline +ARG CM_TEST_QUERY_COUNT=10 + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +# Install/customize individual CM components for MLPerf +#RUN cm run script --tags=get,generic-python-lib,_pytorch +#RUN cm run script --tags=get-ml-model,resnet50,_pytorch +#RUN cm run script --tags=get,dataset,preprocessed,imagenet + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,reference,_resnet50,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,reference,_resnet50,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_python_tensorflow_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_python_tensorflow_cpu.Dockerfile new file mode 100644 index 0000000000..b659014255 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_18.04_python_tensorflow_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:18.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_tensorflow +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script 
--tags=get,dataset,image-classification,imagenet,preprocessed,_NHWC +#RUN cm run script --tags=get,dataset-aux,image-classification,imagenet-aux +#RUN cm run script --tags=get,ml-model,image-classification,resnet50,_tensorflow +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_cpp_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_cpp_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..1835af9d13 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_cpp_onnxruntime_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:20.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,image-classification,imagenet,preprocessed,_NCHW +#RUN cm run script --tags=get,dataset-aux,image-classification,imagenet-aux +#RUN cm run script --tags=get,ml-model,image-classification,resnet50,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..701cbbe782 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:20.04 +SHELL ["/bin/bash", "-c"] +ARG 
CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,image-classification,imagenet,preprocessed,_NCHW +#RUN cm run script --tags=get,dataset-aux,image-classification,imagenet-aux +#RUN cm run script --tags=get,ml-model,image-classification,resnet50,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_python_pytorch_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_python_pytorch_cpu.Dockerfile new file mode 100644 index 0000000000..2f1a41dfbb --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_python_pytorch_cpu.Dockerfile @@ -0,0 +1,42 @@ +FROM ubuntu:20.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN +ARG CM_MLPERF_LOADGEN_MODE=accuracy +ARG CM_MLPERF_LOADGEN_SCENARIO=Offline +ARG CM_TEST_QUERY_COUNT=10 + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +# Install/customize individual CM components for MLPerf +#RUN cm run script --tags=get,generic-python-lib,_pytorch +#RUN cm run script --tags=get-ml-model,resnet50,_pytorch +#RUN cm run script --tags=get,dataset,preprocessed,imagenet + +# Run CM workflow for MLPerf inference +RUN cm run script 
--tags=app,mlperf,inference,generic,reference,_resnet50,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,reference,_resnet50,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_python_tensorflow_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_python_tensorflow_cpu.Dockerfile new file mode 100644 index 0000000000..400890fcf1 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_20.04_python_tensorflow_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:20.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_tensorflow +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,image-classification,imagenet,preprocessed,_NHWC +#RUN cm run script --tags=get,dataset-aux,image-classification,imagenet-aux +#RUN cm run script --tags=get,ml-model,image-classification,resnet50,_tensorflow +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_cpp_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_cpp_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..dfd05a56da --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_cpp_onnxruntime_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:22.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN 
groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,image-classification,imagenet,preprocessed,_NCHW +#RUN cm run script --tags=get,dataset-aux,image-classification,imagenet-aux +#RUN cm run script --tags=get,ml-model,image-classification,resnet50,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_python_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_python_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..9264a0cb38 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_python_onnxruntime_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:22.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,image-classification,imagenet,preprocessed,_NCHW +#RUN cm run script --tags=get,dataset-aux,image-classification,imagenet-aux +#RUN cm run script --tags=get,ml-model,image-classification,resnet50,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script 
--tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_python_pytorch_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_python_pytorch_cpu.Dockerfile new file mode 100644 index 0000000000..b01946b2e4 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_python_pytorch_cpu.Dockerfile @@ -0,0 +1,42 @@ +FROM ubuntu:22.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN +ARG CM_MLPERF_LOADGEN_MODE=accuracy +ARG CM_MLPERF_LOADGEN_SCENARIO=Offline +ARG CM_TEST_QUERY_COUNT=10 + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +# Install/customize individual CM components for MLPerf +#RUN cm run script --tags=get,generic-python-lib,_pytorch +#RUN cm run script --tags=get-ml-model,resnet50,_pytorch +#RUN cm run script --tags=get,dataset,preprocessed,imagenet + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,reference,_resnet50,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,reference,_resnet50,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_python_tensorflow_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_python_tensorflow_cpu.Dockerfile new file mode 100644 index 0000000000..0f598efcc2 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/resnet50/ubuntu_22.04_python_tensorflow_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:22.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm 
+#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_tensorflow +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,image-classification,imagenet,preprocessed,_NHWC +#RUN cm run script --tags=get,dataset-aux,image-classification,imagenet-aux +#RUN cm run script --tags=get,ml-model,image-classification,resnet50,_tensorflow +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_resnet50,_tensorflow,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/retinanet/_test.sh b/script/app-mlperf-inference/dockerfiles/retinanet/_test.sh new file mode 100644 index 0000000000..00326dd5db --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/retinanet/_test.sh @@ -0,0 +1,2 @@ +cm run script --tags=app,mlperf,inference,generic,reference,_retinanet,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --regenerate_files -v --time + diff --git a/script/app-mlperf-inference/dockerfiles/retinanet/rhel_9_cpp_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/retinanet/rhel_9_cpp_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..379d263689 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/retinanet/rhel_9_cpp_onnxruntime_cpu.Dockerfile @@ -0,0 +1,48 @@ +FROM registry.access.redhat.com/ubi9 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN dnf update -y +RUN dnf install -y python3 python-pip git wget sudo binutils + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,object-detection,open-images,openimages,preprocessed,_validation,_NCHW,_50 +#RUN cm run script --tags=get,ml-model,object-detection,resnext50,fp32,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script 
--tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/retinanet/rhel_9_python_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/retinanet/rhel_9_python_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..474cc8a572 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/retinanet/rhel_9_python_onnxruntime_cpu.Dockerfile @@ -0,0 +1,48 @@ +FROM registry.access.redhat.com/ubi9 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN dnf update -y +RUN dnf install -y python3 python-pip git wget sudo binutils + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,object-detection,open-images,openimages,preprocessed,_validation,_NCHW,_50 +#RUN cm run script --tags=get,ml-model,object-detection,resnext50,fp32,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/retinanet/rhel_9_python_pytorch_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/retinanet/rhel_9_python_pytorch_cpu.Dockerfile new file mode 100644 index 0000000000..e42fa71cc5 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/retinanet/rhel_9_python_pytorch_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM registry.access.redhat.com/ubi9 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN dnf update -y +RUN dnf install -y python3 python-pip git wget sudo binutils + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell 
/bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_torch +#RUN cm run script --tags=get,generic-python-lib,_torchvision +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,object-detection,open-images,openimages,preprocessed,_validation,_NCHW,_50 +#RUN cm run script --tags=get,ml-model,object-detection,resnext50,fp32,_pytorch +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_18.04_cpp_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_18.04_cpp_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..955a6ba828 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_18.04_cpp_onnxruntime_cpu.Dockerfile @@ -0,0 +1,48 @@ +FROM ubuntu:18.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,object-detection,open-images,openimages,preprocessed,_validation,_NCHW,_50 +#RUN cm run script --tags=get,ml-model,object-detection,resnext50,fp32,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc 
--adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_18.04_python_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_18.04_python_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..823759cd39 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_18.04_python_onnxruntime_cpu.Dockerfile @@ -0,0 +1,48 @@ +FROM ubuntu:18.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,object-detection,open-images,openimages,preprocessed,_validation,_NCHW,_50 +#RUN cm run script --tags=get,ml-model,object-detection,resnext50,fp32,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_18.04_python_pytorch_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_18.04_python_pytorch_cpu.Dockerfile new file mode 100644 index 0000000000..7eff2307f6 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_18.04_python_pytorch_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:18.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR 
/home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_torch +#RUN cm run script --tags=get,generic-python-lib,_torchvision +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,object-detection,open-images,openimages,preprocessed,_validation,_NCHW,_50 +#RUN cm run script --tags=get,ml-model,object-detection,resnext50,fp32,_pytorch +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_20.04_cpp_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_20.04_cpp_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..538176ed8a --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_20.04_cpp_onnxruntime_cpu.Dockerfile @@ -0,0 +1,48 @@ +FROM ubuntu:20.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,object-detection,open-images,openimages,preprocessed,_validation,_NCHW,_50 +#RUN cm run script --tags=get,ml-model,object-detection,resnext50,fp32,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script 
--tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..8ea38a9ae5 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile @@ -0,0 +1,48 @@ +FROM ubuntu:20.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,object-detection,open-images,openimages,preprocessed,_validation,_NCHW,_50 +#RUN cm run script --tags=get,ml-model,object-detection,resnext50,fp32,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_20.04_python_pytorch_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_20.04_python_pytorch_cpu.Dockerfile new file mode 100644 index 0000000000..7d05ba21fe --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_20.04_python_pytorch_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:20.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo 
mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_torch +#RUN cm run script --tags=get,generic-python-lib,_torchvision +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,object-detection,open-images,openimages,preprocessed,_validation,_NCHW,_50 +#RUN cm run script --tags=get,ml-model,object-detection,resnext50,fp32,_pytorch +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_22.04_cpp_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_22.04_cpp_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..c0dcc0bf0d --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_22.04_cpp_onnxruntime_cpu.Dockerfile @@ -0,0 +1,48 @@ +FROM ubuntu:22.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,object-detection,open-images,openimages,preprocessed,_validation,_NCHW,_50 +#RUN cm run script --tags=get,ml-model,object-detection,resnext50,fp32,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_cpp --adr.compiler.tags=gcc 
--adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_22.04_python_onnxruntime_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_22.04_python_onnxruntime_cpu.Dockerfile new file mode 100644 index 0000000000..47cbcbf2af --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_22.04_python_onnxruntime_cpu.Dockerfile @@ -0,0 +1,48 @@ +FROM ubuntu:22.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_onnxruntime +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,object-detection,open-images,openimages,preprocessed,_validation,_NCHW,_50 +#RUN cm run script --tags=get,ml-model,object-detection,resnext50,fp32,_onnx +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_onnxruntime,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_22.04_python_pytorch_cpu.Dockerfile b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_22.04_python_pytorch_cpu.Dockerfile new file mode 100644 index 0000000000..e6f92025c8 --- /dev/null +++ b/script/app-mlperf-inference/dockerfiles/retinanet/ubuntu_22.04_python_pytorch_cpu.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:22.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet 
--tags=get,sys-utils-cm + +# Run commands +#RUN cm run script --tags=detect,os +#RUN cm run script --tags=detect,cpu +#RUN cm run script --tags=get,sys-utils-cm +#RUN cm run script --tags=get,python +#RUN cm run script --tags=get,generic-python-lib,_torch +#RUN cm run script --tags=get,generic-python-lib,_torchvision +#RUN cm run script --tags=get,loadgen +#RUN cm run script --tags=get,mlcommons,inference,src,_octoml +#RUN cm run script --tags=get,sut,configs +#RUN cm run script --tags=get,dataset,object-detection,open-images,openimages,preprocessed,_validation,_NCHW,_50 +#RUN cm run script --tags=get,ml-model,object-detection,resnext50,fp32,_pytorch +#RUN cm run script --tags=get,generic-python-lib,_opencv-python +#RUN cm run script --tags=get,generic-python-lib,_numpy +#RUN cm run script --tags=get,generic-python-lib,_pycocotools + +# Run CM workflow for MLPerf inference +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml --fake_run +RUN cm run script --tags=app,mlperf,inference,generic,_retinanet,_pytorch,_cpu,_python --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml diff --git a/script/app-mlperf-inference/run.sh b/script/app-mlperf-inference/run.sh new file mode 100644 index 0000000000..1d0c1244c7 --- /dev/null +++ b/script/app-mlperf-inference/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +cmd="${CMD}" +if [[ -n ${cmd} ]]; then + echo "$cmd" + eval "$cmd" + test $? -eq 0 || exit $? +fi diff --git a/script/app-mlperf-inference/run_config.yml b/script/app-mlperf-inference/run_config.yml new file mode 100644 index 0000000000..03ec8b0273 --- /dev/null +++ b/script/app-mlperf-inference/run_config.yml @@ -0,0 +1,11 @@ +docker: + build: true + docker_os: ubuntu + docker_os_version: "22.04" + +run_with_default_inputs: true # if false, the script won't run automatic tests +variation_combinations: + - _bert,_nvidia-original: + minimum_system_requirements: + ram: 4 # in GB + disk_space: 6 # in GB diff --git a/script/app-mlperf-inference/verify_accuracy.sh b/script/app-mlperf-inference/verify_accuracy.sh new file mode 100644 index 0000000000..5a8cec92a9 --- /dev/null +++ b/script/app-mlperf-inference/verify_accuracy.sh @@ -0,0 +1,4 @@ +#!/bin/bash +echo "Running: $CMD" +eval "$CMD" +test $? -eq 0 || exit $? diff --git a/script/app-mlperf-training-nvidia/README.md b/script/app-mlperf-training-nvidia/README.md new file mode 100644 index 0000000000..fcc4a83f94 --- /dev/null +++ b/script/app-mlperf-training-nvidia/README.md @@ -0,0 +1,240 @@ +Automatically generated README for this automation recipe: **app-mlperf-training-nvidia** + +Category: **Modular MLPerf training benchmark pipeline** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-mlperf-training-nvidia,1e2e357618cc4674) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-nvidia)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *app,vision,language,mlcommons,mlperf,training,nvidia* +* Output cached?
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "app vision language mlcommons mlperf training nvidia" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=app,vision,language,mlcommons,mlperf,training,nvidia` + +`cm run script --tags=app,vision,language,mlcommons,mlperf,training,nvidia[,variations] [--input_flags]` + +*or* + +`cmr "app vision language mlcommons mlperf training nvidia"` + +`cmr "app vision language mlcommons mlperf training nvidia [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +<details>
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'app,vision,language,mlcommons,mlperf,training,nvidia', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print(r['error']) + +``` + +</details>
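+*The `(other input keys for this script)` placeholder stands for the script flags listed under [Script flags mapped to environment](#script-flags-mapped-to-environment) below; they are passed as additional top-level keys of the same dictionary (for example, `'clean': True` corresponds to `--clean=true` on the command line).*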
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="app,vision,language,mlcommons,mlperf,training,nvidia"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=app,vision,language,mlcommons,mlperf,training,nvidia) to generate the CM command. + +#### Run this script via Docker (beta) + +`cm docker script "app vision language mlcommons mlperf training nvidia[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +<details>
+ Click here to expand this section. + + * `_bert` + - Environment variables: + - *CM_MLPERF_MODEL*: `bert` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_protobuf + * `if (CM_MLPERF_BACKEND in ['tf', 'tflite'])` + * CM names: `--adr.['protobuf']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torch + * CM names: `--adr.['ml-engine-pytorch']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
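+ For illustration (an assumed but documented combination of the variations listed on this page), the `_bert` variation can be combined with a variation from the framework group below by appending both to the tags: + +``` +cm run script --tags=app,vision,language,mlcommons,mlperf,training,nvidia,_bert,_pytorch +``` +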
+ + + * Group "**device**" +
+ Click here to expand this section. + + * **`_cuda`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `cuda` + - *USE_CUDA*: `True` + - Workflow: + * `_tpu` + - Environment variables: + - *CM_MLPERF_DEVICE*: `tpu` + - *CUDA_VISIBLE_DEVICES*: `` + - *USE_CUDA*: `False` + - Workflow: + +
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * `_pytorch` + - Environment variables: + - *CM_MLPERF_BACKEND*: `pytorch` + - *CM_MLPERF_BACKEND_VERSION*: `<<<CM_TORCH_VERSION>>>` + - Workflow: + * `_tf` + - Aliases: `_tensorflow` + - Environment variables: + - *CM_MLPERF_BACKEND*: `tf` + - *CM_MLPERF_BACKEND_VERSION*: `<<<CM_TENSORFLOW_VERSION>>>` + - Workflow: + +</details>
+ + +#### Default variations + +`_cuda` + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value` +* `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value` +* `--hw_name=value` → `CM_HW_NAME=value` +* `--model=value` → `CM_MLPERF_CUSTOM_MODEL_PATH=value` +* `--num_threads=value` → `CM_NUM_THREADS=value` +* `--output_dir=value` → `OUTPUT_BASE_DIR=value` +* `--rerun=value` → `CM_RERUN=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "clean":...}) +``` + +</details>
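+For illustration, a complete call setting one of these flags (assuming the same `cmind` API as in the Python example above) could look as follows: + +```python +import cmind + +# 'clean' maps to CM_MLPERF_CLEAN_SUBMISSION_DIR (see the flag list above) +r = cmind.access({'action': 'run', + 'automation': 'script', + 'tags': 'app,vision,language,mlcommons,mlperf,training,nvidia', + 'out': 'con', + 'clean': True}) + +if r['return']>0: + print(r['error']) +``` +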
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `nvidia` + +
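+For example (with an illustrative value), a default key can be overridden when running the script: + +``` +cm run script --tags=app,vision,language,mlcommons,mlperf,training,nvidia --env.CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX=custom-prefix +``` +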
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-nvidia/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,python + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,mlperf,training,src + * CM names: `--adr.['training-src', 'mlperf-training-src']...` + - CM script: [get-mlperf-training-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-training-src) + * get,git,repo,_repo.https://github.com/mlcommons/training_results_v2.1 + * CM names: `--adr.['training-results', 'mlperf-training-results']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + * get,cuda + * `if (CM_MLPERF_DEVICE == cuda)` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,generic-python-lib,_torchvision_cuda + * `if (CM_MLPERF_BACKEND == pytorch AND CM_MLPERF_DEVICE == cuda)` + * CM names: `--adr.['ml-engine-torchvision']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_mlperf_logging + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * prepare,mlperf,training,data,bert,_nvidia + * `if (CM_MLPERF_MODEL == bert)` + * CM names: `--adr.['prepare-data', 'bert-model']...` + - CM script: [prepare-training-data-bert](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/prepare-training-data-bert) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-nvidia/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-nvidia/_cm.yaml) + 1. ***Run native script if exists*** + * [run-bert-training.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-nvidia/run-bert-training.sh) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-nvidia/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-nvidia/_cm.yaml) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-nvidia/customize.py)*** + 1.
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-nvidia/_cm.yaml) + +___ +### Script output +`cmr "app vision language mlcommons mlperf training nvidia [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +* `CM_HW_NAME` +* `CM_MLPERF_*` +* `CM_ML_MODEL_*` +#### New environment keys auto-detected from customize diff --git a/script/app-mlperf-training-nvidia/_cm.yaml b/script/app-mlperf-training-nvidia/_cm.yaml new file mode 100644 index 0000000000..a2fad3584f --- /dev/null +++ b/script/app-mlperf-training-nvidia/_cm.yaml @@ -0,0 +1,156 @@ +# Identification of this CM script +alias: app-mlperf-training-nvidia +uid: 1e2e357618cc4674 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf training benchmark pipeline" + +# User-friendly tags to find this CM script +tags: + - app + - vision + - language + - mlcommons + - mlperf + - training + - nvidia + +# Default environment +default_env: + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: nvidia + +# Map script inputs to environment variables +input_mapping: + docker: CM_RUN_DOCKER_CONTAINER + hw_name: CM_HW_NAME + num_threads: CM_NUM_THREADS + model: CM_MLPERF_CUSTOM_MODEL_PATH + output_dir: OUTPUT_BASE_DIR + rerun: CM_RERUN + clean: CM_MLPERF_CLEAN_SUBMISSION_DIR + + +# Env keys which are exposed to higher level scripts +new_env_keys: + - CM_MLPERF_* + - CM_DATASET_* + - CM_HW_NAME + - CM_ML_MODEL_* + +new_state_keys: + - mlperf-inference-implementation + - CM_SUT_* + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect/install python + - tags: get,python + names: + - python + - python3 + + # Get MLPerf training source + - tags: get,mlperf,training,src + names: + - training-src + - mlperf-training-src + + + - tags: get,git,repo,_repo.https://github.com/mlcommons/training_results_v2.1 + extra_cache_tags: mlperf,training,results,v2.1 + names: + - training-results + - mlperf-training-results + + # Detect CUDA if required + - tags: get,cuda + enable_if_env: + CM_MLPERF_DEVICE: + - cuda + + + ## Torchvision (CUDA) + - tags: get,generic-python-lib,_torchvision_cuda + names: + - ml-engine-torchvision + enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + CM_MLPERF_DEVICE: + - cuda + + - tags: get,generic-python-lib,_mlperf_logging + + + ######################################################################## + # Prepare Data + + ## BERT + - tags: prepare,mlperf,training,data,bert,_nvidia + names: + - prepare-data + - bert-model + enable_if_env: + CM_MLPERF_MODEL: + - bert + + + +# Variations to customize dependencies +variations: + pytorch: + group: framework + env: + CM_MLPERF_BACKEND: pytorch + CM_MLPERF_BACKEND_VERSION: <<>> + + tf: + group: framework + env: + CM_MLPERF_BACKEND: tf + CM_MLPERF_BACKEND_VERSION: <<>> + + tensorflow: + alias: tf + + # Reference MLPerf models + bert: + env: + CM_MLPERF_MODEL: bert + deps: + - tags: get,generic-python-lib,_protobuf + names: + - protobuf + version_max: "3.19" + enable_if_env: + CM_MLPERF_BACKEND: + - tf + - tflite + - tags: get,generic-python-lib,_torch + names: + - ml-engine-pytorch + tpu: + group: device + env: + CM_MLPERF_DEVICE: tpu + CUDA_VISIBLE_DEVICES: '' + USE_CUDA: no + + cuda: + group: device + default: true + env: + CM_MLPERF_DEVICE: cuda + USE_CUDA: yes diff --git 
a/script/app-mlperf-training-nvidia/customize.py b/script/app-mlperf-training-nvidia/customize.py new file mode 100644 index 0000000000..3c5fdf6d8a --- /dev/null +++ b/script/app-mlperf-training-nvidia/customize.py @@ -0,0 +1,57 @@ +from cmind import utils +import os +import json +import shutil +import subprocess + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + script_path = i['run_script_input']['path'] + + if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + return {'return':0} + + if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + return {'return':0} + + if env.get('CM_MLPERF_POWER','') == "yes": + power = "yes" + else: + power = "no" + + rerun = True if env.get("CM_RERUN","")!='' else False + + if 'CM_MLPERF_MODEL' not in env: + return {'return': 1, 'error': "Please select a variation specifying the model to run"} + + + if 'CM_NUM_THREADS' not in env: + if 'CM_MINIMIZE_THREADS' in env: + env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // \ + (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + else: + env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') + + print("Using MLCommons Training source from '" + env['CM_MLPERF_TRAINING_SOURCE'] +"'") + + + NUM_THREADS = env['CM_NUM_THREADS'] + + if "bert" in env['CM_MLPERF_MODEL']: + env['CM_RUN_DIR'] = os.path.join(env['CM_GIT_REPO_CHECKOUT_PATH'], "NVIDIA", "benchmarks", "bert", "implementations", "pytorch-22.09") + + if "resnet" in env['CM_MLPERF_MODEL']: + env['CM_RUN_DIR'] = os.path.join(env['CM_GIT_REPO_CHECKOUT_PATH'], "NVIDIA", "benchmarks", "resnet", "implementations", "mxnet-22.04") + + env['CM_RESULTS_DIR'] = os.getcwd() + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/app-mlperf-training-nvidia/run-bert-training.sh b/script/app-mlperf-training-nvidia/run-bert-training.sh new file mode 100644 index 0000000000..1515404f36 --- /dev/null +++ b/script/app-mlperf-training-nvidia/run-bert-training.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +source ./config_DGXA100_1x8x56x1.sh +results_dir=${CM_RESULTS_DIR} +cmd="CONT=mlperf-nvidia:language_model DATADIR=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/training-4320/hdf5_4320_shards_varlength DATADIR_PHASE2=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/training-4320/hdf5_4320_shards_varlength EVALDIR=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/eval_varlength/ CHECKPOINTDIR=${results_dir} CHECKPOINTDIR_PHASE1=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/phase1 ./run_with_docker.sh" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit $? + diff --git a/script/app-mlperf-training-nvidia/run.sh b/script/app-mlperf-training-nvidia/run.sh new file mode 100644 index 0000000000..2f15ea73b5 --- /dev/null +++ b/script/app-mlperf-training-nvidia/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +cmd="cd ${CM_RUN_DIR}" +echo "$cmd" +eval "$cmd" + +if [[ ${CM_MLPERF_MODEL} == "bert" ]]; then + bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-bert-training.sh + test $? -eq 0 || exit $? 
+fi
diff --git a/script/app-mlperf-training-reference/README.md b/script/app-mlperf-training-reference/README.md
new file mode 100644
index 0000000000..479f01d0eb
--- /dev/null
+++ b/script/app-mlperf-training-reference/README.md
@@ -0,0 +1,238 @@
+Automatically generated README for this automation recipe: **app-mlperf-training-reference**
+
+Category: **Modular MLPerf training benchmark pipeline**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-mlperf-training-reference,0c4b11bdcf494b4f) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-reference)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see in above meta description): *app,vision,language,mlcommons,mlperf,training,reference,ref*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "app vision language mlcommons mlperf training reference ref" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=app,vision,language,mlcommons,mlperf,training,reference,ref`
+
+`cm run script --tags=app,vision,language,mlcommons,mlperf,training,reference,ref[,variations] [--input_flags]`
+
+*or*
+
+`cmr "app vision language mlcommons mlperf training reference ref"`
+
+`cmr "app vision language mlcommons mlperf training reference ref [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'app,vision,language,mlcommons,mlperf,training,reference,ref',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="app,vision,language,mlcommons,mlperf,training,reference,ref"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=app,vision,language,mlcommons,mlperf,training,reference,ref) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "app vision language mlcommons mlperf training reference ref[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_bert` + - Environment variables: + - *CM_MLPERF_MODEL*: `bert` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_protobuf + * `if (CM_MLPERF_BACKEND in ['tf', 'tflite'])` + * CM names: `--adr.['protobuf']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torch + * CM names: `--adr.['ml-engine-pytorch']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
+ + + * Group "**device**" +
+ Click here to expand this section. + + * **`_cuda`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `cuda` + - *USE_CUDA*: `True` + - Workflow: + * `_tpu` + - Environment variables: + - *CM_MLPERF_DEVICE*: `tpu` + - *CUDA_VISIBLE_DEVICES*: `` + - *USE_CUDA*: `False` + - Workflow: + +
+ + + * Group "**framework**" +
+ Click here to expand this section.
+
+  * `_pytorch`
+    - Environment variables:
+      - *CM_MLPERF_BACKEND*: `pytorch`
+      - *CM_MLPERF_BACKEND_VERSION*: `<<<CM_TORCH_VERSION>>>`
+    - Workflow:
+  * `_tf`
+    - Aliases: `_tensorflow`
+    - Environment variables:
+      - *CM_MLPERF_BACKEND*: `tf`
+      - *CM_MLPERF_BACKEND_VERSION*: `<<<CM_TENSORFLOW_VERSION>>>`
+    - Workflow:
+
+ + +#### Default variations + +`_cuda` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value`
+* `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value`
+* `--hw_name=value` → `CM_HW_NAME=value`
+* `--model=value` → `CM_MLPERF_CUSTOM_MODEL_PATH=value`
+* `--num_threads=value` → `CM_NUM_THREADS=value`
+* `--output_dir=value` → `OUTPUT_BASE_DIR=value`
+* `--rerun=value` → `CM_RERUN=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "clean":...})
+```
+
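+For example, a minimal sketch of a complete call through the Python API that combines several of these flags (the SUT name and output path below are illustrative placeholders, and the variation tags assume the `_bert,_pytorch,_cuda` setup described above):
+
+```python
+import cmind
+
+# Hedged sketch: run the reference MLPerf training pipeline with a few
+# of the input flags listed above; flag values are placeholders.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'app,vision,language,mlcommons,mlperf,training,reference,ref,_bert,_pytorch,_cuda',
+                  'out': 'con',
+                  'hw_name': 'my-workstation',               # illustrative SUT name
+                  'output_dir': '/tmp/mlperf-training-out',  # illustrative path
+                  'rerun': 'yes'})
+if r['return'] > 0:
+    print(r['error'])
+```
+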
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `reference` +* CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: `` + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-reference/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,python + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,mlperf,training,src + * CM names: `--adr.['training-src']...` + - CM script: [get-mlperf-training-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-training-src) + * get,cuda + * `if (CM_MLPERF_DEVICE == cuda)` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,generic-python-lib,_torchvision_cuda + * `if (CM_MLPERF_BACKEND == pytorch AND CM_MLPERF_DEVICE == cuda)` + * CM names: `--adr.['ml-engine-torchvision']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_mlperf_logging + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * prepare,mlperf,training,data,bert,_reference + * `if (CM_MLPERF_MODEL == bert)` + * CM names: `--adr.['prepare-data', 'bert-model']...` + - CM script: [prepare-training-data-bert](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/prepare-training-data-bert) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-reference/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-reference/_cm.yaml) + 1. ***Run native script if exists*** + * [run-bert-training.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-reference/run-bert-training.sh) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-reference/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-reference/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-reference/customize.py)*** + 1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-mlperf-training-reference/_cm.yaml) + +___ +### Script output +`cmr "app vision language mlcommons mlperf training reference ref [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +* `CM_HW_NAME` +* `CM_MLPERF_*` +* `CM_ML_MODEL_*` +#### New environment keys auto-detected from customize diff --git a/script/app-mlperf-training-reference/_cm.yaml b/script/app-mlperf-training-reference/_cm.yaml new file mode 100644 index 0000000000..56b4ad05da --- /dev/null +++ b/script/app-mlperf-training-reference/_cm.yaml @@ -0,0 +1,150 @@ +# Identification of this CM script +alias: app-mlperf-training-reference +uid: 0c4b11bdcf494b4f + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf training benchmark pipeline" + +# User-friendly tags to find this CM script +tags: + - app + - vision + - language + - mlcommons + - mlperf + - training + - reference + - ref + +# Default environment +default_env: + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference + CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: '' + +# Map script inputs to environment variables +input_mapping: + docker: CM_RUN_DOCKER_CONTAINER + hw_name: CM_HW_NAME + num_threads: CM_NUM_THREADS + model: CM_MLPERF_CUSTOM_MODEL_PATH + output_dir: OUTPUT_BASE_DIR + rerun: CM_RERUN + clean: CM_MLPERF_CLEAN_SUBMISSION_DIR + + +# Env keys which are exposed to higher level scripts +new_env_keys: + - CM_MLPERF_* + - CM_DATASET_* + - CM_HW_NAME + - CM_ML_MODEL_* + +new_state_keys: + - mlperf-inference-implementation + - CM_SUT_* + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect/install python + - tags: get,python + names: + - python + - python3 + + # Get MLPerf training source + - tags: get,mlperf,training,src + names: + - training-src + + # Detect CUDA if required + - tags: get,cuda + enable_if_env: + CM_MLPERF_DEVICE: + - cuda + + + ## Torchvision (CUDA) + - tags: get,generic-python-lib,_torchvision_cuda + names: + - ml-engine-torchvision + enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + CM_MLPERF_DEVICE: + - cuda + + - tags: get,generic-python-lib,_mlperf_logging + + + ######################################################################## + # Prepare Data + + ## BERT + - tags: prepare,mlperf,training,data,bert,_reference + names: + - prepare-data + - bert-model + enable_if_env: + CM_MLPERF_MODEL: + - bert + + + +# Variations to customize dependencies +variations: + pytorch: + group: framework + env: + CM_MLPERF_BACKEND: pytorch + CM_MLPERF_BACKEND_VERSION: <<>> + + tf: + group: framework + env: + CM_MLPERF_BACKEND: tf + CM_MLPERF_BACKEND_VERSION: <<>> + + tensorflow: + alias: tf + + # Reference MLPerf models + bert: + env: + CM_MLPERF_MODEL: bert + deps: + - tags: get,generic-python-lib,_protobuf + names: + - protobuf + version_max: "3.19" + enable_if_env: + CM_MLPERF_BACKEND: + - tf + - tflite + - tags: get,generic-python-lib,_torch + names: + - ml-engine-pytorch + tpu: + group: device + env: + CM_MLPERF_DEVICE: tpu + CUDA_VISIBLE_DEVICES: '' + USE_CUDA: no + + cuda: + group: device + default: true + env: + CM_MLPERF_DEVICE: cuda + USE_CUDA: yes diff --git a/script/app-mlperf-training-reference/customize.py b/script/app-mlperf-training-reference/customize.py new file mode 100644 index 
0000000000..f7c77bc55a --- /dev/null +++ b/script/app-mlperf-training-reference/customize.py @@ -0,0 +1,52 @@ +from cmind import utils +import os +import json +import shutil +import subprocess + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + script_path = i['run_script_input']['path'] + + if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + return {'return':0} + + if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + return {'return':0} + + if env.get('CM_MLPERF_POWER','') == "yes": + power = "yes" + else: + power = "no" + + rerun = True if env.get("CM_RERUN","")!='' else False + + if 'CM_MLPERF_MODEL' not in env: + return {'return': 1, 'error': "Please select a variation specifying the model to run"} + + + if 'CM_NUM_THREADS' not in env: + if 'CM_MINIMIZE_THREADS' in env: + env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // \ + (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + else: + env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') + + print("Using MLCommons Training source from '" + env['CM_MLPERF_TRAINING_SOURCE'] +"'") + + + NUM_THREADS = env['CM_NUM_THREADS'] + + if "bert" in env['CM_MLPERF_MODEL']: + env['CM_RUN_DIR'] = os.path.join(env['CM_MLPERF_TRAINING_SOURCE'], "language_model", "tensorflow", "bert") + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/app-mlperf-training-reference/run-bert-training.sh b/script/app-mlperf-training-reference/run-bert-training.sh new file mode 100644 index 0000000000..08ed5b70ae --- /dev/null +++ b/script/app-mlperf-training-reference/run-bert-training.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +export TF_XLA_FLAGS='--tf_xla_auto_jit=2' +train_batch_size=24 +cmd="python run_pretraining.py \ + --bert_config_file=${CM_MLPERF_TRAINING_BERT_CONFIG_PATH} \ + --output_dir=/tmp/output/ \ + --input_file=${CM_MLPERF_TRAINING_BERT_TFRECORDS_PATH}/part* \ + --nodo_eval \ + --do_train \ + --eval_batch_size=8 \ + --learning_rate=0.0001 \ + --init_checkpoint=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/phase1/model.ckpt-28252 \ + --iterations_per_loop=1000 \ + --max_predictions_per_seq=76 \ + --max_seq_length=512 \ + --num_train_steps=107538 \ + --num_warmup_steps=1562 \ + --optimizer=lamb \ + --save_checkpoints_steps=6250 \ + --start_warmup_step=0 \ + --num_gpus=1 \ + --train_batch_size=${train_batch_size}" +echo "${cmd}" +eval "${cmd}" +test $? -eq 0 || exit $? + diff --git a/script/app-mlperf-training-reference/run.sh b/script/app-mlperf-training-reference/run.sh new file mode 100644 index 0000000000..2f15ea73b5 --- /dev/null +++ b/script/app-mlperf-training-reference/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +cmd="cd ${CM_RUN_DIR}" +echo "$cmd" +eval "$cmd" + +if [[ ${CM_MLPERF_MODEL} == "bert" ]]; then + bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-bert-training.sh + test $? -eq 0 || exit $? 
+fi diff --git a/script/app-stable-diffusion-onnx-py/README-extra.md b/script/app-stable-diffusion-onnx-py/README-extra.md new file mode 100644 index 0000000000..ecab8070eb --- /dev/null +++ b/script/app-stable-diffusion-onnx-py/README-extra.md @@ -0,0 +1,30 @@ +# Examples + +CM interface for https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/onnx + +```bash +cm run script "install python-venv" --name=sd-test +cm run script "get generic-python-lib _package.optimum[onnxruntime]" --adr.python.name=sd-test +cm run script "activate python-venv" --name=sd-test + +cm run script "python app stable-diffusion onnx" --adr.python.name=sd-test --text="crazy programmer" + +cm rm cache -f +cm run script "python app stable-diffusion onnx _cuda" --adr.python.name=sd-test --text="crazy programmer" + +cm docker script "python app stable-diffusion onnx" --text="crazy programmer" --output=. --docker_cm_repo=ctuning@mlcommons-ck --env.CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO=xyz4 + +``` + + + +# Resources + +* https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0 +* https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main +* https://huggingface.co/CompVis/stable-diffusion-v1-4/tree/main +* https://huggingface.co/runwayml/stable-diffusion-v1-5 +* https://huggingface.co/bes-dev/stable-diffusion-v1-4-onnx +* https://onnxruntime.ai/docs/tutorials/csharp/stable-diffusion-csharp.html +* https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main +* https://huggingface.co/docs/optimum/onnxruntime/usage_guides/models diff --git a/script/app-stable-diffusion-onnx-py/README.md b/script/app-stable-diffusion-onnx-py/README.md new file mode 100644 index 0000000000..48302cf769 --- /dev/null +++ b/script/app-stable-diffusion-onnx-py/README.md @@ -0,0 +1,201 @@ +Automatically generated README for this automation recipe: **app-stable-diffusion-onnx-py** + +Category: **Modular AI/ML application pipeline** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=app-stable-diffusion-onnx-py,4d33981ac3534b3b) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-stable-diffusion-onnx-py)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *modular,python,app,stable-diffusion,onnx* +* Output cached? 
*False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "modular python app stable-diffusion onnx" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=modular,python,app,stable-diffusion,onnx`
+
+`cm run script --tags=modular,python,app,stable-diffusion,onnx[,variations] [--input_flags]`
+
+*or*
+
+`cmr "modular python app stable-diffusion onnx"`
+
+`cmr "modular python app stable-diffusion onnx [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+
+#### Input Flags
+
+* --**text**=Text to generate image
+* --**output**=Output directory
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "text":...})
+```
+#### Run this script from Python
+
+<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'modular,python,app,stable-diffusion,onnx',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="modular,python,app,stable-diffusion,onnx"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=modular,python,app,stable-diffusion,onnx) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "modular python app stable-diffusion onnx[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * Group "**target**" +
+ Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *USE_CPU*: `True` + - *CM_DEVICE*: `cpu` + - Workflow: + * `_cuda` + - Environment variables: + - *USE_CUDA*: `True` + - *CM_DEVICE*: `cuda:0` + - Workflow: + +
+ + +#### Default variations + +`_cpu` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--output=value` → `CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT=value`
+* `--text=value` → `CM_APP_STABLE_DIFFUSION_ONNX_PY_TEXT=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "output":...})
+```
+
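+For instance, a minimal sketch passing both flags through the Python API (the prompt below is the script's built-in default, and the output directory is illustrative):
+
+```python
+import cmind
+
+# Hedged sketch: generate one image with the ONNX Stable Diffusion app;
+# `text` and `output` map to the CM_APP_STABLE_DIFFUSION_ONNX_PY_* keys above.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'modular,python,app,stable-diffusion,onnx',
+                  'out': 'con',
+                  'text': 'a photo of an astronaut riding a horse on mars',
+                  'output': '.'})
+if r['return'] > 0:
+    print(r['error'])
+```
+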
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
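+As an illustration, a hedged sketch of setting an environment key through the `env` dictionary (equivalent to `--env.CM_DEVICE=cpu` on the command line; note that keys set by variations such as `_cuda` may take precedence over values passed this way):
+
+```python
+import cmind
+
+# Hedged sketch: pass an explicit `env` dictionary to the script run.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'modular,python,app,stable-diffusion,onnx',
+                  'out': 'con',
+                  'env': {'CM_DEVICE': 'cpu'}})
+if r['return'] > 0:
+    print(r['error'])
+```
+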
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-stable-diffusion-onnx-py/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,cuda + * `if (USE_CUDA == True)` + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,cudnn + * `if (USE_CUDA == True)` + * CM names: `--adr.['cudnn']...` + - CM script: [get-cudnn](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cudnn) + * get,generic-python-lib,_package.optimum[onnxruntime] + * `if (USE_CUDA != True)` + * CM names: `--adr.['optimum']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.optimum[onnxruntime-gpu] + * `if (USE_CUDA == True)` + * CM names: `--adr.['optimum']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.diffusers + * CM names: `--adr.['diffusers']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,ml-model,huggingface,zoo,_model-stub.runwayml/stable-diffusion-v1-5 + - CM script: [get-ml-model-huggingface-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-huggingface-zoo) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-stable-diffusion-onnx-py/_cm.yaml) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-stable-diffusion-onnx-py/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-stable-diffusion-onnx-py/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-stable-diffusion-onnx-py/_cm.yaml) + 1. Run "postrocess" function from customize.py + 1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/app-stable-diffusion-onnx-py/_cm.yaml) + +___ +### Script output +`cmr "modular python app stable-diffusion onnx [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/app-stable-diffusion-onnx-py/_cm.yaml b/script/app-stable-diffusion-onnx-py/_cm.yaml new file mode 100644 index 0000000000..306bebbb5f --- /dev/null +++ b/script/app-stable-diffusion-onnx-py/_cm.yaml @@ -0,0 +1,110 @@ +alias: app-stable-diffusion-onnx-py +uid: 4d33981ac3534b3b + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular AI/ML application pipeline" + +tags: +- app +- modular +- stable +- diffusion +- stable-diffusion +- onnx +- python + +tags_help: "modular python app stable-diffusion onnx" + + +deps: +- tags: detect,os +- tags: get,sys-utils-cm +- names: + - python + - python3 + tags: get,python3 + +- tags: get,cuda + names: + - cuda + enable_if_env: + USE_CUDA: + - yes +- tags: get,cudnn + names: + - cudnn + enable_if_env: + USE_CUDA: + - yes + + + + + + + +- tags: get,generic-python-lib,_package.optimum[onnxruntime] + names: + - optimum + skip_if_env: + USE_CUDA: + - yes + +- tags: get,generic-python-lib,_package.optimum[onnxruntime-gpu] + names: + - optimum + enable_if_env: + USE_CUDA: + - yes + +- tags: get,generic-python-lib,_package.diffusers + names: + - diffusers + + +- tags: get,ml-model,huggingface,zoo,_model-stub.runwayml/stable-diffusion-v1-5 + revision: onnx + model_filename: model_index.json + full_subfolder: . + + +variations: + cuda: + docker: + all_gpus: 'yes' + group: target + env: + USE_CUDA: yes + CM_DEVICE: cuda:0 + + cpu: + group: target + default: yes + env: + USE_CPU: yes + CM_DEVICE: cpu + +input_mapping: + text: CM_APP_STABLE_DIFFUSION_ONNX_PY_TEXT + output: CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT + + +input_description: + text: + desc: "Text to generate image" + output: + desc: "Output directory" + + +docker: + skip_run_cmd: 'no' + input_paths: + - output + add_quotes_to_keys: + - text + skip_input_for_fake_run: + - text + - output + - env.CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO diff --git a/script/app-stable-diffusion-onnx-py/process.py b/script/app-stable-diffusion-onnx-py/process.py new file mode 100644 index 0000000000..0f759089b7 --- /dev/null +++ b/script/app-stable-diffusion-onnx-py/process.py @@ -0,0 +1,34 @@ +# https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/onnx + +import os + +from optimum.onnxruntime import ORTStableDiffusionPipeline + +output = os.environ.get('CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT','') + +f = os.path.join(output, 'output.png') + +if os.path.isfile(f): + os.remove(f) + +cm_model_path = os.environ.get('CM_ML_MODEL_PATH','') +if cm_model_path == '': + print ('Error: CM_ML_MODEL_PATH env is not defined') + exit(1) + +device = os.environ.get('CM_DEVICE','') + +pipeline = ORTStableDiffusionPipeline.from_pretrained(cm_model_path, local_files_only=True).to(device) + +text = os.environ.get('CM_APP_STABLE_DIFFUSION_ONNX_PY_TEXT','') +if text == '': text = "a photo of an astronaut riding a horse on mars" + + +print ('') +print ('Generating imaged based on "{}"'.format(text)) + +image = pipeline(text).images[0] + +image.save(f) + +print ('Image recorded to "{}"'.format(f)) diff --git a/script/app-stable-diffusion-onnx-py/run.bat b/script/app-stable-diffusion-onnx-py/run.bat new file mode 100644 index 0000000000..fbcf3a07ef --- /dev/null 
+++ b/script/app-stable-diffusion-onnx-py/run.bat
@@ -0,0 +1,2 @@
+%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\process.py
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/app-stable-diffusion-onnx-py/run.sh b/script/app-stable-diffusion-onnx-py/run.sh
new file mode 100644
index 0000000000..efffec67f0
--- /dev/null
+++ b/script/app-stable-diffusion-onnx-py/run.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/process.py
+test $? -eq 0 || exit 1
diff --git a/script/benchmark-any-mlperf-inference-implementation/README.md b/script/benchmark-any-mlperf-inference-implementation/README.md
new file mode 100644
index 0000000000..373ddc45c6
--- /dev/null
+++ b/script/benchmark-any-mlperf-inference-implementation/README.md
@@ -0,0 +1,270 @@
+Automatically generated README for this automation recipe: **benchmark-any-mlperf-inference-implementation**
+
+Category: **MLPerf benchmark support**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=benchmark-any-mlperf-inference-implementation,8d3cd46f54464810) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-any-mlperf-inference-implementation)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see in above meta description): *benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models`
+
+`cm run script --tags=benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models[,variations] [--input_flags]`
+
+*or*
+
+`cmr "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models"`
+
+`cmr "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_aws-dl2q.24xlarge,qualcomm` + - Workflow: + * `_mini,power` + - Workflow: + * `_orin,power` + - Workflow: + * `_phoenix,nvidia` + - Workflow: + * `_phoenix,power` + - Workflow: + * `_phoenix,reference` + - Workflow: + * `_rb6,power` + - Workflow: + * `_rb6,qualcomm` + - Workflow: + * `_rpi4,power` + - Workflow: + * `_sapphire-rapids.24c,nvidia` + - Workflow: + +
+ + + * Group "**implementation**" +
+ Click here to expand this section. + + * `_deepsparse` + - Environment variables: + - *DIVISION*: `open` + - *IMPLEMENTATION*: `deepsparse` + - Workflow: + * `_intel` + - Environment variables: + - *IMPLEMENTATION*: `intel` + - Workflow: + * `_mil` + - Environment variables: + - *IMPLEMENTATION*: `mil` + - Workflow: + * `_nvidia` + - Environment variables: + - *IMPLEMENTATION*: `nvidia-original` + - Workflow: + * `_qualcomm` + - Environment variables: + - *IMPLEMENTATION*: `qualcomm` + - Workflow: + * `_reference` + - Environment variables: + - *IMPLEMENTATION*: `reference` + - Workflow: + * `_tflite-cpp` + - Environment variables: + - *IMPLEMENTATION*: `tflite_cpp` + - Workflow: + +
+ + + * Group "**power**" +
+ Click here to expand this section. + + * **`_performance-only`** (default) + - Workflow: + * `_power` + - Environment variables: + - *POWER*: `True` + - Workflow: + +
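+For example, a hedged sketch of a power-measured run with the `_power` variation (the server address and port mirror the defaults from this script's meta and stand in for a real MLPerf power server on your network):
+
+```python
+import cmind
+
+# Hedged sketch: power measurement requires a reachable MLPerf power server.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models,_reference,_power',
+                  'out': 'con',
+                  'models': 'resnet50',
+                  'devices': 'cpu',
+                  'power_server': '192.168.1.166',
+                  'power_server_port': '4950'})
+if r['return'] > 0:
+    print(r['error'])
+```
+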
+ + + * Group "**sut**" +
+ Click here to expand this section. + + * `_aws-dl2q.24xlarge` + - Workflow: + * `_macbookpro-m1` + - Environment variables: + - *CATEGORY*: `edge` + - *DIVISION*: `closed` + - Workflow: + * `_mini` + - Workflow: + * `_orin` + - Workflow: + * `_orin.32g` + - Environment variables: + - *CATEGORY*: `edge` + - *DIVISION*: `closed` + - Workflow: + * `_phoenix` + - Environment variables: + - *CATEGORY*: `edge` + - *DIVISION*: `closed` + - Workflow: + * `_rb6` + - Workflow: + * `_rpi4` + - Workflow: + * `_sapphire-rapids.24c` + - Environment variables: + - *CATEGORY*: `edge` + - *DIVISION*: `closed` + - Workflow: + +
+ + +#### Default variations + +`_performance-only` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--backends=value` → `BACKENDS=value`
+* `--category=value` → `CATEGORY=value`
+* `--devices=value` → `DEVICES=value`
+* `--division=value` → `DIVISION=value`
+* `--extra_args=value` → `EXTRA_ARGS=value`
+* `--models=value` → `MODELS=value`
+* `--power_server=value` → `POWER_SERVER=value`
+* `--power_server_port=value` → `POWER_SERVER_PORT=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "backends":...})
+```
+
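+For example, a minimal sketch benchmarking two reference models on CPU (the comma-separated lists follow the flag conventions above; `onnxruntime` is one of the backends this script selects by default for these models):
+
+```python
+import cmind
+
+# Hedged sketch: run the reference implementation for two models on CPU.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models,_reference',
+                  'out': 'con',
+                  'models': 'resnet50,retinanet',
+                  'backends': 'onnxruntime',
+                  'devices': 'cpu'})
+if r['return'] > 0:
+    print(r['error'])
+```
+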
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* DIVISION: `open` +* CATEGORY: `edge` + +
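+These defaults can be overridden per run; for instance, a hedged sketch of a closed datacenter run (the same effect as `--division=closed --category=datacenter` on the command line):
+
+```python
+import cmind
+
+# Hedged sketch: override the default division/category for this run.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models,_nvidia',
+                  'out': 'con',
+                  'models': 'resnet50',
+                  'devices': 'cuda',
+                  'division': 'closed',
+                  'category': 'datacenter'})
+if r['return'] > 0:
+    print(r['error'])
+```
+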
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-any-mlperf-inference-implementation/_cm.yaml)*** + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-any-mlperf-inference-implementation/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-any-mlperf-inference-implementation/_cm.yaml) + 1. ***Run native script if exists*** + * [run-template.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-any-mlperf-inference-implementation/run-template.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-any-mlperf-inference-implementation/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-any-mlperf-inference-implementation/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-any-mlperf-inference-implementation/_cm.yaml) + +___ +### Script output +`cmr "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/benchmark-any-mlperf-inference-implementation/_cm.yaml b/script/benchmark-any-mlperf-inference-implementation/_cm.yaml new file mode 100644 index 0000000000..5f1ae4ad65 --- /dev/null +++ b/script/benchmark-any-mlperf-inference-implementation/_cm.yaml @@ -0,0 +1,318 @@ +alias: benchmark-any-mlperf-inference-implementation +uid: 8d3cd46f54464810 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: false + +category: MLPerf benchmark support + +deps: + - tags: detect,cpu + +default_env: + DIVISION: open + CATEGORY: edge + +input_mapping: + models: MODELS + backends: BACKENDS + devices: DEVICES + division: DIVISION + category: CATEGORY + power_server: POWER_SERVER + power_server_port: POWER_SERVER_PORT + extra_args: EXTRA_ARGS + +tags: + - benchmark + - run + - natively + - all + - inference + - any + - mlperf + - mlperf-implementation + - implementation + - mlperf-models + +variations: + + mil: + group: implementation + env: + IMPLEMENTATION: mil + default_env: + MODELS: resnet50,retinanet + BACKENDS: onnxruntime + DEVICES: cpu,cuda + + reference: + group: implementation + env: + IMPLEMENTATION: reference + default_env: + MODELS: resnet50,retinanet,bert-99,3d-unet-99,rnnt + CATEGORY: edge + + nvidia: + group: implementation + env: + IMPLEMENTATION: nvidia-original + default_env: + MODELS: resnet50,retinanet,bert-99,bert-99.9,3d-unet-99,rnnt,gptj-99,gptj-99.9,dlrmv2-99,dlrmv2-99.9 + BACKENDS: tensorrt + DEVICES: cuda + + qualcomm: + group: implementation + env: + IMPLEMENTATION: qualcomm + default_env: + MODELS: resnet50,retinanet,bert-99,bert-99.9 + DIVISION: closed + BACKENDS: glow + DEVICES: qaic + + intel: + group: implementation + env: + IMPLEMENTATION: intel + default_env: + MODELS: resnet50,retinanet,bert-99,3d-unet-99,rnnt + DIVISION: closed + BACKENDS: pytorch + DEVICES: cpu + + deepsparse: + group: implementation + env: + DIVISION: open + 
IMPLEMENTATION: deepsparse + default_env: + MODELS: bert-99 + BACKENDS: deepsparse + DEVICES: cpu + + tflite-cpp: + group: implementation + env: + IMPLEMENTATION: tflite_cpp + default_env: + MODELS: mobilenets + CATEGORY: edge + DIVISION: open + BACKENDS: tflite + DEVICES: cpu + + performance-only: + group: power + default: true + + power: + group: power + env: + POWER: yes + default_env: + POWER_SERVER: 192.168.1.166 + POWER_SERVER_PORT: 4950 + + phoenix,power: + default_env: + POWER_SERVER: 192.168.1.79 + rb6,power: + default_env: + POWER_SERVER: 192.168.1.166 + orin,power: + default_env: + POWER_SERVER: 192.168.1.166 + rpi4,power: + default_env: + POWER_SERVER: 192.168.1.166 + mini,power: + default_env: + POWER_SERVER: 192.168.1.166 + rb6: + group: sut + orin: + group: sut + rpi4: + group: sut + mini: + group: sut + phoenix: + group: sut + env: + CATEGORY: edge + DIVISION: closed + state: + resnet50: + cpu: + onnxruntime: + offline_target_qps: 250 + + phoenix,reference: + default_env: + DEVICES: cpu,cuda + + phoenix,nvidia: + default_env: + EXTRA_ARGS: " --gpu_name=rtx_4090" + state: + resnet50: + cuda: + tensorrt: + offline_target_qps: 45000 + retinanet: + cuda: + tensorrt: + offline_target_qps: 850 + bert-99: + cuda: + tensorrt: + offline_target_qps: 4000 + bert-99.9: + cuda: + tensorrt: + offline_target_qps: 2000 + 3d-unet-99.9: + cuda: + tensorrt: + offline_target_qps: 4 + rnnt: + cuda: + tensorrt: + offline_target_qps: 15000 + gptj-99: + cuda: + tensorrt: + offline_target_qps: 4.5 + + sapphire-rapids.24c,nvidia: + default_env: + EXTRA_ARGS: " --gpu_name=rtx_4090 --adr.mlperf-inference-implementation.tags=_num-gpus.2" + state: + resnet50: + cuda: + tensorrt: + offline_target_qps: 90000 + server_target_qps: 75000 + retinanet: + cuda: + tensorrt: + offline_target_qps: 1700 + server_target_qps: 1600 + bert-99: + cuda: + tensorrt: + offline_target_qps: 8000 + bert-99.9: + cuda: + tensorrt: + offline_target_qps: 4000 + server_target_qps: 3300 + 3d-unet-99.9: + cuda: + tensorrt: + offline_target_qps: 8 + singlestream_target_latency: 400 + 3d-unet-99.9: + cuda: + tensorrt: + offline_target_qps: 8 + singlestream_target_latency: 400 + rnnt: + cuda: + tensorrt: + offline_target_qps: 30000 + server_target_qps: 28200 + gptj-99: + cuda: + tensorrt: + offline_target_qps: 9 + server_target_qps: 8.2 + gptj-99.9: + cuda: + tensorrt: + offline_target_qps: 9 + server_target_qps: 8.2 + dlrm-v2-99: + cuda: + tensorrt: + offline_target_qps: 1500 + offline_target_qps: 1200 + dlrm-v2-99.9: + cuda: + tensorrt: + offline_target_qps: 1500 + offline_target_qps: 1200 + + orin.32g: + group: sut + env: + CATEGORY: edge + DIVISION: closed + + sapphire-rapids.24c: + group: sut + env: + CATEGORY: edge + DIVISION: closed + + macbookpro-m1: + group: sut + env: + CATEGORY: edge + DIVISION: closed + + aws-dl2q.24xlarge: + group: sut + default_env: + EXTRA_ARGS: " --adr.mlperf-inference-implementation.tags=_dl2q.24xlarge" + CATEGORY: datacenter + DIVISION: closed + default_variations: + implementation: qualcomm + + aws-dl2q.24xlarge,qualcomm: + state: + resnet50: + qaic: + glow: + offline_target_qps: 153000 + server_target_qps: 149000 + retinanet: + qaic: + glow: + offline_target_qps: 2500 + server_target_qps: 2200 + bert-99.9: + qaic: + glow: + offline_target_qps: 350 + server_target_qps: 300 + rb6: + group: sut + default_env: + CATEGORY: edge + DIVISION: closed + default_variations: + implementation: qualcomm + + rb6,qualcomm: + default_env: + EXTRA_ARGS: " --adr.mlperf-inference-implementation.tags=_rb6 
--env.CM_MLPERF_SHORT_RANGING_RUN=no" + state: + resnet50: + qaic: + glow: + offline_target_qps: 6800 + retinanet: + qaic: + glow: + offline_target_qps: 125 + bert-99: + qaic: + glow: + offline_target_qps: 255 diff --git a/script/benchmark-any-mlperf-inference-implementation/customize.py b/script/benchmark-any-mlperf-inference-implementation/customize.py new file mode 100644 index 0000000000..ae6462118b --- /dev/null +++ b/script/benchmark-any-mlperf-inference-implementation/customize.py @@ -0,0 +1,158 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + state = i['state'] + meta = i['meta'] + script_path = i['run_script_input']['path'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + models = env['MODELS'].split(",") + + input_backends = env.get('BACKENDS') + if input_backends: + input_backends = input_backends.split(",") + + devices = env.get('DEVICES') + if devices: + devices = devices.split(",") + + implementation = env['IMPLEMENTATION'] + + power = env.get('POWER', '') + + if str(power).lower() in [ "yes", "true" ]: + POWER_STRING = " --power=yes --adr.mlperf-power-client.power_server=" + env.get('POWER_SERVER', '192.168.0.15') + " --adr.mlperf-power-client.port=" + str(env.get('POWER_SERVER_PORT', '4950')) + " " + else: + POWER_STRING = "" + + if not devices: + return {'return': 1, 'error': 'No device specified. Please set one or more (comma separated) of {cpu, qaic, cuda, rocm} for --env.DEVICES=<>'} + + cmds = [] + run_script_content = '#!/bin/bash\n\n' + run_script_content += "POWER_STRING=\"" +POWER_STRING +"\"\n" + run_script_content += "DIVISION=\"" + env['DIVISION'] +"\"\n" + run_script_content += "CATEGORY=\"" + env['CATEGORY'] +"\"\n" + run_script_content += "EXTRA_ARGS=\"" + env.get('EXTRA_ARGS', '') +"\"\n" + run_script_content += 'source '+ os.path.join(script_path, "run-template.sh") + "\nPOWER_STRING=\"" +POWER_STRING +"\"\n\n" + + run_file_name = 'tmp-'+implementation+'-run' + + for model in models: + env['MODEL'] = model + + if "mobilenets" in model: + cmd = 'export extra_option=""' + cmds.append(cmd) + cmd = 'export extra_tags=""' + cmds.append(cmd) + assemble_tflite_cmds(cmds) + cmd = 'export extra_option=" --adr.mlperf-inference-implementation.compressed_dataset=on"' + cmds.append(cmd) + assemble_tflite_cmds(cmds) + + if env.get('CM_HOST_CPU_ARCHITECTURE', '') == "aarch64": + extra_tags=",_armnn,_use-neon" + cmd = f'export extra_tags="{extra_tags}"' + cmds.append(cmd) + assemble_tflite_cmds(cmds) + cmd = 'export extra_option=" --adr.mlperf-inference-implementation.compressed_dataset=on"' + cmds.append(cmd) + assemble_tflite_cmds(cmds) + + continue + + if not input_backends: + backends = None + if implementation == "reference": + if model == "resnet50": + backends = "tf,onnxruntime" + elif model == "retinanet": + backends = "onnxruntime,pytorch" + elif "bert" in model: + backends = "tf,onnxruntime,pytorch" + elif "3d-unet" in model: + backends = "tf,onnxruntime,pytorch" + elif model == "rnnt": + backends = "pytorch" + elif "gptj" in model: + backends = "pytorch" + elif "stable-diffusion-xl" in model: + backends = "pytorch" + elif "llama2-70b" in model: + backends = "pytorch" + if not backends: + return {'return': 1, 'error': f'No backend specified for the model: {model}.'} + backends = backends.split(",") + + else: + backends = input_backends + + for backend in backends: + + for device in devices: + add_to_run_cmd = '' + offline_target_qps = (((state.get(model, 
{})).get(device, {})).get(backend, {})).get('offline_target_qps') + if offline_target_qps: + add_to_run_cmd += f" --offline_target_qps={offline_target_qps}" + server_target_qps = (((state.get(model, {})).get(device, {})).get(backend, {})).get('server_target_qps') + if server_target_qps: + add_to_run_cmd += f" --server_target_qps={server_target_qps}" + + else: #try to do a test run with reasonable number of samples to get and record the actual system performance + if device == "cpu": + if model == "resnet50": + test_query_count = 1000 + else: + test_query_count = 100 + else: + if model == "resnet50": + test_query_count = 40000 + else: + test_query_count = 2000 + cmd = f'run_test "{model}" "{backend}" "{test_query_count}" "{implementation}" "{device}" "$find_performance_cmd"' + cmds.append(cmd) + #second argument is unused for submission_cmd + cmd = f'run_test "{model}" "{backend}" "100" "{implementation}" "{device}" "$submission_cmd" "{add_to_run_cmd}"' + + singlestream_target_latency = (((state.get(model, {})).get(device, {})).get(backend, {})).get('singlestream_target_latency') + if singlestream_target_latency: + cmd += f" --singlestream_target_latency={singlestream_target_latency}" + + cmds.append(cmd) + + run_script_content += "\n\n" +"\n\n".join(cmds) + + with open(os.path.join(script_path, run_file_name+".sh"), 'w') as f: + f.write(run_script_content) + print(run_script_content) + + run_script_input = i['run_script_input'] + r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':run_file_name}) + + if r['return']>0: return r + + return {'return':0} + +def assemble_tflite_cmds(cmds): + cmd = 'run "$tflite_accuracy_cmd"' + cmds.append(cmd) + cmd = 'run "$tflite_performance_cmd"' + cmds.append(cmd) + cmd = 'run "$tflite_readme_cmd"' + cmds.append(cmd) + return + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/benchmark-any-mlperf-inference-implementation/run-template.sh b/script/benchmark-any-mlperf-inference-implementation/run-template.sh new file mode 100644 index 0000000000..17c1ffa002 --- /dev/null +++ b/script/benchmark-any-mlperf-inference-implementation/run-template.sh @@ -0,0 +1,90 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division=$DIVISION +model=$MODEL +device=$DEVICE +category=$CATEGORY +rerun=$RERUN + +function run_test() { + model=$1 + backend=$2 + test_query_count=$3 + implementation=$4 + device=$5 + EXTRA_RUN_ARGS=$7 + echo "model=$model, backend=$2, test_query_count=$3, implementation=$4, device=$5, EXTRA_RUN_ARGS=$7" + run "$6" +} + +#power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' +results_dir=$HOME/results_dir + +#Add your run commands here... 
+find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun ${EXTRA_ARGS}' + +find_ss_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=SingleStream --quiet --test_query_count=$test_query_count $rerun ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' + +submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' + +readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' + +readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' + + +tflite_accuracy_cmd='cm run script --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ +--adr.compiler.tags=gcc \ +${extra_option} \ + ${EXTRA_ARGS}' + +tflite_performance_cmd='cm run script --tags=run,mobilenet-models,_tflite,_performance-only$extra_tags \ +${POWER_STRING} \ +--adr.compiler.tags=gcc \ +${extra_option} \ + ${EXTRA_ARGS}' + +tflite_readme_cmd='cm run script --tags=run,mobilenet-models,_tflite,_populate-readme$extra_tags \ +${POWER_STRING} \ +--adr.compiler.tags=gcc \ +${extra_option} \ + ${EXTRA_ARGS}' diff --git a/script/benchmark-program-mlperf/README.md b/script/benchmark-program-mlperf/README.md new file mode 100644 index 0000000000..14e32bb1ac --- /dev/null +++ b/script/benchmark-program-mlperf/README.md @@ -0,0 +1,153 @@ +Automatically generated README for this automation recipe: **benchmark-program-mlperf** + +Category: **Modular MLPerf inference benchmark pipeline** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=benchmark-program-mlperf,cfff0132a8aa4018) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program-mlperf)* +* CM meta description for 
this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *mlperf,benchmark-mlperf*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "mlperf benchmark-mlperf" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=mlperf,benchmark-mlperf`
+
+`cm run script --tags=mlperf,benchmark-mlperf[,variations] `
+
+*or*
+
+`cmr "mlperf benchmark-mlperf"`
+
+`cmr "mlperf benchmark-mlperf [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'mlperf,benchmark-mlperf',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="mlperf,benchmark-mlperf"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=mlperf,benchmark-mlperf) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "mlperf benchmark-mlperf[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**power-mode**" +
+ Click here to expand this section. + + * **`_no-power`** (default) + - Workflow: + 1. ***Read "post_deps" on other CM scripts*** + * benchmark-program,program + * CM names: `--adr.['benchmark-program']...` + - CM script: [benchmark-program](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-program) + * `_power` + - Environment variables: + - *CM_MLPERF_POWER*: `yes` + - Workflow: + 1. ***Read "prehook_deps" on other CM scripts*** + * benchmark-program,program + * CM names: `--adr.['benchmark-program']...` + - CM script: [benchmark-program](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-program) + 1. ***Read "post_deps" on other CM scripts*** + * run,mlperf,power,client + * `if (CM_MLPERF_LOADGEN_MODE == performance)` + * CM names: `--adr.['mlperf-power-client']...` + - CM script: [run-mlperf-power-client](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-power-client) + +
+ + +#### Default variations + +`_no-power` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program-mlperf/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program-mlperf/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program-mlperf/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program-mlperf/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program-mlperf/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program-mlperf/_cm.json) + +___ +### Script output +`cmr "mlperf benchmark-mlperf [,variations]" -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/benchmark-program-mlperf/_cm.json b/script/benchmark-program-mlperf/_cm.json new file mode 100644 index 0000000000..b5ff5c6791 --- /dev/null +++ b/script/benchmark-program-mlperf/_cm.json @@ -0,0 +1,57 @@ +{ + "alias": "benchmark-program-mlperf", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Modular MLPerf inference benchmark pipeline", + "default_env": { + }, + "tags": [ + "mlperf", + "benchmark-mlperf" + ], + "uid": "cfff0132a8aa4018", + "variations": { + "power": { + "group": "power-mode", + "env": { + "CM_MLPERF_POWER": "yes" + }, + "new_env_keys": [ + "CM_MLPERF_*" + ], + "prehook_deps": [ + { + "names": [ + "benchmark-program" + ], + "tags": "benchmark-program,program" + } + ], + "post_deps": [ + { + "enable_if_env": { + "CM_MLPERF_LOADGEN_MODE": [ + "performance" + ] + }, + "names": [ + "mlperf-power-client" + ], + "tags": "run,mlperf,power,client" + } + ] + }, + "no-power": { + "group": "power-mode", + "default": true, + "post_deps": [ + { + "names": [ + "benchmark-program" + ], + "tags": "benchmark-program,program" + } + ] + } + } +} diff --git a/script/benchmark-program-mlperf/customize.py b/script/benchmark-program-mlperf/customize.py new file mode 100644 index 0000000000..a333b6c078 --- /dev/null +++ b/script/benchmark-program-mlperf/customize.py @@ -0,0 +1,28 @@ +from cmind import utils +import os + +def preprocess(i): + os_info = i['os_info'] + env = i['env'] + + return {'return':0} + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + + env['CM_MLPERF_RUN_CMD'] = env.get('CM_RUN_CMD') + + + if env.get('CM_MLPERF_POWER', '') == "yes": + if os_info['platform'] == 'windows': + return {'return':1, 'error':'TBD: this script is not yet supported on Windows'} + + if env.get('CM_MLPERF_SHORT_RANGING_RUN', '') != 'no': + os.system("echo '0' > "+env.get('CM_RUN_DIR','')+ "/count.txt") + env['CM_MLPERF_RUN_CMD'] = "CM_MLPERF_RUN_COUNT=\$(cat \${CM_RUN_DIR}/count.txt); echo \${CM_MLPERF_RUN_COUNT}; CM_MLPERF_RUN_COUNT=\$((CM_MLPERF_RUN_COUNT+1)); echo \${CM_MLPERF_RUN_COUNT} > \${CM_RUN_DIR}/count.txt && if [ \${CM_MLPERF_RUN_COUNT} -eq \'1\' ]; then export CM_MLPERF_USER_CONF=\${CM_MLPERF_RANGING_USER_CONF}; else export CM_MLPERF_USER_CONF=\${CM_MLPERF_TESTING_USER_CONF}; fi && "+env.get('CM_RUN_CMD','').strip() + else: + env['CM_MLPERF_RUN_CMD'] = env.get('CM_RUN_CMD','').strip() + 
+ return {'return':0} diff --git a/script/benchmark-program/README.md b/script/benchmark-program/README.md new file mode 100644 index 0000000000..5cd6ce2cca --- /dev/null +++ b/script/benchmark-program/README.md @@ -0,0 +1,152 @@ +Automatically generated README for this automation recipe: **benchmark-program** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=benchmark-program,19f369ef47084895) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *benchmark,program* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "benchmark program" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=benchmark,program` + +`cm run script --tags=benchmark,program[,variations] ` + +*or* + +`cmr "benchmark program"` + +`cmr "benchmark program [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'benchmark,program', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="benchmark,program"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=benchmark,program) to generate the CM command. + +#### Run this script via Docker (beta) + +`cm docker script "benchmark program[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_numactl` + - Workflow: + * `_numactl-interleave` + - Workflow: + * `_profile` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,profiler + - *Warning: no scripts found* + +
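+ +For example, a variation is selected by appending its tag to the script tags. A minimal Python sketch (an illustration, assuming the `mlcommons@ck` repository is pulled and a compiled benchmark, by default `run.out`, sits in the run directory): + +```python +import cmind + +# Run the benchmark under numactl with interleaved memory binding: +# the '_numactl-interleave' variation sets CM_ENABLE_NUMACTL=1 and +# CM_NUMACTL_MEMBIND=--interleave=all (see _cm.json below). +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'benchmark,program,_numactl-interleave', + 'out':'con'}) + +if r['return']>0: + print (r['error']) +``` +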
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags. + +* CM_ENABLE_NUMACTL: `0` +* CM_ENABLE_PROFILING: `0` + +
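+ +As a sketch, the override that `--env.CM_ENABLE_PROFILING=1` performs on the command line can also be passed through the Python API via the `env` dictionary (illustrative, not specific to this script): + +```python +import cmind + +# Override a default environment key before the script's preprocess runs. +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'benchmark,program', + 'out':'con', + 'env':{'CM_ENABLE_PROFILING':'1'}}) + +if r['return']>0: + print (r['error']) +``` +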
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program/_cm.json)*** + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * set,performance,mode,_performance + * `if (CM_SET_PERFORMANCE_MODE in ['on', 'yes', 'True', True])` + - CM script: [set-performance-mode](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-performance-mode) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program/_cm.json) + 1. ***Run native script if it exists*** + * [run-ubuntu.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program/run-ubuntu.sh) + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/benchmark-program/_cm.json) + +___ +### Script output +`cmr "benchmark program [,variations]" -j` +#### New environment keys (filter) + +* `CM_RUN_CMD` +#### New environment keys auto-detected from customize + +* `CM_RUN_CMD` \ No newline at end of file diff --git a/script/benchmark-program/_cm.json b/script/benchmark-program/_cm.json new file mode 100644 index 0000000000..52844b7578 --- /dev/null +++ b/script/benchmark-program/_cm.json @@ -0,0 +1,61 @@ +{ + "alias": "benchmark-program", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "DevOps automation", + "default_env": { + "CM_ENABLE_NUMACTL": "0", + "CM_ENABLE_PROFILING": "0" + }, + "deps": [ + { + "tags": "detect,cpu" + }, + { + "tags": "set,performance,mode,_performance", + "enable_if_env": { + "CM_SET_PERFORMANCE_MODE": + [ + "on", + "yes", + "True", + true + ] + } + } + ], + "tags": [ + "program", + "benchmark", + "benchmark-program" + ], + "tags_help":"benchmark program", + "new_env_keys": [ + "CM_RUN_CMD" + ], + "uid": "19f369ef47084895", + "variations": { + "numactl": { + "default_env": { + "CM_ENABLE_NUMACTL": 1, + "CM_NUMACTL_MEMBIND": "--localalloc" + } + }, + "numactl-interleave": { + "default_env": { + "CM_ENABLE_NUMACTL": 1, + "CM_NUMACTL_MEMBIND": "--interleave=all" + } + }, + "profile": { + "default_env": { + "CM_ENABLE_PROFILING": 1 + }, + "deps": [ + { + "tags": "get,profiler" + } + ] + } + } +} diff --git a/script/benchmark-program/customize.py b/script/benchmark-program/customize.py new file mode 100644 index 0000000000..188ffa6722 --- /dev/null +++ b/script/benchmark-program/customize.py @@ -0,0 +1,65 @@ +from cmind import utils +import os + +def preprocess(i): + os_info = i['os_info'] + env = i['env'] + + if env.get('CM_RUN_CMD','') == '': + if env.get('CM_BIN_NAME','') == '': + x = 'run.exe' if os_info['platform'] == 'windows' else 'run.out' + env['CM_BIN_NAME'] = x + + if os_info['platform'] == 'windows': + env['CM_RUN_CMD'] = 
env.get('CM_RUN_PREFIX','') + env['CM_BIN_NAME'] + if env.get('CM_RUN_SUFFIX','')!='': + env['CM_RUN_CMD'] += ' '+env['CM_RUN_SUFFIX'] + + else: + if env['CM_ENABLE_NUMACTL'].lower() in ["on", "1", "true", "yes"]: + env['CM_ENABLE_NUMACTL'] = "1" + CM_RUN_PREFIX = "numactl " + env['CM_NUMACTL_MEMBIND'] + ' ' + else: + CM_RUN_PREFIX = '' + + CM_RUN_PREFIX += env.get('CM_RUN_PREFIX', '') + + env['CM_RUN_PREFIX'] = CM_RUN_PREFIX + + CM_RUN_SUFFIX = (env['CM_REDIRECT_OUT'] + ' ') if 'CM_REDIRECT_OUT' in env else '' + CM_RUN_SUFFIX += (env['CM_REDIRECT_ERR'] + ' ') if 'CM_REDIRECT_ERR' in env else '' + + env['CM_RUN_SUFFIX'] = env['CM_RUN_SUFFIX'] + CM_RUN_SUFFIX if 'CM_RUN_SUFFIX' in env else CM_RUN_SUFFIX + + if env.get('CM_RUN_DIR','') == '': + env['CM_RUN_DIR'] = os.getcwd() + + + env['CM_RUN_CMD'] = CM_RUN_PREFIX + ' ' + os.path.join(env['CM_RUN_DIR'],env['CM_BIN_NAME']) + ' ' + env['CM_RUN_SUFFIX'] + + x = env.get('CM_RUN_PREFIX0','') + if x!='': + env['CM_RUN_CMD'] = x + ' ' + env.get('CM_RUN_CMD','') + + if os_info['platform'] != 'windows' and str(env.get('CM_SAVE_CONSOLE_LOG', True)).lower() not in [ "no", "false", "0"]: + logs_dir = env.get('CM_LOGS_DIR', env['CM_RUN_DIR']) + env['CM_RUN_CMD'] += " 2>&1 | tee " + os.path.join(logs_dir, "console.out") + + # Print info + print ('***************************************************************************') + print ('CM script::benchmark-program/run.sh') + print ('') + print ('Run Directory: {}'.format(env.get('CM_RUN_DIR',''))) + + print ('') + print ('CMD: {}'.format(env.get('CM_RUN_CMD',''))) + + print ('') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/benchmark-program/run-ubuntu.sh b/script/benchmark-program/run-ubuntu.sh new file mode 100644 index 0000000000..1f19ed80b6 --- /dev/null +++ b/script/benchmark-program/run-ubuntu.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} + +cd ${CM_TMP_CURRENT_SCRIPT_PATH} +if [ "${CM_ENABLE_NUMACTL}" == "1" ]; then + sudo apt-get install -y numactl +fi + +bash ./run.sh diff --git a/script/benchmark-program/run.bat b/script/benchmark-program/run.bat new file mode 100644 index 0000000000..d154493551 --- /dev/null +++ b/script/benchmark-program/run.bat @@ -0,0 +1,39 @@ +@echo off + +if "%CM_RUN_DIR%" == "" ( + echo CM_RUN_DIR is not set + exit 1 +) + +cd %CM_RUN_DIR% + +if "%CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM%" == "True" ( + echo ***************************************************** + echo You are now in Debug shell with pre-set CM env and can run the following command line manually: + + echo. + if not "%CM_RUN_CMD0%" == "" ( + echo %CM_RUN_CMD0% + ) else ( + echo %CM_RUN_CMD% + ) + + echo. + echo Type exit to return to CM script. + echo. + + cmd + + exit 0 +) + +rem Check CM_RUN_CMD0 +if not "%CM_RUN_CMD0%" == "" ( + echo. + %CM_RUN_CMD0% +) else ( + echo. 
+ %CM_RUN_CMD% +) + +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/benchmark-program/run.sh b/script/benchmark-program/run.sh new file mode 100644 index 0000000000..8f2e0fa05a --- /dev/null +++ b/script/benchmark-program/run.sh @@ -0,0 +1,48 @@ +#!/bin/bash +if [[ ${CM_MLPERF_POWER} == "yes" && ${CM_MLPERF_LOADGEN_MODE} == "performance" ]]; then + exit 0 +fi + +# Run +if [ -z "${CM_RUN_DIR}" ]; then + echo "CM_RUN_DIR is not set" + exit 1 +fi + +cd ${CM_RUN_DIR} + +if [[ "${CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM}" == "True" ]]; then + echo "*****************************************************" + echo "You are now in Debug shell with pre-set CM env and can run the following command line manually:" + + echo "" + if [[ "${CM_RUN_CMD0}" != "" ]]; then + echo "${CM_RUN_CMD0}" + else + echo "${CM_RUN_CMD}" + fi + + echo "" + echo "Type exit to return to CM script." + echo "" +# echo "You can also run . ./debug-script-benchmark-program.sh to reproduce and customize run." +# echo "" +# +# cp -f tmp-run.sh debug-script-benchmark-program.sh +# +# sed -e 's/CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM="True"/CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM="False"/g' -i debug-script-benchmark-program.sh + + bash + + # do not re-run command below to pick up manual run! + exit 0 +fi + +# Check CM_RUN_CMD0 +if [[ "${CM_RUN_CMD0}" != "" ]]; then + eval ${CM_RUN_CMD0} +else + eval ${CM_RUN_CMD} +fi + +test $? -eq 0 || exit 1 diff --git a/script/build-docker-image/README-extra.md b/script/build-docker-image/README-extra.md new file mode 100644 index 0000000000..79b2c1b099 --- /dev/null +++ b/script/build-docker-image/README-extra.md @@ -0,0 +1,16 @@ +# Build CM Docker Image +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) builds a Docker image for using CM. + +## How to use +```bash +cm run script --tags=build,docker,image --dockerfile=[DOCKERFILEPATH] --gh_token=[GITHUB_AUTH_TOKEN] --image_repo=[IMAGE_REPO] --image_name=[IMAGE_NAME] --image_tag=[IMAGE_TAG] --cache=[yes,no] +``` +where +* `[DOCKERFILEPATH]` is the path to the dockerfile. If not given, the [dockerfile build script](../build-dockerfile) will be called. +* `[GITHUB_AUTH_TOKEN]` is passed as a build argument to `docker build`. +* `[IMAGE_REPO]`: repository name for the Docker image. Default is `local`. +* `[IMAGE_NAME]`: name of the Docker image. Default is `cm`. +* `[IMAGE_TAG]`: tag for the Docker image. Default is `latest`. +* `--cache`: if set to `no`, turns off Docker build caching. Caching is on by default. +* `[--docker_os, --docker_os_version, --cm_repo and --script_tags]` are additional options passed to the [dockerfile build script](../build-dockerfile) if needed. 
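+ +A minimal Python sketch of the same call through the CM API (the values below are illustrative; `cm-test` is a hypothetical image name): + +```python +import cmind + +# Equivalent to the `cm run script` command above; keys such as +# 'image_repo' and 'cache' map to the CLI flags documented above. +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'build,docker,image', + 'out':'con', + 'image_repo':'local', + 'image_name':'cm-test', + 'image_tag':'latest', + 'cache':'no'}) + +if r['return']>0: + print (r['error']) +``` +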
+ diff --git a/script/build-docker-image/README.md b/script/build-docker-image/README.md new file mode 100644 index 0000000000..49d7369483 --- /dev/null +++ b/script/build-docker-image/README.md @@ -0,0 +1,159 @@ +Automatically generated README for this automation recipe: **build-docker-image** + +Category: **Docker automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=build-docker-image,2c3c4ba2413442e7) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-docker-image)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *build,docker,image,docker-image,dockerimage* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "build docker image docker-image dockerimage" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=build,docker,image,docker-image,dockerimage` + +`cm run script --tags=build,docker,image,docker-image,dockerimage [--input_flags]` + +*or* + +`cmr "build docker image docker-image dockerimage"` + +`cmr "build docker image docker-image dockerimage " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'build,docker,image,docker-image,dockerimage', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="build,docker,image,docker-image,dockerimage"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=build,docker,image,docker-image,dockerimage) to generate the CM command. + +#### Run this script via Docker (beta) + +`cm docker script "build docker image docker-image dockerimage" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment
+Click here to expand this section. + +* `--cache=value` → `CM_DOCKER_CACHE=value` +* `--cm_repo=value` → `CM_MLOPS_REPO=value` +* `--docker_os=value` → `CM_DOCKER_OS=value` +* `--docker_os_version=value` → `CM_DOCKER_OS_VERSION=value` +* `--dockerfile=value` → `CM_DOCKERFILE_WITH_PATH=value` +* `--gh_token=value` → `CM_GH_TOKEN=value` +* `--image_name=value` → `CM_DOCKER_IMAGE_NAME=value` +* `--image_repo=value` → `CM_DOCKER_IMAGE_REPO=value` +* `--image_tag=value` → `CM_DOCKER_IMAGE_TAG=value` +* `--post_run_cmds=value` → `CM_DOCKER_POST_RUN_COMMANDS=value` +* `--pre_run_cmds=value` → `CM_DOCKER_PRE_RUN_COMMANDS=value` +* `--real_run=value` → `CM_REAL_RUN=value` +* `--script_tags=value` → `CM_DOCKER_RUN_SCRIPT_TAGS=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cmind.access({... , "cache":...}) +``` + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags. + +* CM_DOCKER_IMAGE_REPO: `local` +* CM_DOCKER_IMAGE_TAG: `latest` + +
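+ +With these defaults, and the image name falling back to `cm` in `customize.py` below, the generated command tags the image as `local/cm:latest`. An illustrative sketch of how the reference is assembled: + +```python +# Sketch only; mirrors the XCMD logic in customize.py below. +repo = 'local' # CM_DOCKER_IMAGE_REPO default +name = 'cm' # CM_DOCKER_IMAGE_NAME fallback +tag = 'latest' # CM_DOCKER_IMAGE_TAG default +print('docker build -t "{}/{}:{}" .'.format(repo, name, tag)) +``` +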
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-docker-image/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-docker-image/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-docker-image/_cm.json)*** + * build,dockerfile + * `if (CM_BUILD_DOCKERFILE in ['yes', '1'])` + - CM script: [build-dockerfile](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/build-dockerfile) + 1. ***Run native script if it exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-docker-image/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-docker-image/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-docker-image/_cm.json) + 1. Run "postprocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-docker-image/_cm.json) + +___ +### Script output +`cmr "build docker image docker-image dockerimage " [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DOCKER_*` +#### New environment keys auto-detected from customize + +* `CM_DOCKER_BUILD_ARGS` +* `CM_DOCKER_CACHE_ARG` +* `CM_DOCKER_IMAGE_NAME` +* `CM_DOCKER_IMAGE_REPO` +* `CM_DOCKER_IMAGE_TAG` \ No newline at end of file diff --git a/script/build-docker-image/_cm.json b/script/build-docker-image/_cm.json new file mode 100644 index 0000000000..a39d6d379a --- /dev/null +++ b/script/build-docker-image/_cm.json @@ -0,0 +1,48 @@ +{ + "alias": "build-docker-image", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Docker automation", + "cache": false, + "input_mapping": { + "cache": "CM_DOCKER_CACHE", + "cm_repo": "CM_MLOPS_REPO", + "docker_os": "CM_DOCKER_OS", + "docker_os_version": "CM_DOCKER_OS_VERSION", + "dockerfile": "CM_DOCKERFILE_WITH_PATH", + "gh_token": "CM_GH_TOKEN", + "image_repo": "CM_DOCKER_IMAGE_REPO", + "image_name": "CM_DOCKER_IMAGE_NAME", + "image_tag": "CM_DOCKER_IMAGE_TAG", + "script_tags": "CM_DOCKER_RUN_SCRIPT_TAGS", + "real_run": "CM_REAL_RUN", + "pre_run_cmds": "CM_DOCKER_PRE_RUN_COMMANDS", + "post_run_cmds": "CM_DOCKER_POST_RUN_COMMANDS" + }, + "default_env": { + "CM_DOCKER_IMAGE_REPO": "local", + "CM_DOCKER_IMAGE_TAG": "latest" + }, + "new_env_keys": [ + "CM_DOCKER_*" + ], + "prehook_deps": [ + { + "enable_if_env": { + "CM_BUILD_DOCKERFILE": [ + "yes", + "1" + ] + }, + "tags": "build,dockerfile" + } + ], + "tags": [ + "build", + "docker", + "image", + "docker-image", + "dockerimage" + ], + "uid": "2c3c4ba2413442e7" +} diff --git a/script/build-docker-image/customize.py b/script/build-docker-image/customize.py new file mode 100644 index 0000000000..2cfab08594 --- /dev/null +++ b/script/build-docker-image/customize.py @@ -0,0 +1,88 @@ +from cmind import utils +import os +from os.path import exists + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + dockerfile_path = env.get('CM_DOCKERFILE_WITH_PATH', '') + if dockerfile_path!='' and os.path.exists(dockerfile_path): + build_dockerfile = False + env['CM_BUILD_DOCKERFILE'] = "no" + os.chdir(os.path.dirname(dockerfile_path)) + else: + build_dockerfile = True + env['CM_BUILD_DOCKERFILE'] = "yes" + + + CM_DOCKER_BUILD_ARGS = 
env.get('+ CM_DOCKER_BUILD_ARGS', []) + + if 'CM_GH_TOKEN' in env: + CM_DOCKER_BUILD_ARGS.append( "CM_GH_TOKEN="+env['CM_GH_TOKEN'] ) + + if CM_DOCKER_BUILD_ARGS: + build_args = "--build-arg "+ " --build-arg ".join(CM_DOCKER_BUILD_ARGS) + else: + build_args = "" + + env['CM_DOCKER_BUILD_ARGS'] = build_args + +# if 'CM_DOCKERFILE_WITH_PATH' not in env or not exists(env['CM_DOCKERFILE_WITH_PATH']): +# env['CM_BUILD_DOCKERFILE'] = "yes" +# else: +# env['CM_BUILD_DOCKERFILE'] = "no" +# + if "CM_DOCKER_IMAGE_REPO" not in env: + env['CM_DOCKER_IMAGE_REPO'] = "local" + + docker_image_name = env.get('CM_DOCKER_IMAGE_NAME', '') + if docker_image_name == '': + docker_image_name = env.get('CM_DOCKER_RUN_SCRIPT_TAGS','').replace(',', '-').replace('_','') + if docker_image_name == '': + docker_image_name = 'cm' + + env['CM_DOCKER_IMAGE_NAME'] = docker_image_name + + if env.get("CM_DOCKER_IMAGE_TAG", "") == '': + env['CM_DOCKER_IMAGE_TAG'] = "latest" + + if env.get("CM_DOCKER_CACHE", "yes") == "no": + env["CM_DOCKER_CACHE_ARG"] = " --no-cache" + + CMD = '' + if not build_dockerfile: + # Write .dockerignore + with open('.dockerignore', 'w') as f: + f.write('.git\n') + + # Prepare CMD to build image + XCMD = [ + 'docker build ' + env.get('CM_DOCKER_CACHE_ARG',''), + ' ' + build_args, + ' -f "' + dockerfile_path + '"', + ' -t "' + env.get('CM_DOCKER_IMAGE_REPO', '') + '/' + \ + env.get('CM_DOCKER_IMAGE_NAME', '') + ':' + \ + env.get('CM_DOCKER_IMAGE_TAG', '') + '"', + ' .' + ] + + with open(dockerfile_path + '.build.sh', 'w') as f: + f.write(' \\\n'.join(XCMD) + '\n') + + with open(dockerfile_path + '.build.bat', 'w') as f: + f.write(' ^\n'.join(XCMD) + '\n') + + CMD = ''.join(XCMD) + + print ('') + print ('CM generated the following Docker build command:') + print ('') + print (CMD) + + print ('') + + env['CM_DOCKER_BUILD_CMD'] = CMD + + return {'return':0} diff --git a/script/build-docker-image/examples/0-common.bat b/script/build-docker-image/examples/0-common.bat new file mode 100644 index 0000000000..721cc1b5d7 --- /dev/null +++ b/script/build-docker-image/examples/0-common.bat @@ -0,0 +1,21 @@ +set DOCKER_IMAGE_REPO=cknowledge + +set DOCKER_OS=ubuntu + +rem set DOCKER_OS_VER=22.04 +set DOCKER_OS_VER=23.04 +set DOCKER_PIP_EXTRA_FLAGS=--break-system-packages + +rem set DOCKER_IMAGE_NAME=cm-base +set DOCKER_IMAGE_NAME=cm-script-app-image-classification-onnx-py +set DOCKER_IMAGE_POST_FILE=%CD%\extra-cmd.cm-script-app-image-classification-onnx-py + +rem set DOCKER_IMAGE_TAG=%DOCKER_OS%-%DOCKER_OS_VER%-20230804 + +set DOCKER_IMAGE_TAG=%DOCKER_OS%-%DOCKER_OS_VER%-latest +set DOCKERFILE_EXT=%DOCKER_IMAGE_NAME%-%DOCKER_IMAGE_TAG% + +set DOCKER_PACKAGE_MANAGER_UPDATE_CMD="apt-get update -y && apt-get upgrade -y" + +set DOCKER_CM_MLOPS_REPO="ctuning@mlcommons-ck" +rem set DOCKER_CM_MLOPS_REPO="mlcommons@ck" diff --git a/script/build-docker-image/examples/0-generate.bat b/script/build-docker-image/examples/0-generate.bat new file mode 100644 index 0000000000..443d029aef --- /dev/null +++ b/script/build-docker-image/examples/0-generate.bat @@ -0,0 +1,9 @@ +call 0-common.bat + +cmr "build dockerfile" --file_path=%CD%\Dockerfile.%DOCKERFILE_EXT% ^ + --docker_os=%DOCKER_OS% ^ + --docker_os_version=%DOCKER_OS_VER% ^ + --package_manager_update_cmd=%DOCKER_PACKAGE_MANAGER_UPDATE_CMD% ^ + --pip_extra_flags=%DOCKER_PIP_EXTRA_FLAGS% ^ + --post_file=%DOCKER_IMAGE_POST_FILE% ^ + --cm_repo=%DOCKER_CM_MLOPS_REPO% diff --git a/script/build-docker-image/examples/1-build.bat b/script/build-docker-image/examples/1-build.bat new 
file mode 100644 index 0000000000..2356eb0320 --- /dev/null +++ b/script/build-docker-image/examples/1-build.bat @@ -0,0 +1,8 @@ +call 0-common.bat + +cmr "build docker image" --dockerfile=%CD%\Dockerfile.%DOCKERFILE_EXT% ^ + --docker_os=%DOCKER_OS% ^ + --docker_os_version=%DOCKER_OS_VER% ^ + --image_repo=%DOCKER_IMAGE_REPO% ^ + --image_name=%DOCKER_IMAGE_NAME% ^ + --image_tag=%DOCKER_IMAGE_TAG% diff --git a/script/build-docker-image/examples/2-run-cm-command1.bat b/script/build-docker-image/examples/2-run-cm-command1.bat new file mode 100644 index 0000000000..eeeadd311d --- /dev/null +++ b/script/build-docker-image/examples/2-run-cm-command1.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +docker run -it %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% "cmr 'detect os' -j" diff --git a/script/build-docker-image/examples/2-run-cm-command2.bat b/script/build-docker-image/examples/2-run-cm-command2.bat new file mode 100644 index 0000000000..ac1c8a3a6b --- /dev/null +++ b/script/build-docker-image/examples/2-run-cm-command2.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_name=%DOCKER_IMAGE_NAME% --image_tag=%DOCKER_IMAGE_TAG% --run_cmd="cmr 'detect os' -j" diff --git a/script/build-docker-image/examples/2-run-cm-command3.bat b/script/build-docker-image/examples/2-run-cm-command3.bat new file mode 100644 index 0000000000..e690f093c7 --- /dev/null +++ b/script/build-docker-image/examples/2-run-cm-command3.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_tag=%DOCKER_IMAGE_TAG% --script_tags=detect,os diff --git a/script/build-docker-image/examples/2-run-cm-command4.bat b/script/build-docker-image/examples/2-run-cm-command4.bat new file mode 100644 index 0000000000..c2e6f801c6 --- /dev/null +++ b/script/build-docker-image/examples/2-run-cm-command4.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_tag=%DOCKER_IMAGE_TAG% --script_tags=detect,os --it diff --git a/script/build-docker-image/examples/2-run-cm-command5.bat b/script/build-docker-image/examples/2-run-cm-command5.bat new file mode 100644 index 0000000000..d153437f1e --- /dev/null +++ b/script/build-docker-image/examples/2-run-cm-command5.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +cm docker script --tags=detect,os -j diff --git a/script/build-docker-image/examples/2-run-interactive1.bat b/script/build-docker-image/examples/2-run-interactive1.bat new file mode 100644 index 0000000000..917dda9309 --- /dev/null +++ b/script/build-docker-image/examples/2-run-interactive1.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +docker run -it %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% -c bash diff --git a/script/build-docker-image/examples/2-run-interactive2.bat b/script/build-docker-image/examples/2-run-interactive2.bat new file mode 100644 index 0000000000..67dd226506 --- /dev/null +++ b/script/build-docker-image/examples/2-run-interactive2.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_name=%DOCKER_IMAGE_NAME% --image_tag=%DOCKER_IMAGE_TAG% --it diff --git a/script/build-docker-image/examples/3-push-to-docker-hub.bat b/script/build-docker-image/examples/3-push-to-docker-hub.bat new file mode 100644 index 0000000000..2c9eb634df --- /dev/null +++ b/script/build-docker-image/examples/3-push-to-docker-hub.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +docker push 
%DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% diff --git a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804 b/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804 new file mode 100644 index 0000000000..418e733631 --- /dev/null +++ b/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804 @@ -0,0 +1,38 @@ +FROM ubuntu:22.04 + +# Maintained by the MLCommons taskforce on automation and reproducibility +LABEL github="https://github.com/mlcommons/ck" +LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" + +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y && apt-get upgrade -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ="US/Pacific" +ENV PATH="${PATH}:/home/cmuser/.local/bin" +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck --dummy + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +RUN cm version diff --git a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804 b/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804 new file mode 100644 index 0000000000..478e155f65 --- /dev/null +++ b/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804 @@ -0,0 +1,38 @@ +FROM ubuntu:23.04 + +# Maintained by the MLCommons taskforce on automation and reproducibility +LABEL github="https://github.com/mlcommons/ck" +LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" + +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y && apt-get upgrade -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests --break-system-packages + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ="US/Pacific" +ENV PATH="${PATH}:/home/cmuser/.local/bin" +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck --dummy + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +RUN cm version diff --git a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest b/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest new file mode 100644 index 0000000000..832a376692 --- /dev/null +++ b/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest @@ -0,0 +1,38 @@ +FROM ubuntu:23.04 + +# Maintained by the MLCommons taskforce on automation and reproducibility +LABEL github="https://github.com/mlcommons/ck" +LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" + +SHELL ["/bin/bash", "-c"] +ARG 
CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y && apt-get upgrade -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests --break-system-packages + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ="US/Pacific" +ENV PATH="${PATH}:/home/cmuser/.local/bin" +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo ctuning@mlcommons-ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +RUN cm version diff --git a/script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest b/script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest new file mode 100644 index 0000000000..7ce0af2fbb --- /dev/null +++ b/script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest @@ -0,0 +1,45 @@ +FROM ubuntu:23.04 + +# Maintained by the MLCommons taskforce on automation and reproducibility +LABEL github="https://github.com/mlcommons/ck" +LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" + +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y && apt-get upgrade -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests --break-system-packages + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ="US/Pacific" +ENV PATH="${PATH}:/home/cmuser/.local/bin" +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo ctuning@mlcommons-ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +RUN cm version + +# Create virtual python environment +RUN cmr "install python-venv" --name=cm --quiet + +# Run image classification and install all related CM components automatically +RUN cmr "python app image-classification onnx" --adr.python.name=cm --quiet + diff --git a/script/build-docker-image/examples/README.md b/script/build-docker-image/examples/README.md new file mode 100644 index 0000000000..8035bc429b --- /dev/null +++ b/script/build-docker-image/examples/README.md @@ -0,0 +1 @@ +https://hub.docker.com/r/cknowledge/cm-base/tags diff --git a/script/build-docker-image/examples/computer_mouse.jpg b/script/build-docker-image/examples/computer_mouse.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7f8abb6fe93d18af393ea036b24b907cc48e786 GIT binary patch literal 41154 [41,154 bytes of base85-encoded JPEG data omitted]
zA<>lWBHA>!bWay48RMJr)an&YiW1h^Qq?feYtfaqnVp)Q?d0k}W3Q@KZVQ_CQr+#!g-gHNYZWSjt+g4y3rq;@W zCCA~-`^{dcK`IuE74Xiwg=*0XLDX)C(^ME;h^lq~?F5JYR2UbO(WPyGq;Anf;qQx{ z3GJs{GnGe?of_^Ibv2wG>I}NZP3@;L1HYT6PEKZ|x@}&kS zJ1*+4=vYZNL_lZq-yM!{u&+BqeU@yK+K{i`k^n8}R8e5NGLaYq}4iI8bk%$$kKc}ENtFltwMu{OvzDQ(cB#O5#;#3GTqWf z<8J8Y^4);}9hdGj*px`GUd8Fr--+$F2#mioM0W!Vp9PmClE%ppw`i}mon>1xxMWT_ z80?(V1;{yXm8DNa2o38yoA%aGcIG?*@!fUy%0?Xn#NVN`Qw0JmqHa-pnuGs%zRd)-~Q4zwQwVB4LcqQ;DAgfNhWnk}H>}5xnLbUHv?dKfZ03jM^J_MKAN=@cFdl3o&L|u)fFYLAh z*u!9+dJvB}j$}Y0YgY1p6v(!L;@L`LncT-$n>?7$>QsALf%3>2POSGVf`d`)O)wMN zN(_Y!Q;x+X?%I?g#7#Q0we(hT4D%*ay|OnA zXF%EN(NGmrIo}%C01d7+8_UBcl3LY*J_{A4?^SHA8G986B_d{S4@dqqW8$V^1n^o@ zArIz$p>NMFmU$h#>0_Z!2ZF}p#evFgmmb|Ba6SNrg7Tp3LE4^aSUln=`(B_BGm|go z?&?b0MBVkM1P`}uE?Hqv-~0%mo+3O@B#!BWf-a|MDT;trJq)sB(UJvgk}Rk|&e?L# zw0Dx+CP3f2Od0>JYO9DSWEQ7xNbuD<^l4}-zMPJ(+@yJ?+smhkBNi#n1r8*V4KPte z%Cn|&tPcW^nh071Kg<*$kF|uuIdq}A8UcRPtO{4ghzUys3Os})v;%C4jBshe>=HhY z?Iu)v@}dl++4LefO9|-6uwD5bu?jW1CKmfDj2zI;>SCLOQ7!v!*luUj1`ZRiXU;Bh zlWQ!tf{{CU~IN%w06vg$8M znm55=jdMt5>k#uM7y&pvvQ=f-l+MIuP6UD|L;C?fk1}IdWnm0r*HMUjH$KU$PSgq{v(MCamFPOK^{=(o5D|>i8+9bv%j$rk1(?CSSDDoEs`F+mE6u*-!X!Zj zq2MyXjKp}dj95T`ic>6_KyDURVc$~RMy2ossTXv#n;0A?}G%}5?;?bj=kWHue9!boY0Rp*qA8ZBHp?P@^# z&?v`24r&5NlMpfNfC_exc?{b>ZJ>+aB*V74+=LEni7NLgY8Uydm~gJV-#a;NmY350 z)U8Em=UT9^WgrahuEG^eEsoy%KTQZt6k%yVPWygG(84ykk*9rVyocbg-@0Cl zU<*mJ`O=aC8DCpW$(Ke##o%(?s9l27-ZjQGi;zY5s&!MZ>U{3BU21p#u$K6UA1&~B- zv7-muyk${z|CIsX-LKuMWsI1;MG5Uo+ql_@Qx<2Myy$I!bP}S**D>GepbemipkYBl zmM+QwCcv-Q&IBc`mI1AIY}GuI1Y-M&c=Qoy<2d<;*<8Vh*}#!x;c0O+2|95k&Yd=4 zr<{OjIQc>rNp>o~0Rc=v*e=1iJh=N6(rm#$=?HEJS2lF~oes`6ZE6BlNJy$6-iZJM zn!`-{?>ekaXG5l~qIPO_-rO~;w+#WF08+#D@P9JLNk5UuE_*S7^fQQ!g8lv@qd}$2 z;IUIOp0+%!O+wH>HzP^^DZ*?C4#0gLz|171!6i8~yWV7$UpU~(FQQ?-!E!AZkVVh0~ zgN@*dPr4RX2}Fx4>yM>a^?%#p@-@a|!|~g4VE|$JNO8TVhN<6vDobNg*7>T(V&HHj zF4C?7&c3T{*v84=QC<$R*Ji1gNuW<{@?O(IeJl9M0MiqUyo58C2;XPJxiE`rY8NfOJ5v_!>Un+BfUSqIl-xU%d$a`Uz}Dz@&TIy$Nw%3VG!a7^ZuFvjm`nX; z7RG^sLRz?@;GZLq<~XWsqpC(%(WHY_-qCtPUsmzHNMX1R z7BkmMjn*T_4CL?7emzoo<`KF%&{s>Ov;LPh%jX#!#-h>2698cf+)!JYO3fceV8@2?zyH4P>dwjYgdJg*BQOhE~x zIh3%e0y*ALc=Y(?zRPQ5z0=Y{b?IEesP*!jcSnK~6|r2$H>!Is#`lUt_wBds04BPT zW4b>k68}cLV$<#4ZlI5b-g@!%=hV`I`xd%8{I@wuK{PDwYI6{g`(i`=+K2qtz0X~A zImkwnvw$YH+g{7zVktx0$E~>O3B;gC_Bs2^IWG?=4Sw*BrS4lEu*QN zS*(MxU7EAlC%vpDbmV6r#Suv5IcG^v5tNNC7)MVL1z;PZrW_Q6FRoD!6OdCbH|AjW z4Zc$_W-m0(br4sevYPT$p?zX@e~pU(tu6z--%ODNA#5M5qiP#0*ekXUBLI=vKzpu7 zX_tNTt+ma!(2a^DtJ-Db$8>0j+KB=?S9No*eUnDp5F;XXnGW?4nMEjnxxJpOX)UY5 zSK8~}V3IH~Y?E-git=hCq`^d>Z=F`u9P7f(@z{U1;OCYq&h5o=)4<|%$g5i3Z`;() z_RsRof9f8#IncKRG$YK|reM!!X*b&EsN>wyu}6X*ODv|m?>Bc9UusH!0ywr|Yw2?a pa^S(Ya07d;T!oUBXtdHJHMdN_ePy^)-FR_TZvWDzTTcI~{~thZMP&d0 literal 0 HcmV?d00001 diff --git a/script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py b/script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py new file mode 100644 index 0000000000..981f9b94db --- /dev/null +++ b/script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py @@ -0,0 +1,6 @@ + +# Create virtual python environment +RUN cmr "install python-venv" --name=cm --quiet + +# Run image classification and install all related CM components automatically +RUN cmr "python app image-classification onnx" --adr.python.name=cm --quiet diff --git a/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat b/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat new file mode 100644 index 
0000000000..c4f8e2204e --- /dev/null +++ b/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat @@ -0,0 +1,6 @@ +rem call this script with computer_mouse.jpg as input + +call 0-common.bat + +rem docker run -v %CD%:/tmp/host -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm --input=/tmp/host/%1" +docker run -v %CD%:/tmp/host -it --rm %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% -c "time cmr 'python app image-classification onnx' --adr.python.name=cm --input=/tmp/host/%1" \ No newline at end of file diff --git a/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh b/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh new file mode 100644 index 0000000000..55314e9e4d --- /dev/null +++ b/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +# call this script with computer_mouse.jpg as input + +docker run -v $PWD:/tmp/host -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm --input=/tmp/host/$1" diff --git a/script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat b/script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat new file mode 100644 index 0000000000..762ed99fdc --- /dev/null +++ b/script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat @@ -0,0 +1 @@ +docker run -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm" diff --git a/script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh b/script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh new file mode 100644 index 0000000000..a24a06ed9d --- /dev/null +++ b/script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +docker run -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm" diff --git a/script/build-docker-image/run.bat b/script/build-docker-image/run.bat new file mode 100644 index 0000000000..89a8f6f456 --- /dev/null +++ b/script/build-docker-image/run.bat @@ -0,0 +1,12 @@ +if exist %CM_DOCKERFILE_WITH_PATH% ( +rem echo .git > .dockerignore + +rem echo. +rem echo docker build %CM_DOCKER_CACHE_ARG% %CM_DOCKER_BUILD_ARGS% -f %CM_DOCKERFILE_WITH_PATH% -t %CM_DOCKER_IMAGE_REPO%/%CM_DOCKER_IMAGE_NAME%:%CM_DOCKER_IMAGE_TAG% . + +rem echo. +rem docker build %CM_DOCKER_CACHE_ARG% %CM_DOCKER_BUILD_ARGS% -f "%CM_DOCKERFILE_WITH_PATH%" -t "%CM_DOCKER_IMAGE_REPO%/%CM_DOCKER_IMAGE_NAME%:%CM_DOCKER_IMAGE_TAG%" . + + %CM_DOCKER_BUILD_CMD% + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% +) diff --git a/script/build-docker-image/run.sh b/script/build-docker-image/run.sh new file mode 100644 index 0000000000..a7e19e3189 --- /dev/null +++ b/script/build-docker-image/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +if [ -f "${CM_DOCKERFILE_WITH_PATH}" ]; then +# echo ".git" > .dockerignore + +# echo "" +# echo "docker build ${CM_DOCKER_CACHE_ARG} ${CM_DOCKER_BUILD_ARGS} -f ${CM_DOCKERFILE_WITH_PATH} -t ${CM_DOCKER_IMAGE_REPO}/${CM_DOCKER_IMAGE_NAME}:${CM_DOCKER_IMAGE_TAG} ." 
+ +# docker build ${CM_DOCKER_CACHE_ARG} ${CM_DOCKER_BUILD_ARGS} -f "${CM_DOCKERFILE_WITH_PATH}" -t "${CM_DOCKER_IMAGE_REPO}/${CM_DOCKER_IMAGE_NAME}:${CM_DOCKER_IMAGE_TAG}" . + + eval "${CM_DOCKER_BUILD_CMD}" + test $? -eq 0 || exit 1 +fi diff --git a/script/build-dockerfile/README-extra.md b/script/build-dockerfile/README-extra.md new file mode 100644 index 0000000000..992fee4b3e --- /dev/null +++ b/script/build-dockerfile/README-extra.md @@ -0,0 +1,27 @@ +# Build CM Dockerfile +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) builds a Dockerfile for using CM. + +## How to use +```bash +cm run script --tags=build,dockerfile --docker_os=[DOCKER_OS] --docker_os_version=[DOCKER_OS_VERSION] --build --image_repo=[IMAGE_REPO] --image_tag=[IMAGE_TAG] --gh_token=[GITHUB_AUTH_TOKEN] --script_tags=[CM_SCRIPT_TAGS] +``` +where +* `[DOCKER_OS]` is one of `ubuntu` or `rhel`. Default is `ubuntu`. +* `[DOCKER_OS_VERSION]` is one of `18.04`, `20.04`, `22.04` for `ubuntu` and `9` for `rhel`. Default is `20.04`. +* `--build` option calls the [CM docker image build script](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/build-docker-image) to build a docker image from the generated Dockerfile. Default is off. +* `[GITHUB_AUTH_TOKEN]`: GitHub auth token to be passed to docker build as a build argument. This is optional. +* `[CM_SCRIPT_TAGS]`: Tags for the CM script which should be run as the last command inside the Dockerfile. This script will do a fake run and set up all its dependencies in the docker image once built. +* `[IMAGE_REPO]`: Repo name for the docker image. Default is `local`. +* `[IMAGE_TAG]`: Tag for the docker image. Default is `latest`. + + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 + +## Sample dockerfiles +1. [Ubuntu 18.04](dockerfiles/ubuntu_18.04.Dockerfile) +2. [Ubuntu 20.04](dockerfiles/ubuntu_20.04.Dockerfile) +3. [Ubuntu 22.04](dockerfiles/ubuntu_22.04.Dockerfile) +4. [RHEL 9](dockerfiles/rhel_9.Dockerfile) + diff --git a/script/build-dockerfile/README.md b/script/build-dockerfile/README.md new file mode 100644 index 0000000000..cf554c5add --- /dev/null +++ b/script/build-dockerfile/README.md @@ -0,0 +1,185 @@ +Automatically generated README for this automation recipe: **build-dockerfile** + +Category: **Docker automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=build-dockerfile,e66a7483230d4641) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-dockerfile)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *build,dockerfile* +* Output cached?
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "build dockerfile" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=build,dockerfile` + +`cm run script --tags=build,dockerfile[,variations] [--input_flags]` + +*or* + +`cmr "build dockerfile"` + +`cmr "build dockerfile [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'build,dockerfile', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` +
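+The `...` above stands for any of this script's input flags (see "Script flags mapped to environment" below). As a minimal, hedged sketch (assuming the `cmind` package is installed and this repository is pulled), generating a Dockerfile for Ubuntu 22.04 might look like:
+
+```python
+import cmind
+
+# Sketch: 'docker_os' and 'docker_os_version' mirror the CLI flags of the same name
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'build,dockerfile',
+                  'docker_os': 'ubuntu',
+                  'docker_os_version': '22.04',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```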

+ + +#### Run this script via GUI + +```cmr "cm gui" --script="build,dockerfile"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=build,dockerfile) to generate the CM command. + +#### Run this script via Docker (beta) + +`cm docker script "build dockerfile[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_slim` + - Environment variables: + - *CM_DOCKER_BUILD_SLIM*: `yes` + - Workflow: + +
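+ For example, appending the variation to the script tags with `cm run script --tags=build,dockerfile,_slim` selects `_slim` and sets `CM_DOCKER_BUILD_SLIM=yes`.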
+ + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--build=value` → `CM_BUILD_DOCKER_IMAGE=value` +* `--cache=value` → `CM_DOCKER_CACHE=value` +* `--cm_repo=value` → `CM_MLOPS_REPO=value` +* `--cm_repo_flags=value` → `CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO=value` +* `--cm_repos=value` → `CM_DOCKER_EXTRA_CM_REPOS=value` +* `--comments=value` → `CM_DOCKER_RUN_COMMENTS=value` +* `--copy_files=value` → `CM_DOCKER_COPY_FILES=value` +* `--docker_base_image=value` → `CM_DOCKER_IMAGE_BASE=value` +* `--docker_os=value` → `CM_DOCKER_OS=value` +* `--docker_os_version=value` → `CM_DOCKER_OS_VERSION=value` +* `--extra_sys_deps=value` → `CM_DOCKER_EXTRA_SYS_DEPS=value` +* `--fake_docker_deps=value` → `CM_DOCKER_FAKE_DEPS=value` +* `--fake_run_option=value` → `CM_DOCKER_FAKE_RUN_OPTION=value` +* `--file_path=value` → `CM_DOCKERFILE_WITH_PATH=value` +* `--gh_token=value` → `CM_GH_TOKEN=value` +* `--image_repo=value` → `CM_DOCKER_IMAGE_REPO=value` +* `--image_tag=value` → `CM_DOCKER_IMAGE_TAG=value` +* `--package_manager_update_cmd=value` → `CM_PACKAGE_MANAGER_UPDATE_CMD=value` +* `--pip_extra_flags=value` → `CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS=value` +* `--post_file=value` → `DOCKER_IMAGE_POST_FILE=value` +* `--post_run_cmds=value` → `CM_DOCKER_POST_RUN_COMMANDS=value` +* `--pre_run_cmds=value` → `CM_DOCKER_PRE_RUN_COMMANDS=value` +* `--real_run=value` → `CM_REAL_RUN=value` +* `--run_cmd=value` → `CM_DOCKER_RUN_CMD=value` +* `--run_cmd_extra=value` → `CM_DOCKER_RUN_CMD_EXTRA=value` +* `--script_tags=value` → `CM_DOCKER_RUN_SCRIPT_TAGS=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "build":...}) +``` + +
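+As a complete, hedged sketch of the call above (the input values are illustrative):
+
+```python
+import cmind as cm
+
+# Sketch: 'build' maps to CM_BUILD_DOCKER_IMAGE and 'image_tag' to CM_DOCKER_IMAGE_TAG (see the list above)
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'build,dockerfile',
+               'build': 'yes',
+               'image_tag': 'latest',
+               'out': 'con'})
+```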
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_DOCKER_BUILD_SLIM: `no` +* CM_DOCKER_OS: `ubuntu` +* CM_DOCKER_IMAGE_EOL: ` +` + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-dockerfile/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-dockerfile/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-dockerfile/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-dockerfile/_cm.json) + 1. Run "postprocess" function from customize.py + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-dockerfile/_cm.json)*** + * build,docker,image + * `if (CM_BUILD_DOCKER_IMAGE in ['yes', '1'])` + * CM names: `--adr.['build-docker-image']...` + - CM script: [build-docker-image](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/build-docker-image) + +___ +### Script output +`cmr "build dockerfile [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DOCKERFILE_*` +#### New environment keys auto-detected from customize + +* `CM_DOCKERFILE_WITH_PATH` \ No newline at end of file diff --git a/script/build-dockerfile/_cm.json b/script/build-dockerfile/_cm.json new file mode 100644 index 0000000000..0a438e8fd9 --- /dev/null +++ b/script/build-dockerfile/_cm.json @@ -0,0 +1,69 @@ +{ + "alias": "build-dockerfile", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Docker automation", + "cache": false, + "input_mapping": { + "extra_sys_deps": "CM_DOCKER_EXTRA_SYS_DEPS", + "build": "CM_BUILD_DOCKER_IMAGE", + "cache": "CM_DOCKER_CACHE", + "cm_repo": "CM_MLOPS_REPO", + "cm_repo_flags": "CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO", + "cm_repos": "CM_DOCKER_EXTRA_CM_REPOS", + "docker_os": "CM_DOCKER_OS", + "docker_os_version": "CM_DOCKER_OS_VERSION", + "docker_base_image": "CM_DOCKER_IMAGE_BASE", + "fake_run_option": "CM_DOCKER_FAKE_RUN_OPTION", + "file_path": "CM_DOCKERFILE_WITH_PATH", + "gh_token": "CM_GH_TOKEN", + "fake_docker_deps": "CM_DOCKER_FAKE_DEPS", + "image_repo": "CM_DOCKER_IMAGE_REPO", + "image_tag": "CM_DOCKER_IMAGE_TAG", + "real_run": "CM_REAL_RUN", + "run_cmd": "CM_DOCKER_RUN_CMD", + "script_tags": "CM_DOCKER_RUN_SCRIPT_TAGS", + "comments": "CM_DOCKER_RUN_COMMENTS", + "run_cmd_extra": "CM_DOCKER_RUN_CMD_EXTRA", + "pre_run_cmds": "CM_DOCKER_PRE_RUN_COMMANDS", + "post_run_cmds": "CM_DOCKER_POST_RUN_COMMANDS", + "post_file": "DOCKER_IMAGE_POST_FILE", + "copy_files": "CM_DOCKER_COPY_FILES", + "pip_extra_flags": "CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS", + "package_manager_update_cmd": "CM_PACKAGE_MANAGER_UPDATE_CMD" + }, + "default_env": { + "CM_DOCKER_BUILD_SLIM": "no", + "CM_DOCKER_OS": "ubuntu", + "CM_DOCKER_IMAGE_EOL": "\n" + }, + "new_env_keys": [ + "CM_DOCKERFILE_*" + ], + "post_deps": [ + { + "enable_if_env": { + "CM_BUILD_DOCKER_IMAGE": [ + "yes", + "1" + ] + }, + "names": [ + "build-docker-image" + ], + "tags": "build,docker,image" + } + ], + "tags": [ + "build", + "dockerfile" + ], + "uid": "e66a7483230d4641", + "variations": { + "slim": { + "env": { + "CM_DOCKER_BUILD_SLIM": "yes" + } + } + } +} diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py new file mode 100644 index 0000000000..038f003384 --- /dev/null +++ b/script/build-dockerfile/customize.py @@ -0,0 +1,286 @@ +from cmind
import utils +import cmind as cm +import os +import json + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if env["CM_DOCKER_OS"] not in [ "ubuntu", "rhel", "arch" ]: + return {'return': 1, 'error': "Currently only ubuntu, rhel and arch are supported in CM docker"} + + path = i['run_script_input']['path'] + + with open(os.path.join(path, "dockerinfo.json")) as f: + config = json.load(f) + + build_args = [] + build_args_default = {'CM_ADD_DOCKER_GROUP_ID':''} + input_args = [] + copy_files = [] + + if 'CM_DOCKER_RUN_SCRIPT_TAGS' in env: + script_tags=env['CM_DOCKER_RUN_SCRIPT_TAGS'] + found_scripts = cm.access({'action': 'search', 'automation': 'script', 'tags': script_tags}) + scripts_list = found_scripts['list'] + + if not scripts_list: + return {'return': 1, 'error': 'No CM script found for tags ' + script_tags} + + if len(scripts_list) > 1: + return {'return': 1, 'error': 'More than one scripts found for tags '+ script_tags} + + script = scripts_list[0] + input_mapping = script.meta.get('input_mapping', {}) + default_env = script.meta.get('default_env', {}) + + for input_,env_ in input_mapping.items(): + if input_ == "docker": + continue + arg=env_ + if env_ in default_env: #other inputs to be done later + arg=arg+"="+default_env[env_] + #build_args.append(arg) + #input_args.append("--"+input_+"="+"$"+env_) + + if "CM_DOCKER_OS_VERSION" not in env: + env["CM_DOCKER_OS_VERSION"] = "20.04" + + docker_image_base = get_value(env, config, 'FROM', 'CM_DOCKER_IMAGE_BASE') + if not docker_image_base: + return {'return': 1, 'error': f"Version \"{env['CM_DOCKER_OS_VERSION']}\" is not supported yet for \"{env['CM_DOCKER_OS']}\" "} + + if "CM_MLOPS_REPO" in env: + cm_mlops_repo = env["CM_MLOPS_REPO"] + else: + cm_mlops_repo = "mlcommons@ck" + + if 'CM_DOCKERFILE_WITH_PATH' not in env: + env['CM_DOCKERFILE_WITH_PATH'] = os.path.join(os.getcwd(), "Dockerfile") + + dockerfile_with_path = env['CM_DOCKERFILE_WITH_PATH'] + dockerfile_dir = os.path.dirname(dockerfile_with_path) + + extra_dir = os.path.dirname(dockerfile_with_path) + + if extra_dir!='': + os.makedirs(extra_dir, exist_ok=True) + + f = open(dockerfile_with_path, "w") + EOL = env['CM_DOCKER_IMAGE_EOL'] + f.write('FROM ' + docker_image_base + EOL) + + # Maintainers + f.write(EOL) + f.write('# Automatically generated by the CM workflow automation meta-framework' + EOL) + f.write('# https://github.com/mlcommons/ck' + EOL) + f.write(EOL) + + f.write('LABEL github=""' + EOL) + f.write('LABEL maintainer=""' + EOL) + f.write('LABEL license=""' + EOL) + + f.write(EOL) + + image_label = get_value(env, config, 'LABEL', 'CM_DOCKER_IMAGE_LABEL') + if image_label: + f.write('LABEL ' + image_label + EOL) + f.write(EOL) + + shell = get_value(env, config, 'SHELL', 'CM_DOCKER_IMAGE_SHELL') + if shell: + f.write('SHELL ' + shell + EOL) + f.write(EOL) + + for arg in config['ARGS']: + f.write('ARG '+ arg + EOL) + + for build_arg in build_args: + f.write('ARG '+ build_arg + EOL) + + for build_arg in sorted(build_args_default): + v = build_args_default[build_arg] + f.write('ARG '+ build_arg + '="' + str(v) + '"' + EOL) + + f.write(EOL) + copy_cmds = [] + if 'CM_DOCKER_COPY_FILES' in env: + import shutil + for copy_file in env['CM_DOCKER_COPY_FILES']: + copy_split = copy_file.split(":") + if len(copy_split) != 2: + return {'return': 1, 'error': 'Invalid docker copy input {} given'.format(copy_file)} + filename = os.path.basename(copy_split[0]) + if not os.path.exists(os.path.join(dockerfile_dir, filename)): + shutil.copytree(copy_split[0], 
os.path.join(dockerfile_dir, filename)) + f.write('COPY '+ filename+" "+copy_split[1] + EOL) + + f.write(EOL+'# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes'+EOL+'# Install system dependencies' + EOL) + f.write('RUN ' + get_value(env, config, 'package-manager-update-cmd', 'CM_PACKAGE_MANAGER_UPDATE_CMD') + EOL) + f.write('RUN '+ get_value(env, config, 'package-manager-get-cmd') + " " + " ".join(get_value(env, config, + 'packages')) + EOL) + + if env.get('CM_DOCKER_EXTRA_SYS_DEPS', '')!='': + f.write('RUN ' + env['CM_DOCKER_EXTRA_SYS_DEPS'] + EOL) + + if env['CM_DOCKER_OS'] == "ubuntu": + if int(env['CM_DOCKER_OS_VERSION'].split('.')[0]) >= 23: + if "--break-system-packages" not in env.get('CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS', ''): + env['CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS'] = " --break-system-packages" + pip_extra_flags = env.get('CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS', '') + + + f.write(EOL+'# Setup docker environment' + EOL) + + entry_point = get_value(env, config, 'ENTRYPOINT', 'CM_DOCKER_IMAGE_ENTRYPOINT') + if entry_point: + f.write('ENTRYPOINT ' + entry_point + EOL) + + for key,value in config['ENV'].items(): + f.write('ENV '+ key + "=\"" + value + "\""+ EOL) + for cmd in config['RUN_CMDS']: + f.write('RUN '+ cmd + EOL) + + f.write(EOL+'# Setup docker user' + EOL) + docker_user = get_value(env, config, 'USER', 'CM_DOCKER_USER') + docker_userid = get_value(env, config, 'USERID', 'CM_DOCKER_USER_ID') + docker_group = get_value(env, config, 'GROUP', 'CM_DOCKER_GROUP') + docker_groupid = get_value(env, config, 'GROUPID', 'CM_DOCKER_GROUP_ID') + if docker_user: + if not docker_group: + docker_group = docker_user + DOCKER_GROUP = ' -g ' + docker_group + if docker_groupid: + DOCKER_GROUP_ID = "-g " + docker_groupid + else: + DOCKER_GROUP_ID = "" + f.write('RUN groupadd ${CM_ADD_DOCKER_GROUP_ID} ' + DOCKER_GROUP_ID + docker_group + EOL) + if docker_userid: + DOCKER_USER_ID = "-u " + docker_userid + else: + DOCKER_USER_ID = "" + user_shell = json.loads(shell) + f.write('RUN useradd ' + DOCKER_USER_ID + DOCKER_GROUP + ' --create-home --shell '+ user_shell[0] + ' ' + + docker_user + EOL) + f.write('RUN echo "' + docker_user + ' ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers' + EOL) + f.write('USER ' + docker_user + ":" + docker_group + EOL) + + dockerfile_env = i['input'].get('dockerfile_env', {}) + dockerfile_env_input_string = "" + for docker_env_key in dockerfile_env: + dockerfile_env_input_string = dockerfile_env_input_string + " --env."+docker_env_key+"="+str(dockerfile_env[docker_env_key]) + + workdir = get_value(env, config, 'WORKDIR', 'CM_DOCKER_WORKDIR') + if workdir: + f.write('WORKDIR ' + workdir + EOL) + + f.write(EOL+'# Install python packages' + EOL) + python = get_value(env, config, 'PYTHON', 'CM_DOCKERFILE_PYTHON') + f.write('RUN {} -m pip install --user '.format(python) + " ".join(get_value(env, config, 'python-packages')) + ' ' + pip_extra_flags + ' ' + EOL) + + f.write(EOL+'# Download CM repo for scripts' + EOL) + + # Add possibility to force rebuild with some extra flag for the repository + x = env.get('CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO','') + if x!='': x=' '+x + + f.write('RUN cm pull repo ' + cm_mlops_repo + x + EOL) + + # Check extra repositories + x = env.get('CM_DOCKER_EXTRA_CM_REPOS','') + if x!='': + for y in x.split(','): + f.write('RUN '+ y + EOL) + + f.write(EOL+'# Install all system dependencies' + EOL) + f.write('RUN cm run script --tags=get,sys-utils-cm --quiet' + EOL) + + if 'CM_DOCKER_PRE_RUN_COMMANDS' in env: + for pre_run_cmd in 
env['CM_DOCKER_PRE_RUN_COMMANDS']: + f.write('RUN '+ pre_run_cmd + EOL) + + run_cmd_extra=" "+env.get('CM_DOCKER_RUN_CMD_EXTRA', '').replace(":","=") + gh_token = get_value(env, config, "GH_TOKEN", "CM_GH_TOKEN") + if gh_token: + run_cmd_extra = " --env.CM_GH_TOKEN=$CM_GH_TOKEN" + + f.write(EOL+'# Run commands' + EOL) + for comment in env.get('CM_DOCKER_RUN_COMMENTS', []): + f.write(comment + EOL) + + skip_extra = False + if 'CM_DOCKER_RUN_CMD' not in env: + if 'CM_DOCKER_RUN_SCRIPT_TAGS' not in env: + env['CM_DOCKER_RUN_CMD']="cm version" + skip_extra = True + else: + env['CM_DOCKER_RUN_CMD']="cm run script --tags=" + env['CM_DOCKER_RUN_SCRIPT_TAGS']+ ' --quiet' + + fake_run = env.get("CM_DOCKER_FAKE_RUN_OPTION"," --fake_run") + dockerfile_env_input_string + fake_run = fake_run + " --fake_deps" if env.get('CM_DOCKER_FAKE_DEPS') else fake_run + + x = 'RUN ' + env['CM_DOCKER_RUN_CMD'] + + if not skip_extra: + x += fake_run + if '--quiet' not in x: + x+=' --quiet' + if run_cmd_extra!='': + x+=' '+run_cmd_extra + + f.write(x + EOL) + + #fake_run to install the dependent scripts and caching them + if not "run" in env['CM_DOCKER_RUN_CMD'] and str(env.get('CM_REAL_RUN', False)).lower() in ["false", "0", "no"]: + fake_run = dockerfile_env_input_string + + x = 'RUN ' + env['CM_DOCKER_RUN_CMD'] + fake_run + run_cmd_extra + if '--quiet' not in x: + x+=' --quiet ' + x+=EOL + + f.write(x) + + if 'CM_DOCKER_POST_RUN_COMMANDS' in env: + for post_run_cmd in env['CM_DOCKER_POST_RUN_COMMANDS']: + f.write('RUN '+ post_run_cmd + EOL) + + post_file = env.get('DOCKER_IMAGE_POST_FILE','') + if post_file!='': + r = utils.load_txt(post_file) + if r['return']>0: return r + + s = r['string'] + f.write(s + EOL) + + f.close() + + #f = open(env['CM_DOCKERFILE_WITH_PATH'], "r") + #print(f.read()) + + return {'return':0} + +def get_value(env, config, key, env_key = None): + if not env_key: + env_key = key + + if env.get(env_key, None) != None: + return env[env_key] + + docker_os = env['CM_DOCKER_OS'] + docker_os_version = env['CM_DOCKER_OS_VERSION'] + + version_meta = config['distros'][docker_os]['versions'].get(docker_os_version, '') + if key in version_meta: + return version_meta[key] + + distro_meta = config['distros'][docker_os] + if key in distro_meta: + return distro_meta[key] + + if key in config: + return config[key] + + return None diff --git a/script/build-dockerfile/dockerfiles/rhel_9.Dockerfile b/script/build-dockerfile/dockerfiles/rhel_9.Dockerfile new file mode 100644 index 0000000000..4deef4ec75 --- /dev/null +++ b/script/build-dockerfile/dockerfiles/rhel_9.Dockerfile @@ -0,0 +1,32 @@ +FROM registry.access.redhat.com/ubi9 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN dnf update -y +RUN dnf install -y python3 python-pip git wget sudo binutils + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run command +RUN cm version diff --git 
a/script/build-dockerfile/dockerfiles/ubuntu_18.04.Dockerfile b/script/build-dockerfile/dockerfiles/ubuntu_18.04.Dockerfile new file mode 100644 index 0000000000..73a9b0fd37 --- /dev/null +++ b/script/build-dockerfile/dockerfiles/ubuntu_18.04.Dockerfile @@ -0,0 +1,32 @@ +FROM ubuntu:18.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run command +RUN cm version diff --git a/script/build-dockerfile/dockerfiles/ubuntu_20.04.Dockerfile b/script/build-dockerfile/dockerfiles/ubuntu_20.04.Dockerfile new file mode 100644 index 0000000000..c07b594335 --- /dev/null +++ b/script/build-dockerfile/dockerfiles/ubuntu_20.04.Dockerfile @@ -0,0 +1,32 @@ +FROM ubuntu:20.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run command +RUN cm version diff --git a/script/build-dockerfile/dockerfiles/ubuntu_22.04.Dockerfile b/script/build-dockerfile/dockerfiles/ubuntu_22.04.Dockerfile new file mode 100644 index 0000000000..05b1d69b37 --- /dev/null +++ b/script/build-dockerfile/dockerfiles/ubuntu_22.04.Dockerfile @@ -0,0 +1,32 @@ +FROM ubuntu:22.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run command +RUN cm version diff --git a/script/build-dockerfile/dockerinfo.json b/script/build-dockerfile/dockerinfo.json new file mode 100644 index 0000000000..d669ee068c --- 
/dev/null +++ b/script/build-dockerfile/dockerinfo.json @@ -0,0 +1,80 @@ +{ + "python-packages": [ + "cmind", "requests", "giturlparse", "tabulate" + ], + "ARGS": [ + "CM_GH_TOKEN" + ], + "ENTRYPOINT": "[\"/bin/bash\", \"-c\"]", + "ENV": { + "TZ": "US/Pacific", + "PATH": "${PATH}:/home/cmuser/.local/bin" + }, + "RUN_CMDS": [ + "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone" + ], + "PYTHON": "python3", + "USERID": "", + "USER": "cmuser", + "GROUPID": "", + "GROUP": "cm", + "SHELL": "[\"/bin/bash\", \"-c\"]", + "WORKDIR": "/home/cmuser", + "distros": { + "ubuntu": { + "package-manager-update-cmd": "apt-get update -y", + "package-manager-get-cmd": "apt-get install -y", + "packages": [ + "python3", "python3-pip", "git", "sudo", "wget" + ], + "versions": { + "18.04": { + "FROM": "ubuntu:18.04" + }, + "20.04": { + "FROM": "ubuntu:20.04" + }, + "22.04": { + "FROM": "ubuntu:22.04" + }, + "23.04": { + "FROM": "ubuntu:23.04" + } + } + }, + "rhel": { + "FROM": "registry.access.redhat.com/ubi9", + "package-manager-update-cmd": "dnf update -y", + "package-manager-get-cmd": "dnf install -y", + "packages": [ + "python3", "python-pip", "git", "wget", "sudo", "binutils" + ], + "versions": { + "9": { + }, + "8": { + "FROM": "registry.access.redhat.com/ubi8", + "packages": [ + "python39", "python39-pip", "git", "wget", "sudo", "binutils" + ], + "python-packages": [ + "cmind", "requests", "giturlparse", "tabulate" + ], + "PYTHON": "python3.9" + } + } + }, + "arch": { + "FROM": "archlinux", + "package-manager-update-cmd": "pacman -Syu --noconfirm", + "package-manager-get-cmd": "pacman -Sy --noconfirm", + "packages": [ + "python", "python-pip", "git", "wget", "sudo", "binutils" + ], + "versions": { + "latest": { + } + } + } + } +} diff --git a/script/build-mlperf-inference-server-nvidia/README-extra.md b/script/build-mlperf-inference-server-nvidia/README-extra.md new file mode 100644 index 0000000000..f05fd83220 --- /dev/null +++ b/script/build-mlperf-inference-server-nvidia/README-extra.md @@ -0,0 +1,2 @@ +# About +This CM script builds the Nvidia C++ implementation of MLPerf Inference diff --git a/script/build-mlperf-inference-server-nvidia/README.md b/script/build-mlperf-inference-server-nvidia/README.md new file mode 100644 index 0000000000..2b58170af5 --- /dev/null +++ b/script/build-mlperf-inference-server-nvidia/README.md @@ -0,0 +1,244 @@ +Automatically generated README for this automation recipe: **build-mlperf-inference-server-nvidia** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=build-mlperf-inference-server-nvidia,f37403af5e9f4541) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-mlperf-inference-server-nvidia)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "build mlcommons mlperf inference inference-server server nvidia-harness nvidia" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia` + +`cm run script --tags=build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia[,variations] [--input_flags]` + +*or* + +`cmr "build mlcommons mlperf inference inference-server server nvidia-harness nvidia"` + +`cmr "build mlcommons mlperf inference inference-server server nvidia-harness nvidia [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` +

+ + +#### Run this script via GUI + +```cmr "cm gui" --script="build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia) to generate the CM command. + +#### Run this script via Docker (beta) + +`cm docker script "build mlcommons mlperf inference inference-server server nvidia-harness nvidia[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * Group "**code**" +
+ Click here to expand this section. + + * **`_ctuning`** (default) + - Workflow: + * `_custom` + - Workflow: + * `_mlcommons` + - Workflow: + * `_nvidia-only` + - Workflow: + +
+ + + * Group "**device**" +
+ Click here to expand this section. + + * `_cpu` + - Environment variables: + - *CM_MLPERF_DEVICE*: `cpu` + - Workflow: + * **`_cuda`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `cuda` + - *CM_MLPERF_DEVICE_LIB_NAMESPEC*: `cudart` + - Workflow: + * `_inferentia` + - Environment variables: + - *CM_MLPERF_DEVICE*: `inferentia` + - Workflow: + +
+ + +#### Default variations + +`_ctuning,_cuda` + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--clean=value` → `CM_MAKE_CLEAN=value` +* `--custom_system=value` → `CM_CUSTOM_SYSTEM_NVIDIA=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "clean":...}) +``` + +
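+A fuller, hedged sketch of the same call (values are illustrative):
+
+```python
+import cmind as cm
+
+# Sketch: 'clean' maps to CM_MAKE_CLEAN, so this forces "make clean" before the build
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia',
+               'clean': 'yes',
+               'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```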
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MAKE_BUILD_COMMAND: `build` +* CM_MAKE_CLEAN: `no` +* CM_CUSTOM_SYSTEM_NVIDIA: `yes` + +
+ +#### Versions +Default version: `r3.1` + +* `r2.1` +* `r3.0` +* `r3.1` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-mlperf-inference-server-nvidia/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,cuda,_cudnn + * `if (CM_MLPERF_DEVICE in ['cuda', 'inferentia'])` + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,tensorrt,_dev + * `if (CM_MLPERF_DEVICE in ['cuda', 'inferentia']) AND (CM_TENSORRT_SYSTEM_DETECT != True)` + * CM names: `--adr.['tensorrt']...` + - CM script: [get-tensorrt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tensorrt) + * get,gcc + - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc) + * get,cmake + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * get,generic,sys-util,_glog-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_gflags-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_libgmock-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_libre2-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_libnuma-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_libboost-all-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_rapidjson-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,nvidia,mlperf,inference,common-code + * CM names: `--adr.['nvidia-inference-common-code']...` + - CM script: [get-mlperf-inference-nvidia-common-code](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-nvidia-common-code) + * get,generic-python-lib,_package.pybind11 + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_pycuda + * `if (CM_RUN_STATE_DOCKER not in ['yes', True, 'True'])` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_opencv-python + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_nvidia-dali + - CM script: 
[get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,mlperf,inference,nvidia,scratch,space + * CM names: `--adr.['nvidia-scratch-space']...` + - CM script: [get-mlperf-inference-nvidia-scratch-space](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-nvidia-scratch-space) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-mlperf-inference-server-nvidia/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-mlperf-inference-server-nvidia/_cm.yaml) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-mlperf-inference-server-nvidia/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-mlperf-inference-server-nvidia/_cm.yaml) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-mlperf-inference-server-nvidia/customize.py)*** + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/build-mlperf-inference-server-nvidia/_cm.yaml)*** + * add,custom,system,nvidia + * `if (CM_CUSTOM_SYSTEM_NVIDIA not in ['no', False, 'False'])` + * CM names: `--adr.['custom-system-nvidia', 'nvidia-inference-common-code']...` + - CM script: [add-custom-nvidia-system](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/add-custom-nvidia-system) + +___ +### Script output +`cmr "build mlcommons mlperf inference inference-server server nvidia-harness nvidia [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH` +#### New environment keys auto-detected from customize diff --git a/script/build-mlperf-inference-server-nvidia/_cm.yaml b/script/build-mlperf-inference-server-nvidia/_cm.yaml new file mode 100644 index 0000000000..6dd5ebe8e3 --- /dev/null +++ b/script/build-mlperf-inference-server-nvidia/_cm.yaml @@ -0,0 +1,255 @@ +# Identification of this CM script +alias: build-mlperf-inference-server-nvidia +uid: f37403af5e9f4541 +cache: true +automation_alias: script +automation_uid: 5b4e0237da074764 +default_version: r3.1 + +category: "MLPerf benchmark support" + + +# User-friendly tags to find this CM script +tags: + - build + - mlcommons + - mlperf + - inference + - inference-server + - server + - nvidia-harness + - nvidia + + +new_env_keys: + - CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH + +default_env: + CM_MAKE_BUILD_COMMAND: build + CM_MAKE_CLEAN: "no" + CM_CUSTOM_SYSTEM_NVIDIA: "yes" + +input_mapping: + custom_system: CM_CUSTOM_SYSTEM_NVIDIA + clean: CM_MAKE_CLEAN + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect python3 + - tags: get,python3 + names: + - python + - python3 + + # Detect CUDA + - tags: get,cuda,_cudnn + names: + - cuda + enable_if_env: + CM_MLPERF_DEVICE: + - cuda + - inferentia + + # Detect Tensorrt + - tags: get,tensorrt,_dev + names: + - tensorrt + enable_if_env: + CM_MLPERF_DEVICE: + - cuda + - inferentia + skip_if_env: + CM_TENSORRT_SYSTEM_DETECT: + - yes + + # Detect gcc + - tags: get,gcc + + # Detect CMake + - tags: get,cmake + version_min: "3.25" + + # 
Detect Google Logger + - tags: get,generic,sys-util,_glog-dev + + # Detect GFlags + - tags: get,generic,sys-util,_gflags-dev + + # Detect libgmock-dev + - tags: get,generic,sys-util,_libgmock-dev + + # Detect libre2-dev + - tags: get,generic,sys-util,_libre2-dev + + # Detect libnuma-dev + - tags: get,generic,sys-util,_libnuma-dev + + # Detect libboost-all-dev + - tags: get,generic,sys-util,_libboost-all-dev + + # Detect rapidjson-dev + - tags: get,generic,sys-util,_rapidjson-dev + + + # Download Nvidia Submission Code + - tags: get,nvidia,mlperf,inference,common-code + names: + - nvidia-inference-common-code + + - tags: get,generic-python-lib,_package.pybind11 + + # Detect pycuda + - tags: get,generic-python-lib,_pycuda + skip_if_env: + CM_RUN_STATE_DOCKER: + - 'yes' + - True + - 'True' + + # Detect opencv-python + - tags: get,generic-python-lib,_opencv-python + + # Detect nvidia-dali + - tags: get,generic-python-lib,_nvidia-dali + + # Get Nvidia scratch space where data and models get downloaded + - tags: get,mlperf,inference,nvidia,scratch,space + names: + - nvidia-scratch-space + + +post_deps: + # Detect nvidia system + - tags: add,custom,system,nvidia + names: + - custom-system-nvidia + - nvidia-inference-common-code + skip_if_env: + CM_CUSTOM_SYSTEM_NVIDIA: + - "no" + - False + - "False" + +variations: + # Target devices + cpu: + group: device + env: + CM_MLPERF_DEVICE: cpu + inferentia: + group: device + env: + CM_MLPERF_DEVICE: inferentia + cuda: + group: device + default: true + env: + CM_MLPERF_DEVICE: cuda + CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + + ctuning: + group: code + default: true + add_deps_recursive: + nvidia-inference-common-code: + tags: _ctuning + nvidia-only: + group: code + add_deps_recursive: + nvidia-inference-common-code: + tags: _nvidia-only + custom: + group: code + add_deps_recursive: + nvidia-inference-common-code: + tags: _custom + mlcommons: + group: code + add_deps_recursive: + nvidia-inference-common-code: + tags: _mlcommons + + +versions: + r2.1: + add_deps_recursive: + nvidia-inference-common-code: + version: r2.1 + nvidia-scratch-space: + tags: _version.2_1 + + r3.0: + add_deps_recursive: + nvidia-inference-common-code: + version: r3.0 + nvidia-scratch-space: + tags: _version.3_0 + + r3.1: + add_deps_recursive: + nvidia-inference-common-code: + version: r3.1 + nvidia-scratch-space: + tags: _version.4_0 + deps: + - tags: install,nccl,libs,_cuda + - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1 + names: + - pytorch + - torch + - tags: install,torchvision,from.src,_for-nvidia-mlperf-inference-v3.1 + names: + - pytorchvision + - torchvision + +docker: + skip_run_cmd: 'no' + all_gpus: 'yes' + shm_size: '32gb' + extra_run_args: ' --runtime=nvidia --ulimit memlock=-1 --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined' + docker_os: ubuntu + docker_real_run: False + interactive: True + docker_os_version: '20.04' + base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v3.1-cuda12.2-cudnn8.9-x86_64-ubuntu20.04-l4-public + docker_input_mapping: + imagenet_path: IMAGENET_PATH + gptj_checkpoint_path: GPTJ_CHECKPOINT_PATH + criteo_preprocessed_path: CRITEO_PREPROCESSED_PATH + results_dir: RESULTS_DIR + submission_dir: SUBMISSION_DIR + cudnn_tar_file_path: CM_CUDNN_TAR_FILE_PATH + tensorrt_tar_file_path: CM_TENSORRT_TAR_FILE_PATH + cuda_run_file_path: CUDA_RUN_FILE_LOCAL_PATH + dlrm_data_path: DLRM_DATA_PATH + scratch_path: MLPERF_SCRATCH_PATH + deps: + - tags: 
get,mlperf,inference,nvidia,scratch,space + - tags: get,mlperf,inference,results,dir + - tags: get,mlperf,inference,submission,dir + - tags: get,nvidia-docker + pre_run_cmds: + - cm pull repo + run_cmd_prefix: sudo apt remove -y cmake + mounts: + - "${{ IMAGENET_PATH }}:/data/imagenet-val" + - "${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}:${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}" + - "${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}" + - "${{ RESULTS_DIR }}:/home/cmuser/results_dir" + - "${{ SUBMISSION_DIR }}:/home/cmuser/submission_dir" + - "${{ CM_CUDNN_TAR_FILE_PATH }}:${{ CM_CUDNN_TAR_FILE_PATH }}" + - "${{ CM_TENSORRT_TAR_FILE_PATH }}:${{ CM_TENSORRT_TAR_FILE_PATH }}" + - "${{ CUDA_RUN_FILE_LOCAL_PATH }}:${{ CUDA_RUN_FILE_LOCAL_PATH }}" + - "${{ MLPERF_SCRATCH_PATH }}:${{ MLPERF_SCRATCH_PATH }}" + - "${{ DLRM_DATA_PATH }}:/home/mlperf_inf_dlrmv2" diff --git a/script/build-mlperf-inference-server-nvidia/customize.py b/script/build-mlperf-inference-server-nvidia/customize.py new file mode 100644 index 0000000000..e540beb0bc --- /dev/null +++ b/script/build-mlperf-inference-server-nvidia/customize.py @@ -0,0 +1,41 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + env = i['env'] + + if '+LIBRARY_PATH' not in env: + env['+LIBRARY_PATH'] = [] + + if 'CM_TENSORRT_INSTALL_PATH' in env: + env['+LIBRARY_PATH'].append(os.path.join(env['CM_TENSORRT_INSTALL_PATH'], "lib")) + + cxxflags = [ "-Wno-error=switch", "-DDALI_1_15=1", "-Wno-error=maybe-uninitialized" ] + + if env.get('CM_GCC_VERSION', '') != '': + gcc_major_version = env['CM_GCC_VERSION'].split(".")[0] + if int(gcc_major_version) > 10: + cxxflags.append("-Wno-error=range-loop-construct") + + if env.get('CM_MLPERF_DEVICE','') == "inferentia": + env['USE_INFERENTIA'] = "1" + env['USE_NIGHTLY'] = "0" + env['CM_MAKE_BUILD_COMMAND'] = "build" + + if '+ CXXFLAGS' not in env: + env['+ CXXFLAGS'] = [] + + env['+ CXXFLAGS'] += cxxflags + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/build-mlperf-inference-server-nvidia/run.sh b/script/build-mlperf-inference-server-nvidia/run.sh new file mode 100644 index 0000000000..e03aaa72b8 --- /dev/null +++ b/script/build-mlperf-inference-server-nvidia/run.sh @@ -0,0 +1,16 @@ +#!/bin/bash +CUR=$PWD + +cd ${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH} + +if [[ ${CM_MAKE_CLEAN} == "yes" ]]; then + make clean +fi + +if [[ ${CM_MLPERF_DEVICE} == "inferentia" ]]; then + make prebuild +fi + +SKIP_DRIVER_CHECK=1 make ${CM_MAKE_BUILD_COMMAND} + +test $? -eq 0 || exit $? 
diff --git a/script/calibrate-model-for.qaic/README.md b/script/calibrate-model-for.qaic/README.md new file mode 100644 index 0000000000..0ab6ec1c12 --- /dev/null +++ b/script/calibrate-model-for.qaic/README.md @@ -0,0 +1,288 @@ +Automatically generated README for this automation recipe: **calibrate-model-for.qaic** + +Category: **AI/ML optimization** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=calibrate-model-for.qaic,817bad70df2f4e45) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/calibrate-model-for.qaic)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *qaic,calibrate,profile,qaic-profile,qaic-calibrate* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "qaic calibrate profile qaic-profile qaic-calibrate" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=qaic,calibrate,profile,qaic-profile,qaic-calibrate` + +`cm run script --tags=qaic,calibrate,profile,qaic-profile,qaic-calibrate[,variations] ` + +*or* + +`cmr "qaic calibrate profile qaic-profile qaic-calibrate"` + +`cmr "qaic calibrate profile qaic-profile qaic-calibrate [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'qaic,calibrate,profile,qaic-profile,qaic-calibrate',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
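As a concrete (illustrative) example, variations from the [Variations](#variations) section below are appended to the `tags` string with a leading underscore:

```python
import cmind

# _resnet50 and _bs.1 are variations documented in the "Variations"
# section of this README; they select the model and the batch size.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'qaic,calibrate,profile,qaic-profile,qaic-calibrate,_resnet50,_bs.1',
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```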
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="qaic,calibrate,profile,qaic-profile,qaic-calibrate"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=qaic,calibrate,profile,qaic-profile,qaic-calibrate) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "qaic calibrate profile qaic-profile qaic-calibrate[variations]"`
+
+___
+### Customization
+
+
+#### Variations
+
+ * *Internal group (variations should not be selected manually)*
+
+ Click here to expand this section. + + * `_bert_` + - Environment variables: + - *CM_QAIC_MODEL_NAME*: `bert-large` + - *CM_CREATE_INPUT_BATCH*: `no` + - Workflow: + +
+ + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_first.#` + - Workflow: + * `_resnet50,tf` + - Environment variables: + - *CM_QAIC_MODEL_TO_CONVERT*: `calibrate_resnet50_tf` + - Workflow: + +
+ + + * Group "**batch-size**" +
+ Click here to expand this section. + + * `_bs.#` + - Environment variables: + - *CM_QAIC_MODEL_BATCH_SIZE*: `#` + - *CM_CREATE_INPUT_BATCH*: `yes` + - Workflow: + * `_bs.1` + - Environment variables: + - *CM_QAIC_MODEL_BATCH_SIZE*: `1` + - *CM_CREATE_INPUT_BATCH*: `yes` + - Workflow: + +
+ + + * Group "**calib-dataset-filter-size**" +
+ Click here to expand this section. + + * `_filter-size.#` + - Workflow: + +
+ + + * Group "**calibration-option**" +
+ Click here to expand this section. + + * `_mlperf.option1` + - Workflow: + * `_mlperf.option2` + - Workflow: + +
+ + + * Group "**model**" +
+ Click here to expand this section. + + * `_bert-99` + - Environment variables: + - *CM_CALIBRATE_SQUAD*: `yes` + - *CM_QAIC_COMPILER_ARGS*: `` + - *CM_QAIC_COMPILER_PARAMS*: `-onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,<<>> -input-list-file=<<>> -num-histogram-bins=512 -profiling-threads=<<>>` + - *CM_QAIC_MODEL_TO_CONVERT*: `calibrate_bert_mlperf` + - Workflow: + * `_resnet50` + - Environment variables: + - *CM_QAIC_MODEL_NAME*: `resnet50` + - *CM_CALIBRATE_IMAGENET*: `yes` + - *CM_QAIC_COMPILER_ARGS*: `` + - *CM_QAIC_COMPILER_PARAMS*: `-output-node-name=ArgMax -profiling-threads=<<>>` + - *CM_QAIC_OUTPUT_NODE_NAME*: `-output-node-name=ArgMax` + - *CM_QAIC_MODEL_TO_CONVERT*: `calibrate_resnet50_tf` + - Workflow: + * `_retinanet` + - Environment variables: + - *CM_QAIC_MODEL_NAME*: `retinanet` + - *CM_CALIBRATE_OPENIMAGES*: `yes` + - *CM_QAIC_COMPILER_ARGS*: `` + - *CM_QAIC_COMPILER_PARAMS*: `-enable-channelwise -profiling-threads=<<>> -onnx-define-symbol=batch_size,<<>> -node-precision-info=<<>>` + - *CM_QAIC_MODEL_TO_CONVERT*: `calibrate_retinanet_no_nms_mlperf` + - Workflow: + +
+ + + * Group "**model-framework**" +
+ Click here to expand this section. + + * `_tf` + - Workflow: + +
+ + + * Group "**seq-length**" +
+ Click here to expand this section. + + * `_seq.#` + - Environment variables: + - *CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH*: `#` + - Workflow: + * `_seq.384` + - Environment variables: + - *CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH*: `#` + - Workflow: + +
+ +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, an `env` dictionary in `@input.json`, or script flags.
+
+
+
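For example, a default key such as `CM_QUIET` (read by this script's `customize.py` during preprocessing) can be overridden per run. A small sketch of how a CLI flag like `--env.CM_QUIET=yes` maps into the CM input dictionary (illustrative only, equivalent to passing an `env` key in the Python API above):

```python
# Sketch: how --env.KEY=VALUE becomes an 'env' entry in the CM input dict.
cli_flag = "--env.CM_QUIET=yes"
key, value = cli_flag[len("--env."):].split("=", 1)
cm_input = {'env': {key: value}}
print(cm_input)  # {'env': {'CM_QUIET': 'yes'}}
```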
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/calibrate-model-for.qaic/_cm.json)*** + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,qaic,apps,sdk + * CM names: `--adr.['qaic-apps-sdk']...` + - CM script: [get-qaic-apps-sdk](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-qaic-apps-sdk) + * get,preprocessed,dataset,_calibration,openimages,_for.retinanet.onnx,_NCHW,_fp32,_custom-annotations + * `if (CM_CALIBRATE_OPENIMAGES == yes)` + * CM names: `--adr.['openimages-cal', 'preprocessed-dataset']...` + - CM script: [get-preprocessed-dataset-openimages](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-openimages) + * get,dataset,imagenet,preprocessed,_calibration,_for.resnet50,_float32,_rgb32 + * `if (CM_CALIBRATE_IMAGENET == yes)` + * CM names: `--adr.['imagenet-cal', 'preprocessed-calibration-dataset']...` + - CM script: [get-preprocessed-dataset-imagenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-imagenet) + * get,dataset,preprocessed,_calib1,squad,_pickle,_seq-length.384,_packed + * `if (CM_CALIBRATE_SQUAD == on)` + * CM names: `--adr.['squad-cal', 'preprocessed-dataset']...` + - CM script: [get-preprocessed-dataset-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-squad) + * get,ml-model + * CM names: `--adr.['model-src']...` + - CM script: [get-ml-model-3d-unet-kits19](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-3d-unet-kits19) + - CM script: [get-ml-model-abtf-ssd-pytorch](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-abtf-ssd-pytorch) + - CM script: [get-ml-model-bert-base-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-bert-base-squad) + - CM script: [get-ml-model-bert-large-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-bert-large-squad) + - CM script: [get-ml-model-dlrm-terabyte](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-dlrm-terabyte) + - CM script: [get-ml-model-efficientnet-lite](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-efficientnet-lite) + - CM script: [get-ml-model-gptj](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-gptj) + - CM script: [get-ml-model-huggingface-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-huggingface-zoo) + - CM script: [get-ml-model-llama2](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-llama2) + - CM script: [get-ml-model-mobilenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-mobilenet) + - CM script: [get-ml-model-neuralmagic-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-neuralmagic-zoo) + - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50) + - CM script: [get-ml-model-retinanet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet) + - CM script: [get-ml-model-retinanet-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet-nvidia) + - CM script: [get-ml-model-rnnt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-rnnt) + - CM script: 
[get-ml-model-stable-diffusion](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-stable-diffusion) + - CM script: [get-ml-model-tiny-resnet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-tiny-resnet) + - CM script: [get-ml-model-using-imagenet-from-model-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-using-imagenet-from-model-zoo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/calibrate-model-for.qaic/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/calibrate-model-for.qaic/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/calibrate-model-for.qaic/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/calibrate-model-for.qaic/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/calibrate-model-for.qaic/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/calibrate-model-for.qaic/_cm.json) + +___ +### Script output +`cmr "qaic calibrate profile qaic-profile qaic-calibrate [,variations]" -j` +#### New environment keys (filter) + +* `CM_QAIC_MODEL_PROFILE_*` +#### New environment keys auto-detected from customize + +* `CM_QAIC_MODEL_PROFILE_WITH_PATH` \ No newline at end of file diff --git a/script/calibrate-model-for.qaic/_cm.json b/script/calibrate-model-for.qaic/_cm.json new file mode 100644 index 0000000000..270ed53b55 --- /dev/null +++ b/script/calibrate-model-for.qaic/_cm.json @@ -0,0 +1,223 @@ +{ + "alias": "calibrate-model-for.qaic", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML optimization", + "new_env_keys": [ + "CM_QAIC_MODEL_PROFILE_*" + ], + "deps": [ + { + "tags": "detect,cpu" + }, + { + "tags": "get,qaic,apps,sdk", + "names": [ + "qaic-apps-sdk" + ] + }, + { + "enable_if_env": + { + "CM_CALIBRATE_OPENIMAGES": [ + "yes" + ] + }, + "names": [ + "openimages-cal", + "preprocessed-dataset" + ], + "tags": "get,preprocessed,dataset,_calibration,openimages,_for.retinanet.onnx,_NCHW,_fp32,_custom-annotations" + }, + { + "enable_if_env": + { + "CM_CALIBRATE_IMAGENET": [ + "yes" + ] + }, + "names": [ + "imagenet-cal", + "preprocessed-calibration-dataset" + ], + "tags": "get,dataset,imagenet,preprocessed,_calibration,_for.resnet50,_float32,_rgb32" + }, + { + "enable_if_env": + { + "CM_CALIBRATE_SQUAD": [ + "on" + ] + }, + "names": [ + "squad-cal", + "preprocessed-dataset" + ], + "tags": "get,dataset,preprocessed,_calib1,squad,_pickle,_seq-length.384,_packed" + }, + { + "names": [ + "model-src" + ], + "tags": "get,ml-model" + } + ], + "tags": [ + "qaic", + "calibrate", + "profile", + "qaic-profile", + "qaic-calibrate" + ], + "uid": "817bad70df2f4e45", + "variations": { + "bs.1": { + "group": "batch-size", + "env": { + "CM_QAIC_MODEL_BATCH_SIZE": "1", + "CM_CREATE_INPUT_BATCH": "yes" + } + }, + "bs.#": { + "group": "batch-size", + "env": { + "CM_QAIC_MODEL_BATCH_SIZE": "#", + "CM_CREATE_INPUT_BATCH": "yes" + } + }, + "first.#": { + "adr": { + "preprocessed-dataset": { + "tags": "first.#" + } + } + }, + "mlperf.option1": { + "group": "calibration-option", + "adr": { + "preprocessed-dataset": { + "tags": "_mlperf.option1" + } + } + 
}, + "mlperf.option2": { + "group": "calibration-option", + "adr": { + "preprocessed-dataset": { + "tags": "_mlperf.option2" + } + } + }, + "resnet50": { + "group": "model", + "default_variations": { + "calibration-option": "mlperf.option1", + "model-framework": "tf" + }, + "env": { + "CM_QAIC_MODEL_NAME": "resnet50", + "CM_CALIBRATE_IMAGENET": "yes", + "CM_QAIC_COMPILER_ARGS": "", + "CM_QAIC_COMPILER_PARAMS": "-output-node-name=ArgMax -profiling-threads=<<>>", + "CM_QAIC_OUTPUT_NODE_NAME": "-output-node-name=ArgMax", + "CM_QAIC_MODEL_TO_CONVERT": "calibrate_resnet50_tf" + }, + "adr": { + "model-src": { + "tags": "resnet50,_tf" + } + } + }, + "tf": { + "group": "model-framework" + }, + "resnet50,tf": { + "env": { + "CM_QAIC_MODEL_TO_CONVERT": "calibrate_resnet50_tf" + }, + "adr": { + "preprocessed-dataset": { + "tags": "_NHWC" + }, + "model-src": { + "tags": "_fix-input-shape" + } + } + }, + "retinanet": { + "group": "model", + "adr": { + "model-src": { + "tags": "retinanet,_no-nms,_onnx" + } + }, + "new_env_keys": [ + "CM_QAIC_MODEL_RETINANET_*" + ], + "env": { + "CM_QAIC_MODEL_NAME": "retinanet", + "CM_CALIBRATE_OPENIMAGES": "yes", + "CM_QAIC_COMPILER_ARGS": "", + "CM_QAIC_COMPILER_PARAMS": "-enable-channelwise -profiling-threads=<<>> -onnx-define-symbol=batch_size,<<>> -node-precision-info=<<>>", + "CM_QAIC_MODEL_TO_CONVERT": "calibrate_retinanet_no_nms_mlperf" + } + }, + "bert_": { + "default-variations": { + "seq-length": "seq.384" + }, + "env": { + "CM_QAIC_MODEL_NAME": "bert-large", + "CM_CREATE_INPUT_BATCH": "no" + }, + "adr": { + "model-src": { + "tags": "bert-large,_onnx,_packed" + } + } + }, + "bert-99": { + "group": "model", + "base": [ + "bert_" + ], + "env": { + "CM_CALIBRATE_SQUAD": "yes", + "CM_QAIC_COMPILER_ARGS": "", + "CM_QAIC_COMPILER_PARAMS": "-onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,<<>> -input-list-file=<<>> -num-histogram-bins=512 -profiling-threads=<<>>", + "CM_QAIC_MODEL_TO_CONVERT": "calibrate_bert_mlperf" + } + }, + "seq.#": { + "group": "seq-length", + "env": { + "CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH": "#" + }, + "ad": { + "squad-preprocessed": { + "tags": "_seq.#" + } + } + }, + "seq.384": { + "group": "seq-length", + "env": { + "CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH": "#" + }, + "ad": { + "squad-preprocessed": { + "tags": "_seq.384" + } + } + }, + "filter-size.#": { + "group": "calib-dataset-filter-size", + "ad": { + "preprocessed-dataset": { + "tags": "_filter-size.#,_filter,_size.#" + } + } + } + } +} diff --git a/script/calibrate-model-for.qaic/customize.py b/script/calibrate-model-for.qaic/customize.py new file mode 100644 index 0000000000..62c4dbdbae --- /dev/null +++ b/script/calibrate-model-for.qaic/customize.py @@ -0,0 +1,204 @@ +from cmind import utils +import os +import sys +import yaml + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_CREATE_INPUT_BATCH', '') == 'yes': + r = create_batched_inputs(env) + if r['return'] > 0: + return r + + r = construct_calibration_cmd(env) + if r['return'] > 0: + return r + cmd = r['cmd'] + + print("Profiling from "+ os.getcwd()) + + env['CM_RUN_CMD'] = cmd + + return {'return':0} + +def create_batched_inputs(env): + original_images_file = env['CM_DATASET_PREPROCESSED_IMAGES_LIST'] + batchsize = env['CM_QAIC_MODEL_BATCH_SIZE'] + + file_paths = [] + with open(original_images_file) as f: + file_paths = f.read().splitlines() + + i = 0; + outfile = 
None + lastfile = None + outfiles = [] + os.makedirs(os.path.join(os.getcwd(),"raw"), exist_ok = True) + for file in file_paths: + if i%int(batchsize) == 0: + filename = os.path.basename(file).replace(".rgb32", ".raw") + outfile = os.path.join(os.getcwd(),"raw", filename) + outfiles.append(outfile) + with open(outfile, "wb") as f: + pass + with open(outfile, "ab") as f: + with open(file, "rb") as infile: + f.write(infile.read()) + i = i+1 + lastfile = file + + while i%int(batchsize) != 0: + with open(outfile, "ab") as f: + with open(lastfile, "rb") as infile: + f.write(infile.read()) + i = i+1 + with open("batched_input_files", "w") as f: + f.write("\n".join(outfiles)) + + return {'return': 0} + +def construct_calibration_cmd(env): + compiler_params = env['CM_QAIC_COMPILER_PARAMS'] + batchsize = env.get('CM_QAIC_MODEL_BATCH_SIZE', "1") + cmd = env['CM_QAIC_EXEC_PATH'] + " " + if env.get('CM_CREATE_INPUT_BATCH', '') == 'yes': + cmd += " -input-list-file=batched_input_files -batchsize="+batchsize + " " + cmd += compiler_params + " -dump-profile=profile.yaml -model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + + return {'return': 0, 'cmd': cmd} + +def postprocess(i): + + env = i['env'] + profile_file_path = os.path.join(os.getcwd(), "profile.yaml") + env['CM_QAIC_MODEL_PROFILE_WITH_PATH'] = profile_file_path + + if env.get('CM_ML_MODEL_INPUT_LAYER_NAME', '') != '': + input_layer_names = [ env.get('CM_ML_MODEL_INPUT_LAYER_NAME') ] + else: + input_layer_names = [ "images:0", "images/:0" ] + + output_layer_names_conf = [ [], [] ] + output_layer_names_loc = [ [], [] ] + + output_layer_names_loc[0] = [ + "/GatherElements/:0", + "/GatherElements_1/:0", + "/GatherElements_2/:0", + "/GatherElements_3/:0", + "/GatherElements_4/:0" + ] + + output_layer_names_conf[0] = [ + "/TopK/:0", + "/TopK_1/:0", + "/TopK_2/:0", + "/TopK_3/:0", + "/TopK_4/:0" + ] + + output_layer_names_loc[1] = [ + "GatherElements_588/:0", + "GatherElements_598/:0", + "GatherElements_608/:0", + "GatherElements_618/:0", + "GatherElements_628/:0" + ] + + output_layer_names_conf[1] = [ + "TopK_570/:0", + "TopK_572/:0", + "TopK_574/:0", + "TopK_576/:0", + "TopK_578/:0" + ] + + if env.get('CM_QAIC_MODEL_NAME', '') == "retinanet": + with open(profile_file_path, "r") as stream: + try: + output_min_val_loc = sys.maxsize + output_max_val_loc = -sys.maxsize + output_min_val_conf = sys.maxsize + output_max_val_conf = -sys.maxsize + docs = yaml.load_all(stream, yaml.FullLoader) + for doc in docs: + if type(doc) == list: + + node_names = [ k['NodeOutputName'] for k in doc] + oindex = None + + for output in output_layer_names_loc: + if output[0] in node_names: + oindex = output_layer_names_loc.index(output) + break + + if oindex is None: + return {'return': 1, 'error': 'Output node names not found for the given retinanet model'} + + for k in doc: + if k["NodeOutputName"] in input_layer_names: + min_val = k['Min'] + max_val = k['Max'] + scale, offset = get_scale_offset(min_val, max_val) + env['CM_QAIC_MODEL_RETINANET_IMAGE_SCALE'] = scale + env['CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET'] = offset + + if k["NodeOutputName"] in output_layer_names_loc[oindex]: + min_val = k['Min'] + max_val = k['Max'] + if min_val < output_min_val_loc: + output_min_val_loc = min_val + if max_val > output_max_val_loc: + output_max_val_loc = max_val + loc_scale, loc_offset = get_scale_offset(min_val, max_val) + index = output_layer_names_loc[oindex].index(k["NodeOutputName"]) + env[f'CM_QAIC_MODEL_RETINANET_LOC_SCALE{index}'] = loc_scale + 
env[f'CM_QAIC_MODEL_RETINANET_LOC_OFFSET{index}'] = loc_offset - 128 # to uint8 is done in NMS code + + total_range = max_val - min_val + scale = total_range/256.0 + offset = round(-min_val / scale) + + if k["NodeOutputName"] in output_layer_names_conf[oindex]: + min_val = k['Min'] + max_val = k['Max'] + if min_val < output_min_val_conf: + output_min_val_conf = min_val + if max_val > output_max_val_conf: + output_max_val_conf = max_val + conf_scale, conf_offset = get_scale_offset(min_val, max_val) + index = output_layer_names_conf[oindex].index(k["NodeOutputName"]) + env[f'CM_QAIC_MODEL_RETINANET_CONF_SCALE{index}'] = conf_scale + env[f'CM_QAIC_MODEL_RETINANET_CONF_OFFSET{index}'] = conf_offset - 128 # to uint8 is done in NMS code + total_range = max_val - min_val + scale = total_range/256.0 + offset = round(-min_val / scale) + + loc_scale, loc_offset = get_scale_offset(output_min_val_loc, output_max_val_loc) + conf_scale, conf_offset = get_scale_offset(output_min_val_conf, output_max_val_conf) + env['CM_QAIC_MODEL_RETINANET_LOC_SCALE'] = loc_scale + env['CM_QAIC_MODEL_RETINANET_LOC_OFFSET'] = loc_offset - 128 # to uint8 is done in NMS code + env['CM_QAIC_MODEL_RETINANET_CONF_SCALE'] = conf_scale + env['CM_QAIC_MODEL_RETINANET_CONF_OFFSET'] = conf_offset - 128 # to uint8 is done in NMS code + + except yaml.YAMLError as exc: + return {'return': 1, 'error': exc} + + return {'return':0} + +def get_scale_offset(min_val, max_val): + total_range = max_val - min_val + scale = total_range/256.0 + offset = round(-min_val / scale) + return scale, offset + diff --git a/script/calibrate-model-for.qaic/run.sh b/script/calibrate-model-for.qaic/run.sh new file mode 100644 index 0000000000..59b1aed3dc --- /dev/null +++ b/script/calibrate-model-for.qaic/run.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +run "$CM_RUN_CMD" + diff --git a/script/compile-model-for.qaic/README.md b/script/compile-model-for.qaic/README.md new file mode 100644 index 0000000000..92cf19fce7 --- /dev/null +++ b/script/compile-model-for.qaic/README.md @@ -0,0 +1,435 @@ +Automatically generated README for this automation recipe: **compile-model-for.qaic** + +Category: **AI/ML optimization** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=compile-model-for.qaic,3f0f43b5d0304d1c) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-model-for.qaic)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *qaic,compile,model,model-compile,qaic-compile* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "qaic compile model model-compile qaic-compile" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=qaic,compile,model,model-compile,qaic-compile`
+
+`cm run script --tags=qaic,compile,model,model-compile,qaic-compile[,variations] [--input_flags]`
+
+*or*
+
+`cmr "qaic compile model model-compile qaic-compile"`
+
+`cmr "qaic compile model model-compile qaic-compile [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'qaic,compile,model,model-compile,qaic-compile',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
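For reference, a simplified sketch of how the final compiler invocation is assembled from the environment keys documented below, mirroring `construct_compilation_cmd()` in this script's `customize.py` (the binary and model paths are placeholders; the profile and binary-dir options are omitted for brevity):

```python
# Illustrative assembly of the qaic-exec command line; paths are hypothetical.
env = {
    'CM_QAIC_EXEC_PATH': '/opt/qti-aic/exec/qaic-exec',        # hypothetical
    'CM_ML_MODEL_FILE_WITH_PATH': '/path/to/resnet50.onnx',    # hypothetical
    'CM_QAIC_MODEL_COMPILER_PARAMS_BASE': '-aic-hw -aic-hw-version=2.0',
    'CM_QAIC_MODEL_COMPILER_ARGS': '-sdp-cluster-sizes=2,2',
}
cmd = (env['CM_QAIC_EXEC_PATH'] + ' -model=' + env['CM_ML_MODEL_FILE_WITH_PATH']
       + ' ' + env['CM_QAIC_MODEL_COMPILER_PARAMS_BASE']
       + ' ' + env['CM_QAIC_MODEL_COMPILER_ARGS'])
print(cmd)
```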
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="qaic,compile,model,model-compile,qaic-compile"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=qaic,compile,model,model-compile,qaic-compile) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "qaic compile model model-compile qaic-compile[variations]" [--input_flags]`
+
+___
+### Customization
+
+
+#### Variations
+
+ * *No group (any variation can be selected)*
+
+ Click here to expand this section. + + * `_bert-99` + - Environment variables: + - *CM_COMPILE_BERT*: `on` + - *CM_QAIC_MODEL_TO_CONVERT*: `calibrate_bert_mlperf` + - *CM_QAIC_MODEL_COMPILER_PARAMS_BASE*: `-aic-hw -aic-hw-version=2.0 -execute-nodes-in-fp16=Add,Div,Erf,Softmax -quantization-schema=symmetric_with_uint8 -quantization-precision=Int8 -quantization-precision-bias=Int32 -vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384 -multicast-weights -combine-inputs=false -combine-outputs=false` + - *CM_QAIC_MODEL_COMPILER_ARGS*: `` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * calibrate,qaic,_bert-99 + * CM names: `--adr.['bert-profile', 'qaic-profile']...` + - CM script: [calibrate-model-for.qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/calibrate-model-for.qaic) + * `_bert-99,offline` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS*: `-allocator-dealloc-delay=2 -size-split-granularity=1536 -vtcm-working-set-limit-ratio=1` + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=1 -mos=1 -ols=2` + - Workflow: + * `_bert-99,offline,nsp.14` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=1 -mos=1 -ols=3` + - Workflow: + * `_bert-99,offline,nsp.16` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=1 -mos=1 -ols=2` + - Workflow: + * `_bert-99,server` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS*: `-allocator-dealloc-delay=2 -size-split-granularity=1536 -vtcm-working-set-limit-ratio=1` + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=1 -mos=1 -ols=3` + - Workflow: + * `_bert-99,server,nsp.14` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=1 -mos=1 -ols=3` + - Workflow: + * `_bert-99,singlestream` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS*: `` + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=8 -mos=8 -ols=1` + - Workflow: + * `_bert-99,singlestream,nsp.14` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=8 -mos=8 -ols=1` + - Workflow: + * `_bert-99.9` + - Environment variables: + - *CM_COMPILE_BERT*: `on` + - *CM_QAIC_MODEL_TO_CONVERT*: `bert_mlperf` + - *CM_QAIC_MODEL_COMPILER_PARAMS_BASE*: `-aic-hw -aic-hw-version=2.0 -convert-to-fp16 -vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384 -combine-inputs=false -combine-outputs=false` + - *CM_QAIC_MODEL_COMPILER_ARGS*: `` + - Workflow: + * `_bert-99.9,offline` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=2 -mos=1 -ols=2` + - Workflow: + * `_bert-99.9,offline,nsp.14` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=2 -mos=1 -ols=2` + - Workflow: + * `_bert-99.9,offline,nsp.16` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=2 -mos=1 -ols=2` + - Workflow: + * `_bert-99.9,server` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=2` + - Workflow: + * `_bert-99.9,server,nsp.14` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=2` + - Workflow: + * `_resnet50` + - Environment variables: + - *CM_COMPILE_RESNET*: `on` + - *CM_QAIC_MODEL_TO_CONVERT*: `compile_resnet50_tf` + - *CM_QAIC_MODEL_COMPILER_PARAMS_BASE*: `-aic-hw -aic-hw-version=2.0 -quantization-schema=symmetric_with_uint8 -quantization-precision=Int8 -output-node-name=ArgMax -vvv -compile-only -use-producer-dma=1` + - Workflow: + * 
`_resnet50,multistream` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS*: `` + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=4 -mos=1 -ols=1` + - Workflow: + * `_resnet50,multistream,nsp.14` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=4` + - Workflow: + * `_resnet50,offline` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS*: `-sdp-cluster-sizes=2,2 -multicast-weights` + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=4 -mos=1,2 -ols=4` + - Workflow: + * `_resnet50,offline,nsp.14` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=4 -mos=1,2 -ols=4` + - Workflow: + * `_resnet50,server` + - Workflow: + * `_resnet50,server,nsp.14` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=4 -ols=4` + - *CM_QAIC_MODEL_COMPILER_ARGS*: `-sdp-cluster-sizes=2,2 -mos=1,2 -multicast-weights` + - Workflow: + * `_resnet50,server,nsp.16` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=4 -ols=4` + - *CM_QAIC_MODEL_COMPILER_ARGS*: `-sdp-cluster-sizes=4,4 -mos=1,4` + - Workflow: + * `_resnet50,singlestream` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS*: `-aic-num-of-instances=1` + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=8 -mos=1 -ols=1` + - Workflow: + * `_resnet50,singlestream,nsp.14` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=8 -mos=1 -ols=1` + - Workflow: + * `_resnet50,tf` + - Environment variables: + - *CM_QAIC_MODEL_TO_CONVERT*: `calibrate_resnet50_tf` + - Workflow: + * `_retinanet` + - Environment variables: + - *CM_COMPILE_RETINANET*: `on` + - *CM_QAIC_MODEL_TO_CONVERT*: `calibrate_retinanet_no_nms_mlperf` + - *CM_QAIC_MODEL_COMPILER_ARGS*: `-aic-enable-depth-first` + - *CM_QAIC_MODEL_COMPILER_PARAMS_BASE*: `-aic-hw -aic-hw-version=2.0 -compile-only -enable-channelwise -onnx-define-symbol=batch_size,1 -node-precision-info=<<>> -quantization-schema-constants=symmetric_with_uint8 -quantization-schema-activations=asymmetric -quantization-calibration=None` + - Workflow: + * `_retinanet,multistream` + - Workflow: + * `_retinanet,nsp.14` + - Workflow: + * `_retinanet,offline` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=1 -mos=1 -ols=1` + - Workflow: + * `_retinanet,offline,nsp.14` + - Workflow: + * `_retinanet,server` + - Workflow: + * `_retinanet,server,nsp.14` + - Workflow: + * `_retinanet,singlestream` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS*: `` + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=8 -mos=1 -ols=1` + - Workflow: + * `_retinanet,singlestream,nsp.14` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_ARGS_SUT*: `-aic-num-cores=8 -mos=1 -ols=1` + - Workflow: + +
+ + + * Group "**batch-size**" +
+ Click here to expand this section. + + * `_bs.#` + - Environment variables: + - *CM_QAIC_MODEL_BATCH_SIZE*: `#` + - Workflow: + * `_bs.1` + - Environment variables: + - *CM_QAIC_MODEL_BATCH_SIZE*: `1` + - Workflow: + +
+ + + * Group "**calib-dataset-filter-size**" +
+ Click here to expand this section. + + * `_filter-size.#` + - Workflow: + +
+ + + * Group "**mlperf-scenario**" +
+ Click here to expand this section. + + * `_multistream` + - Workflow: + * `_offline` + - Workflow: + * `_server` + - Workflow: + * **`_singlestream`** (default) + - Workflow: + +
+ + + * Group "**model-framework**" +
+ Click here to expand this section. + + * `_tf` + - Workflow: + +
+ + + * Group "**nsp**" +
+ Click here to expand this section. + + * `_nsp.14` + - Workflow: + * `_nsp.16` + - Workflow: + * `_nsp.8` + - Workflow: + * `_nsp.9` + - Workflow: + +
+ + + * Group "**percentile-calibration**" +
+ Click here to expand this section. + + * `_pc.#` + - Environment variables: + - *CM_QAIC_MODEL_COMPILER_PERCENTILE_CALIBRATION_VALUE*: `#` + - *CM_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS*: `-quantization-calibration=Percentile -percentile-calibration-value=<<>>` + - Workflow: + +
+ + + * Group "**quantization**" +
+ Click here to expand this section. + + * `_no-quantized` + - Environment variables: + - *CM_QAIC_MODEL_QUANTIZATION*: `no` + - Workflow: + * **`_quantized`** (default) + - Environment variables: + - *CM_QAIC_MODEL_QUANTIZATION*: `yes` + - Workflow: + +
+ + +#### Default variations + +`_quantized,_singlestream` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--register=value` → `CM_REGISTER_CACHE=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r = cm.access({..., "register": ...})
+```
+
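A more complete sketch of using this flag from Python (the cache path below is purely illustrative):

```python
import cmind

# 'register' maps to CM_REGISTER_CACHE; when it is set, customize.py
# (below) skips compilation and copies the precompiled binaries into
# the new cache entry instead.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'qaic,compile,model,model-compile,qaic-compile,_resnet50',
                  'register': '/path/to/precompiled/elfs',  # hypothetical path
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```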
+ +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, an `env` dictionary in `@input.json`, or script flags.
+
+
+
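As an illustration of the dynamic variations above (e.g. `_pc.#` in the percentile-calibration group), the suffix after the dot appears to replace the `#` placeholder in the variation's environment values; a sketch of that expansion (not CM source code):

```python
# Sketch: expanding a dynamic variation suffix into its env template.
variation = "pc.99.99"                 # e.g. selected via tags ..._pc.99.99
template = {"CM_QAIC_MODEL_COMPILER_PERCENTILE_CALIBRATION_VALUE": "#"}

suffix = variation.split(".", 1)[1]    # -> "99.99"
env = {k: v.replace("#", suffix) for k, v in template.items()}
print(env)  # {'CM_QAIC_MODEL_COMPILER_PERCENTILE_CALIBRATION_VALUE': '99.99'}
```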
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-model-for.qaic/_cm.json)*** + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,qaic,apps,sdk + * `if (CM_REGISTER_CACHE != on)` + * CM names: `--adr.['qaic-apps-sdk']...` + - CM script: [get-qaic-apps-sdk](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-qaic-apps-sdk) + * qaic,calibrate,_retinanet + * `if (CM_COMPILE_RETINANET == yes)` + * CM names: `--adr.['retinanet-profile', 'qaic-profile']...` + - CM script: [calibrate-model-for.qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/calibrate-model-for.qaic) + * qaic,calibrate,_resnet50 + * `if (CM_COMPILE_RESNET == on) AND (CM_REGISTER_CACHE != on)` + * CM names: `--adr.['resnet-profile', 'qaic-profile']...` + - CM script: [calibrate-model-for.qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/calibrate-model-for.qaic) + * get,ml-model + * CM names: `--adr.['model-src']...` + - CM script: [get-ml-model-3d-unet-kits19](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-3d-unet-kits19) + - CM script: [get-ml-model-abtf-ssd-pytorch](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-abtf-ssd-pytorch) + - CM script: [get-ml-model-bert-base-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-bert-base-squad) + - CM script: [get-ml-model-bert-large-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-bert-large-squad) + - CM script: [get-ml-model-dlrm-terabyte](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-dlrm-terabyte) + - CM script: [get-ml-model-efficientnet-lite](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-efficientnet-lite) + - CM script: [get-ml-model-gptj](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-gptj) + - CM script: [get-ml-model-huggingface-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-huggingface-zoo) + - CM script: [get-ml-model-llama2](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-llama2) + - CM script: [get-ml-model-mobilenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-mobilenet) + - CM script: [get-ml-model-neuralmagic-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-neuralmagic-zoo) + - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50) + - CM script: [get-ml-model-retinanet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet) + - CM script: [get-ml-model-retinanet-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet-nvidia) + - CM script: [get-ml-model-rnnt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-rnnt) + - CM script: [get-ml-model-stable-diffusion](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-stable-diffusion) + - CM script: [get-ml-model-tiny-resnet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-tiny-resnet) + - CM script: [get-ml-model-using-imagenet-from-model-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-using-imagenet-from-model-zoo) + 1. 
***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-model-for.qaic/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-model-for.qaic/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-model-for.qaic/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-model-for.qaic/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-model-for.qaic/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-model-for.qaic/_cm.json) + +___ +### Script output +`cmr "qaic compile model model-compile qaic-compile [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_ML_MODEL_FILE_WITH_PATH` +* `CM_QAIC_MODEL*` +#### New environment keys auto-detected from customize + +* `CM_ML_MODEL_FILE_WITH_PATH` +* `CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH` +* `CM_QAIC_MODEL_FINAL_COMPILATION_CMD` \ No newline at end of file diff --git a/script/compile-model-for.qaic/_cm.json b/script/compile-model-for.qaic/_cm.json new file mode 100644 index 0000000000..ca305b860d --- /dev/null +++ b/script/compile-model-for.qaic/_cm.json @@ -0,0 +1,389 @@ +{ + "alias": "compile-model-for.qaic", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML optimization", + "new_env_keys": [ + "CM_QAIC_MODEL*", + "CM_ML_MODEL_FILE_WITH_PATH" + ], + "input_mapping": { + "register": "CM_REGISTER_CACHE" + }, + "deps": [ + { + "tags": "detect,cpu" + }, + { + "tags": "get,qaic,apps,sdk", + "names": [ + "qaic-apps-sdk" + ], + "skip_if_env": { + "CM_REGISTER_CACHE": [ + "on" + ] + } + }, + { + "enable_if_env": + { + "CM_COMPILE_RETINANET": [ + "yes" + ] + }, + "names": [ + "retinanet-profile", + "qaic-profile" + ], + "tags": "qaic,calibrate,_retinanet" + }, + { + "enable_if_env": + { + "CM_COMPILE_RESNET": [ + "on" + ] + }, + "skip_if_env": { + "CM_REGISTER_CACHE": [ + "on" + ] + }, + "names": [ + "resnet-profile", + "qaic-profile" + ], + "tags": "qaic,calibrate,_resnet50" + }, + { + "names": [ + "model-src" + ], + "tags": "get,ml-model" + } + ], + "tags": [ + "qaic", + "compile", + "model", + "model-compile", + "qaic-compile" + ], + "uid": "3f0f43b5d0304d1c", + "variations": { + "bs.1": { + "group": "batch-size", + "env": { + "CM_QAIC_MODEL_BATCH_SIZE": "1" + }, + "adr": { + "qaic-profile": { + "tags": "_bs.1" + } + } + }, + "bs.#": { + "group": "batch-size", + "env": { + "CM_QAIC_MODEL_BATCH_SIZE": "#" + }, + "adr": { + "qaic-profile": { + "tags": "_bs.#" + } + } + }, + "quantized": { + "group": "quantization", + "default": true, + "env": { + "CM_QAIC_MODEL_QUANTIZATION": "yes" + } + }, + "no-quantized": { + "group": "quantization", + "env": { + "CM_QAIC_MODEL_QUANTIZATION": "no" + } + }, + "offline": { + "group": "mlperf-scenario" + }, + "server": { + "group": "mlperf-scenario" + }, + "singlestream": { + "group": "mlperf-scenario", + "default": true + }, + "multistream": { + "group": "mlperf-scenario" + }, + "nsp.14": { + "group": "nsp" + }, + "nsp.16": { + "group": "nsp" + }, + "nsp.8": { + "group": "nsp" + }, + "nsp.9": { + "group": "nsp" + }, + "resnet50": { + "default_variations": { + "model-framework": "tf" + }, + 
"env": { + "CM_COMPILE_RESNET": "on", + "CM_QAIC_MODEL_TO_CONVERT": "compile_resnet50_tf", + "CM_QAIC_MODEL_COMPILER_PARAMS_BASE": "-aic-hw -aic-hw-version=2.0 -quantization-schema=symmetric_with_uint8 -quantization-precision=Int8 -output-node-name=ArgMax -vvv -compile-only -use-producer-dma=1" + }, + "adr": { + "model-src": { + "tags": "resnet50,_tf" + } + } + }, + "resnet50,offline": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS": "-sdp-cluster-sizes=2,2 -multicast-weights", + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=4 -mos=1,2 -ols=4" + } + }, + "resnet50,offline,nsp.14": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=4 -mos=1,2 -ols=4" + }, + "default_variations": { + "batch-size": "bs.8" + } + }, + "resnet50,server,nsp.14": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=4 -ols=4", + "CM_QAIC_MODEL_COMPILER_ARGS": "-sdp-cluster-sizes=2,2 -mos=1,2 -multicast-weights" + }, + "default_variations": { + "batch-size": "bs.8" + } + }, + "resnet50,server,nsp.16": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=4 -ols=4", + "CM_QAIC_MODEL_COMPILER_ARGS": "-sdp-cluster-sizes=4,4 -mos=1,4" + }, + "default_variations": { + "batch-size": "bs.8" + } + }, + "resnet50,multistream,nsp.14": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=4" + }, + "default_variations": { + "batch-size": "bs.1" + } + }, + "resnet50,singlestream,nsp.14": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=8 -mos=1 -ols=1" + }, + "default_variations": { + "batch-size": "bs.1" + } + }, + "resnet50,singlestream": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS": "-aic-num-of-instances=1", + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=8 -mos=1 -ols=1" + } + }, + "resnet50,multistream": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS": "", + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=4 -mos=1 -ols=1" + } + }, + "resnet50,server": { + "env": { + } + }, + "bert-99": { + "adr": { + "model-src": { + "tags": "bert-large,_onnx,_packed" + } + }, + "env": { + "CM_COMPILE_BERT": "on", + "CM_QAIC_MODEL_TO_CONVERT": "calibrate_bert_mlperf", + "CM_QAIC_MODEL_COMPILER_PARAMS_BASE": "-aic-hw -aic-hw-version=2.0 -execute-nodes-in-fp16=Add,Div,Erf,Softmax -quantization-schema=symmetric_with_uint8 -quantization-precision=Int8 -quantization-precision-bias=Int32 -vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384 -multicast-weights -combine-inputs=false -combine-outputs=false", + "CM_QAIC_MODEL_COMPILER_ARGS": "" + }, + "deps": [ + { + "tags": "calibrate,qaic,_bert-99", + "names": [ + "bert-profile", + "qaic-profile" + ] + } + ] + }, + "bert-99,offline": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS": "-allocator-dealloc-delay=2 -size-split-granularity=1536 -vtcm-working-set-limit-ratio=1", + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=1 -mos=1 -ols=2" + } + }, + "bert-99,offline,nsp.16": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=1 -mos=1 -ols=2" + } + }, + "bert-99,offline,nsp.14": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=1 -mos=1 -ols=3" + } + }, + "bert-99,server": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS": "-allocator-dealloc-delay=2 -size-split-granularity=1536 -vtcm-working-set-limit-ratio=1", + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=1 -mos=1 -ols=3" + } + }, + "bert-99,server,nsp.14": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=1 -mos=1 -ols=3" + } + }, + "bert-99,singlestream": { + "env": { + 
"CM_QAIC_MODEL_COMPILER_ARGS": "", + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=8 -mos=8 -ols=1" + } + }, + "bert-99,singlestream,nsp.14": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=8 -mos=8 -ols=1" + } + }, + "bert-99.9": { + "base": [ + "no-quantized" + ], + "adr": { + "model-src": { + "tags": "bert-large,_onnx,_packed" + } + }, + "env": { + "CM_COMPILE_BERT": "on", + "CM_QAIC_MODEL_TO_CONVERT": "bert_mlperf", + "CM_QAIC_MODEL_COMPILER_PARAMS_BASE": "-aic-hw -aic-hw-version=2.0 -convert-to-fp16 -vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384 -combine-inputs=false -combine-outputs=false", + "CM_QAIC_MODEL_COMPILER_ARGS": "" + } + }, + "bert-99.9,offline": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=2 -mos=1 -ols=2" + } + }, + "bert-99.9,offline,nsp.16": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=2 -mos=1 -ols=2" + } + }, + "bert-99.9,offline,nsp.14": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=2 -mos=1 -ols=2" + } + }, + "bert-99.9,server": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=2" + } + }, + "bert-99.9,server,nsp.14": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=2" + } + }, + "tf": { + "group": "model-framework" + }, + "resnet50,tf": { + "env": { + "CM_QAIC_MODEL_TO_CONVERT": "calibrate_resnet50_tf" + }, + "ad": { + "model-src": { + "tags": "_fix-input-shape" + } + } + }, + "retinanet": { + "adr": { + "model-src": { + "tags": "retinanet,_no-nms" + } + }, + "new_env_keys": [ + "CM_QAIC_MODEL_RETINANET_*" + ], + "env": { + "CM_COMPILE_RETINANET": "on", + "CM_QAIC_MODEL_TO_CONVERT": "calibrate_retinanet_no_nms_mlperf", + "CM_QAIC_MODEL_COMPILER_ARGS": "-aic-enable-depth-first", + "CM_QAIC_MODEL_COMPILER_PARAMS_BASE": "-aic-hw -aic-hw-version=2.0 -compile-only -enable-channelwise -onnx-define-symbol=batch_size,1 -node-precision-info=<<>> -quantization-schema-constants=symmetric_with_uint8 -quantization-schema-activations=asymmetric -quantization-calibration=None" + } + }, + "retinanet,offline": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=1 -mos=1 -ols=1" + } + }, + "retinanet,offline,nsp.14": { + }, + "retinanet,nsp.14": { + "env": { + } + }, + "retinanet,server": { + }, + "retinanet,server,nsp.14": { + }, + "retinanet,singlestream": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS": "", + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=8 -mos=1 -ols=1" + } + }, + "retinanet,singlestream,nsp.14": { + "env": { + "CM_QAIC_MODEL_COMPILER_ARGS_SUT": "-aic-num-cores=8 -mos=1 -ols=1" + } + }, + "retinanet,multistream": { + }, + "pc.#": { + "group": "percentile-calibration", + "env": { + "CM_QAIC_MODEL_COMPILER_PERCENTILE_CALIBRATION_VALUE": "#", + "CM_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS": "-quantization-calibration=Percentile -percentile-calibration-value=<<>>" + } + }, + "filter-size.#": { + "group": "calib-dataset-filter-size", + "ad": { + "qaic-profile": { + "tags": "_filter-size.#" + } + } + } + } +} diff --git a/script/compile-model-for.qaic/customize.py b/script/compile-model-for.qaic/customize.py new file mode 100644 index 0000000000..1e178f1897 --- /dev/null +++ b/script/compile-model-for.qaic/customize.py @@ -0,0 +1,73 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_REGISTER_CACHE', '') == '': + + r = 
construct_compilation_cmd(env) + if r['return'] > 0: + return r + cmd = r['cmd'] + + print("Compiling from "+ os.getcwd()) + + env['CM_QAIC_MODEL_FINAL_COMPILATION_CMD'] = cmd + + env['CM_RUN_CMD'] = cmd + else: + import shutil + print("Creating cache entry from " + env['CM_REGISTER_CACHE'] + " to " + os.getcwd()) + r = shutil.copytree(env['CM_REGISTER_CACHE'], os.path.join(os.getcwd(), "elfs")) + print(r) + + return {'return':0} + +def construct_compilation_cmd(env): + compiler_params_base = env['CM_QAIC_MODEL_COMPILER_PARAMS_BASE'] + compiler_args = env['CM_QAIC_MODEL_COMPILER_ARGS'] + ' ' + env.get('CM_QAIC_MODEL_COMPILER_ARGS_SUT', '') + batchsize = env.get('CM_QAIC_MODEL_BATCH_SIZE') + + if env.get('CM_QAIC_MODEL_QUANTIZATION', '') == 'yes': + profile_string = " -load-profile=" + env['CM_QAIC_MODEL_PROFILE_WITH_PATH'] + else: + profile_string = '' + + compiler_params = compiler_params_base + ' ' + compiler_args + + if batchsize: + compiler_params += " -batchsize="+batchsize + + percentile_calibration_params = env.get('CM_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS') + if percentile_calibration_params: + compiler_params += " " + percentile_calibration_params + + aic_binary_dir = os.path.join(os.getcwd(), "elfs") + + cmd = env['CM_QAIC_EXEC_PATH'] + \ + " -model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + \ + profile_string + ' -aic-binary-dir=' + aic_binary_dir + ' ' \ + + compiler_params + + return {'return': 0, 'cmd': cmd} + +def postprocess(i): + + env = i['env'] + env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH'] = os.path.join(os.getcwd(), "elfs", "programqpc.bin") + if not os.path.isdir(os.path.join(os.getcwd(), "elfs")): + return {'return': 1, 'error': 'elfs directory not found inside the compiled directory'} + + env['CM_ML_MODEL_FILE_WITH_PATH'] = env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH'] + + return {'return':0} diff --git a/script/compile-model-for.qaic/run.sh b/script/compile-model-for.qaic/run.sh new file mode 100644 index 0000000000..c5c3c04cb5 --- /dev/null +++ b/script/compile-model-for.qaic/run.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +run "rm -rf elfs" +run "$CM_RUN_CMD" diff --git a/script/compile-program/README-extra.md b/script/compile-program/README-extra.md new file mode 100644 index 0000000000..cdafe52f6d --- /dev/null +++ b/script/compile-program/README-extra.md @@ -0,0 +1,3 @@ +# About + +This script compiles C and C++ programs. 
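A minimal sketch of driving this script from Python; the environment keys are the ones consumed by `run.sh` further below, while the paths and file names are placeholders:

```python
import cmind

# CM_SOURCE_FOLDER_PATH, CM_C_SOURCE_FILES and CM_BIN_NAME are read by
# this script's run scripts; the values here are hypothetical.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'compile,program,c-program,compile-c-program',
                  'env': {'CM_SOURCE_FOLDER_PATH': '/path/to/sources',
                          'CM_C_SOURCE_FILES': 'main.c',
                          'CM_BIN_NAME': 'demo.out'},
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```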
diff --git a/script/compile-program/README.md b/script/compile-program/README.md new file mode 100644 index 0000000000..5d909e2eea --- /dev/null +++ b/script/compile-program/README.md @@ -0,0 +1,130 @@ +Automatically generated README for this automation recipe: **compile-program** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=compile-program,c05042ba005a4bfa) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-program)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *compile,program,c-program,cpp-program,compile-program,compile-c-program,compile-cpp-program* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=compile,program,c-program,cpp-program,compile-program,compile-c-program,compile-cpp-program` + +`cm run script --tags=compile,program,c-program,cpp-program,compile-program,compile-c-program,compile-cpp-program ` + +*or* + +`cmr "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program"` + +`cmr "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'compile,program,c-program,cpp-program,compile-program,compile-c-program,compile-cpp-program',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
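For reference, a small sketch of how this script's `customize.py` (further below) flattens `+`-prefixed path lists into `-I` compiler flags; the sample paths are illustrative:

```python
# Mirrors the ' -I'.join([' '] + paths) pattern in compile-program/customize.py:
# the leading single-space element produces ' -I' before the first path.
include_paths = ['/usr/local/include', '/opt/mylib/include']  # illustrative
cm_c_include_path = ' -I'.join([' '] + include_paths)
print(cm_c_include_path)  # '  -I/usr/local/include -I/opt/mylib/include'
```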
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="compile,program,c-program,cpp-program,compile-program,compile-c-program,compile-cpp-program"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=compile,program,c-program,cpp-program,compile-program,compile-c-program,compile-cpp-program) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program"`
+
+___
+### Customization
+
+#### Default environment
+
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* SKIP_RECOMPILE: `no` + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-program/_cm.json)*** + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,compiler + * CM names: `--adr.['compiler']...` + - CM script: [get-cl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cl) + - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc) + - CM script: [get-llvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-llvm) + * get,compiler-flags + - CM script: [get-compiler-flags](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-compiler-flags) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-program/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-program/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-program/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-program/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-program/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-program/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/compile-program/_cm.json) + +___ +### Script output +`cmr "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/compile-program/_cm.json b/script/compile-program/_cm.json new file mode 100644 index 0000000000..af07199793 --- /dev/null +++ b/script/compile-program/_cm.json @@ -0,0 +1,36 @@ +{ + "alias": "compile-program", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "DevOps automation", + "clean_files": [ + "tmp-run.out" + ], + "deps": [ + { + "tags": "detect,cpu" + }, + { + "names": [ + "compiler" + ], + "tags": "get,compiler" + }, + { + "tags": "get,compiler-flags" + } + ], + "default_env": { + "SKIP_RECOMPILE": "no" + }, + "tags": [ + "compile", + "program", + "c-program", + "cpp-program", + "compile-program", + "compile-c-program", + "compile-cpp-program" + ], + "uid": "c05042ba005a4bfa" +} diff --git a/script/compile-program/customize.py b/script/compile-program/customize.py new file mode 100644 index 0000000000..73a3eeb82b --- /dev/null +++ b/script/compile-program/customize.py @@ -0,0 +1,54 @@ +from cmind import utils +import os + +def preprocess(i): + os_info = i['os_info'] + + env = i['env'] + CPPFLAGS = env.get('+ CPPFLAGS', []) + env['CM_C_COMPILER_FLAGS'] = " ".join(env.get('+ CFLAGS', []) + CPPFLAGS) + env['CM_CXX_COMPILER_FLAGS'] = " ".join(env.get('+ CXXFLAGS', []) + CPPFLAGS) + env['CM_F_COMPILER_FLAGS'] = " ".join(env.get('+ FFLAGS', [])) + + CPATH = env.get('+CPATH', [ ]) + env['CM_C_INCLUDE_PATH'] = " -I".join([" "] + env.get('+C_INCLUDE_PATH', []) + CPATH) + env['CM_CPLUS_INCLUDE_PATH'] = " -I".join([" "] + env.get('+CPLUS_INCLUDE_PATH', []) + CPATH) + env['CM_F_INCLUDE_PATH'] = " -I".join([" "] + 
env.get('+F_INCLUDE_PATH', []) + CPATH) + + # If windows, need to extend it more ... + if os_info['platform'] == 'windows' and env.get('CM_COMPILER_FAMILY','')!='LLVM': + print ("WARNING: compile-program script should be extended to support flags for non-LLVM compilers on Windows") + return {'return':0} + + LDFLAGS = env.get('+ LDFLAGS', []) + + env['CM_C_LINKER_FLAGS'] = " ".join(env.get('+ LDCFLAGS', []) + LDFLAGS) + env['CM_CXX_LINKER_FLAGS'] = " ".join(env.get('+ LDCXXFLAGS', []) + LDFLAGS) + env['CM_F_LINKER_FLAGS'] = " ".join(env.get('+ LDFFLAGS', []) + LDFLAGS) + + if env.get('CM_LINKER_LANG', 'C') == "C": + env['CM_LINKER_BIN'] = env['CM_C_COMPILER_BIN'] + env['CM_LINKER_WITH_PATH'] = env['CM_C_COMPILER_WITH_PATH'] + env['CM_LINKER_COMPILE_FLAGS'] = env['CM_C_COMPILER_FLAGS'] + env['CM_LINKER_FLAGS'] = env['CM_C_LINKER_FLAGS'] + + elif env.get('CM_LINKER_LANG', 'C') == "CXX": + env['CM_LINKER_BIN'] = env['CM_CXX_COMPILER_BIN'] + env['CM_LINKER_WITH_PATH'] = env['CM_CXX_COMPILER_WITH_PATH'] + env['CM_LINKER_COMPILE_FLAGS'] = env['CM_CXX_COMPILER_FLAGS'] + env['CM_LINKER_FLAGS'] = env['CM_CXX_LINKER_FLAGS'] + + elif env.get('CM_LINKER_LANG', 'C') == "F": + env['CM_LINKER_BIN'] = env['CM_F_COMPILER_BIN'] + env['CM_LINKER_WITH_PATH'] = env['CM_F_COMPILER_WITH_PATH'] + env['CM_LINKER_COMPILE_FLAGS'] = env['CM_F_COMPILER_FLAGS'] + env['CM_LINKER_FLAGS'] = env['CM_F_LINKER_FLAGS'] + + env['CM_LD_LIBRARY_PATH'] = " -L".join([" " ] + env.get('+LD_LIBRARY_PATH', [])) + env['CM_SOURCE_FOLDER_PATH'] = env['CM_SOURCE_FOLDER_PATH'] if 'CM_SOURCE_FOLDER_PATH' in env else env['CM_TMP_CURRENT_SCRIPT_PATH'] if 'CM_TMP_CURRENT_SCRIPT_PATH' in env else '' + + return {'return':0} + +def postprocess(i): + + return {'return':0} diff --git a/script/compile-program/run.bat b/script/compile-program/run.bat new file mode 100644 index 0000000000..ece5d9e9c0 --- /dev/null +++ b/script/compile-program/run.bat @@ -0,0 +1,35 @@ +rem Compile program + +set BIN_NAME=%CM_BIN_NAME% +IF NOT DEFINED CM_BIN_NAME SET BIN_NAME=run.exe + +set RUN_DIR=%CM_RUN_DIR% +IF NOT DEFINED CM_RUN_DIR SET RUN_DIR=. + +echo. +echo Checking compiler version ... +echo. + +"%CM_C_COMPILER_WITH_PATH%" %CM_C_COMPILER_FLAG_VERSION% + +echo. +echo Compiling source files ... +echo. 
+
+if not exist %RUN_DIR% mkdir %RUN_DIR%
+
+cd %CM_SOURCE_FOLDER_PATH%
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+if not "%CM_C_SOURCE_FILES%" == "" (
+  echo %CM_C_COMPILER_WITH_PATH% %CM_C_COMPILER_FLAGS% %CM_C_INCLUDE_PATH% %CM_C_SOURCE_FILES% %CM_LD_LIBRARY_PATH% %LDCFLAGS% %CM_C_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
+  "%CM_C_COMPILER_WITH_PATH%" %CM_C_COMPILER_FLAGS% %CM_C_INCLUDE_PATH% %CM_C_SOURCE_FILES% %CM_LD_LIBRARY_PATH% %LDCFLAGS% %CM_C_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
+  IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+)
+
+if not "%CM_CXX_SOURCE_FILES%" == "" (
+  echo %CM_CXX_COMPILER_WITH_PATH% %CM_CXX_SOURCE_FILES% %CM_CXX_COMPILER_FLAGS% %CM_CPLUS_INCLUDE_PATH% %CM_LD_LIBRARY_PATH% %LDCXXFLAGS% %CM_CXX_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
+  "%CM_CXX_COMPILER_WITH_PATH%" %CM_CXX_SOURCE_FILES% %CM_CXX_COMPILER_FLAGS% %CM_CPLUS_INCLUDE_PATH% %CM_LD_LIBRARY_PATH% %LDCXXFLAGS% %CM_CXX_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
+  IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+)
+
diff --git a/script/compile-program/run.sh b/script/compile-program/run.sh
new file mode 100644
index 0000000000..7e98bc47d3
--- /dev/null
+++ b/script/compile-program/run.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+# Compile
+
+BIN_NAME=${CM_BIN_NAME:-run.out}
+RUN_DIR=${CM_RUN_DIR:-.}
+echo "RUN_DIR=$RUN_DIR"
+
+if [[ ${CM_SKIP_RECOMPILE} == "yes" ]]; then
+  if [ -f ${RUN_DIR}/${BIN_NAME} ]; then
+    exit 0
+  fi
+fi
+
+rm -f ${RUN_DIR}/${BIN_NAME}
+
+if [ -z "${CM_SOURCE_FOLDER_PATH}" ]; then
+  echo "No source directory (CM_SOURCE_FOLDER_PATH) specified"
+  exit 1
+fi
+
+if [[ -z "${CM_C_SOURCE_FILES}" && -z "${CM_CXX_SOURCE_FILES}" && -z "${CM_F_SOURCE_FILES}" ]]; then
+  echo "No source files (CM_C_SOURCE_FILES or CM_CXX_SOURCE_FILES or CM_F_SOURCE_FILES) specified"
+  exit 1
+fi
+
+echo ""
+echo "Checking compiler version ..."
+echo ""
+
+${CM_C_COMPILER_WITH_PATH} ${CM_C_COMPILER_FLAG_VERSION}
+
+echo ""
+echo "Compiling source files ..."
+echo ""
+
+cd ${CM_SOURCE_FOLDER_PATH}
+test $? -eq 0 || exit 1
+
+IFS=';' read -ra FILES <<< "${CM_C_SOURCE_FILES}"
+for file in "${FILES[@]}"; do
+  base="$(basename -- $file)"
+  base_name=${base%.*}
+  echo $base
+  echo $base_name
+  CMD="${CM_C_COMPILER_WITH_PATH} -c ${CM_C_COMPILER_FLAGS} ${CM_C_INCLUDE_PATH} $file ${CM_C_COMPILER_FLAG_OUTPUT}$base_name.o"
+  echo $CMD
+  eval $CMD
+  test $? -eq 0 || exit 1
+done
+
+IFS=';' read -ra FILES <<< "${CM_CXX_SOURCE_FILES}"
+for file in "${FILES[@]}"; do
+  base="$(basename -- $file)"
+  base_name=${base%.*}
+  echo $base
+  echo $base_name
+  CMD="${CM_CXX_COMPILER_WITH_PATH} -c ${CM_CXX_COMPILER_FLAGS} ${CM_CPLUS_INCLUDE_PATH} $file ${CM_CXX_COMPILER_FLAG_OUTPUT}$base_name.o"
+  echo $CMD
+  eval $CMD
+  test $? -eq 0 || exit 1
+done
+
+
+echo ""
+echo "Linking ..."
+echo ""
+CMD="${CM_LINKER_WITH_PATH} ${CM_LINKER_COMPILE_FLAGS} *.o -o ${RUN_DIR}/${BIN_NAME} ${CM_LD_LIBRARY_PATH} ${CM_LINKER_FLAGS}"
+echo $CMD
+eval $CMD
+
+test $?
-eq 0 || exit 1 diff --git a/script/convert-csv-to-md/README.md b/script/convert-csv-to-md/README.md new file mode 100644 index 0000000000..34a4faf22e --- /dev/null +++ b/script/convert-csv-to-md/README.md @@ -0,0 +1,145 @@ +Automatically generated README for this automation recipe: **convert-csv-to-md** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=convert-csv-to-md,200a95b80bee4a25) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-csv-to-md)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *csv-to-md,convert,to-md,from-csv* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "csv-to-md convert to-md from-csv" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=csv-to-md,convert,to-md,from-csv` + +`cm run script --tags=csv-to-md,convert,to-md,from-csv [--input_flags]` + +*or* + +`cmr "csv-to-md convert to-md from-csv"` + +`cmr "csv-to-md convert to-md from-csv " [--input_flags]` + + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'csv-to-md,convert,to-md,from-csv',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="csv-to-md,convert,to-md,from-csv"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=csv-to-md,convert,to-md,from-csv) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "csv-to-md convert to-md from-csv" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+
+Click here to expand this section.
+
+* `--csv_file=value` → `CM_CSV_FILE=value`
+* `--md_file=value` → `CM_MD_FILE=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "csv_file":...})
+```
+
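+For example, a minimal end-to-end call might look as follows; the file names `summary.csv` and `summary.md` are illustrative placeholders rather than values required by this script:
+
+```python
+import cmind
+
+# Hypothetical input/output names; any existing CSV file should work.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'csv-to-md,convert,to-md,from-csv',
+                  'out': 'con',
+                  'csv_file': 'summary.csv',  # mapped to CM_CSV_FILE
+                  'md_file': 'summary.md'})   # mapped to CM_MD_FILE
+
+if r['return'] > 0:
+    print(r['error'])
+```
+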
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
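+As a sketch, the same kind of override can be passed from Python through the `env` dictionary; `CM_QUIET` is used here only as an illustrative key that this script's `customize.py` happens to read:
+
+```python
+import cmind
+
+# Equivalent of --env.CM_QUIET=yes on the command line (assumption:
+# any script-visible environment key can be preset this way).
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'csv-to-md,convert,to-md,from-csv',
+                  'out': 'con',
+                  'env': {'CM_QUIET': 'yes'}})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+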
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-csv-to-md/_cm.json)*** + * get,python3 + * CM names: `--adr.['python, python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_pandas + * CM names: `--adr.['pandas']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.tabulate + * CM names: `--adr.['tabulate']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-csv-to-md/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-csv-to-md/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-csv-to-md/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-csv-to-md/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-csv-to-md/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-csv-to-md/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-csv-to-md/_cm.json) + +___ +### Script output +`cmr "csv-to-md convert to-md from-csv " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/convert-csv-to-md/_cm.json b/script/convert-csv-to-md/_cm.json new file mode 100644 index 0000000000..ff7b6f7838 --- /dev/null +++ b/script/convert-csv-to-md/_cm.json @@ -0,0 +1,42 @@ +{ + "alias": "convert-csv-to-md", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "category": "DevOps automation", + "deps": [ + { + "names": [ "python, python3" ], + "tags": "get,python3" + }, + { + "names": [ "pandas" ], + "tags": "get,generic-python-lib,_pandas", + "version_min": "1.0" + }, + { + "names": [ "tabulate" ], + "tags": "get,generic-python-lib,_package.tabulate" + } + ], + "docker_input_mapping": {}, + "input_description": {}, + "input_mapping": { + "csv_file": "CM_CSV_FILE", + "md_file": "CM_MD_FILE" + }, + "new_env_keys": [], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "csv-to-md", + "convert", + "to-md", + "from-csv" + ], + "uid": "200a95b80bee4a25", + "variations": {}, + "versions": {} +} diff --git a/script/convert-csv-to-md/customize.py b/script/convert-csv-to-md/customize.py new file mode 100644 index 0000000000..8181e9437f --- /dev/null +++ b/script/convert-csv-to-md/customize.py @@ -0,0 +1,28 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + csv_file = env.get('CM_CSV_FILE', '') + md_file = env.get('CM_MD_FILE', '') + process_file = os.path.join(i['run_script_input']['path'], "process.py") + + env['CM_RUN_CMD'] = '{} {} {} {} 
'.format(env["CM_PYTHON_BIN_WITH_PATH"], process_file, csv_file, md_file) + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/convert-csv-to-md/process.py b/script/convert-csv-to-md/process.py new file mode 100644 index 0000000000..e010441d3e --- /dev/null +++ b/script/convert-csv-to-md/process.py @@ -0,0 +1,10 @@ +import pandas as pd +import sys + +csv_file = sys.argv[1] if len(sys.argv) > 1 else "summary.csv" +md_file = sys.argv[2] if len(sys.argv) > 2 else "converted.md" + +df=pd.read_csv(csv_file, engine='python') + +with open(md_file, "w") as md: + df.to_markdown(buf=md) diff --git a/script/convert-csv-to-md/run.bat b/script/convert-csv-to-md/run.bat new file mode 100644 index 0000000000..648302ca71 --- /dev/null +++ b/script/convert-csv-to-md/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/script/convert-csv-to-md/run.sh b/script/convert-csv-to-md/run.sh new file mode 100644 index 0000000000..59b1aed3dc --- /dev/null +++ b/script/convert-csv-to-md/run.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +run "$CM_RUN_CMD" + diff --git a/script/convert-ml-model-huggingface-to-onnx/README.md b/script/convert-ml-model-huggingface-to-onnx/README.md new file mode 100644 index 0000000000..ecc8afcd28 --- /dev/null +++ b/script/convert-ml-model-huggingface-to-onnx/README.md @@ -0,0 +1,145 @@ +Automatically generated README for this automation recipe: **convert-ml-model-huggingface-to-onnx** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=convert-ml-model-huggingface-to-onnx,eacb01655d7e49ac) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-ml-model-huggingface-to-onnx)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *ml-model,model,huggingface-to-onnx,onnx,huggingface,convert* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "ml-model model huggingface-to-onnx onnx huggingface convert" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=ml-model,model,huggingface-to-onnx,onnx,huggingface,convert`
+
+`cm run script --tags=ml-model,model,huggingface-to-onnx,onnx,huggingface,convert[,variations] `
+
+*or*
+
+`cmr "ml-model model huggingface-to-onnx onnx huggingface convert"`
+
+`cmr "ml-model model huggingface-to-onnx onnx huggingface convert [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'ml-model,model,huggingface-to-onnx,onnx,huggingface,convert',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="ml-model,model,huggingface-to-onnx,onnx,huggingface,convert"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=ml-model,model,huggingface-to-onnx,onnx,huggingface,convert) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "ml-model model huggingface-to-onnx onnx huggingface convert[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_model-path.#` + - Environment variables: + - *CM_MODEL_HUGG_PATH*: `#` + - Workflow: + +
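+As a sketch of how this variation is consumed, the text after `_model-path.` replaces the `#` wildcard and lands in `CM_MODEL_HUGG_PATH`; the model name below is an assumed example, not a value prescribed by this script:
+
+```python
+import cmind
+
+# "bert-base-uncased" is a hypothetical model; the variation tag
+# _model-path.bert-base-uncased sets CM_MODEL_HUGG_PATH accordingly.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'ml-model,model,huggingface-to-onnx,onnx,huggingface,convert,_model-path.bert-base-uncased',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+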
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-ml-model-huggingface-to-onnx/_cm.json)*** + * get,python3 + * CM names: `--adr.['python3', 'python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_transformers + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnxruntime + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-ml-model-huggingface-to-onnx/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-ml-model-huggingface-to-onnx/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-ml-model-huggingface-to-onnx/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-ml-model-huggingface-to-onnx/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-ml-model-huggingface-to-onnx/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/convert-ml-model-huggingface-to-onnx/_cm.json) + +___ +### Script output +`cmr "ml-model model huggingface-to-onnx onnx huggingface convert [,variations]" -j` +#### New environment keys (filter) + +* `CM_ML_MODEL*` +* `CM_MODEL_HUGG_PATH` +* `HUGGINGFACE_ONNX_FILE_PATH` +#### New environment keys auto-detected from customize diff --git a/script/convert-ml-model-huggingface-to-onnx/_cm.json b/script/convert-ml-model-huggingface-to-onnx/_cm.json new file mode 100644 index 0000000000..e836a5aa96 --- /dev/null +++ b/script/convert-ml-model-huggingface-to-onnx/_cm.json @@ -0,0 +1,44 @@ +{ + "alias": "convert-ml-model-huggingface-to-onnx", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML models", + "deps": [ + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3" + }, + { + "tags": "get,generic-python-lib,_transformers" + }, + { + "tags": "get,generic-python-lib,_onnxruntime" + } + ], + "env": {}, + "new_env_keys": [ + "CM_ML_MODEL*", + "CM_MODEL_HUGG_PATH", + "HUGGINGFACE_ONNX_FILE_PATH" + ], + "tags": [ + "ml-model", + "model", + "huggingface-to-onnx", + "onnx", + "huggingface", + "convert" + ], + "uid": "eacb01655d7e49ac", + "variations": { + "model-path.#": { + "env": { + "CM_MODEL_HUGG_PATH": "#" + } + } + } +} diff --git a/script/convert-ml-model-huggingface-to-onnx/customize.py b/script/convert-ml-model-huggingface-to-onnx/customize.py new file mode 100644 index 0000000000..e02a1fb6af --- /dev/null +++ b/script/convert-ml-model-huggingface-to-onnx/customize.py @@ -0,0 +1,26 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if env.get("CM_MODEL_HUGG_PATH","") == "": + return {'return': 1, 'error': 'CM_MODEL_HUGG_PATH is not set'} + + automation = i['automation'] + + cm = automation.cmind + + path = os.getcwd() + + return {'return':0} + +def postprocess(i): + os_info = 
i['os_info'] + + env = i['env'] + env['HUGGINGFACE_ONNX_FILE_PATH'] = os.path.join(os.getcwd(),"model.onnx") + return {'return':0} \ No newline at end of file diff --git a/script/convert-ml-model-huggingface-to-onnx/run.sh b/script/convert-ml-model-huggingface-to-onnx/run.sh new file mode 100644 index 0000000000..56be76db91 --- /dev/null +++ b/script/convert-ml-model-huggingface-to-onnx/run.sh @@ -0,0 +1,2 @@ +#!/bin/bash +python -m transformers.onnx --model=${CM_MODEL_HUGG_PATH} ${PWD} \ No newline at end of file diff --git a/script/copy-to-clipboard/README.md b/script/copy-to-clipboard/README.md new file mode 100644 index 0000000000..289e4d4e37 --- /dev/null +++ b/script/copy-to-clipboard/README.md @@ -0,0 +1,143 @@ +Automatically generated README for this automation recipe: **copy-to-clipboard** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=copy-to-clipboard,8b3aaa97ce58474d) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/copy-to-clipboard)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *copy,to,clipboard,copy-to-clipboard* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "copy to clipboard copy-to-clipboard" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=copy,to,clipboard,copy-to-clipboard` + +`cm run script --tags=copy,to,clipboard,copy-to-clipboard [--input_flags]` + +*or* + +`cmr "copy to clipboard copy-to-clipboard"` + +`cmr "copy to clipboard copy-to-clipboard " [--input_flags]` + + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'copy,to,clipboard,copy-to-clipboard',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="copy,to,clipboard,copy-to-clipboard"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=copy,to,clipboard,copy-to-clipboard) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "copy to clipboard copy-to-clipboard" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+
+Click here to expand this section.
+
+* `--add_quotes=value` → `CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES=value`
+* `--q=value` → `CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES=value`
+* `--t=value` → `CM_COPY_TO_CLIPBOARD_TEXT=value`
+* `--text=value` → `CM_COPY_TO_CLIPBOARD_TEXT=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "add_quotes":...})
+```
+
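+A minimal sketch combining these flags; the text value is arbitrary:
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'copy,to,clipboard,copy-to-clipboard',
+                  'out': 'con',
+                  'text': 'hello from CM',  # CM_COPY_TO_CLIPBOARD_TEXT
+                  'add_quotes': 'yes'})     # CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES
+
+if r['return'] > 0:
+    print(r['error'])
+```
+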
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/copy-to-clipboard/_cm.yaml)*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_package.pyperclip + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/copy-to-clipboard/_cm.yaml) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/copy-to-clipboard/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/copy-to-clipboard/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/copy-to-clipboard/_cm.yaml) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/copy-to-clipboard/_cm.yaml) + +___ +### Script output +`cmr "copy to clipboard copy-to-clipboard " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/copy-to-clipboard/_cm.yaml b/script/copy-to-clipboard/_cm.yaml new file mode 100644 index 0000000000..de631040b2 --- /dev/null +++ b/script/copy-to-clipboard/_cm.yaml @@ -0,0 +1,32 @@ +alias: copy-to-clipboard +uid: 8b3aaa97ce58474d + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: false + +category: "DevOps automation" + +tags: +- copy +- to +- clipboard +- copy-to-clipboard + +deps: + + # Get Python + - tags: get,python3 + names: + - python + - python3 + + # Extra package + - tags: get,generic-python-lib,_package.pyperclip + +input_mapping: + text: CM_COPY_TO_CLIPBOARD_TEXT + t: CM_COPY_TO_CLIPBOARD_TEXT + add_quotes: CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES + q: CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES diff --git a/script/copy-to-clipboard/code.py b/script/copy-to-clipboard/code.py new file mode 100644 index 0000000000..082813e9a0 --- /dev/null +++ b/script/copy-to-clipboard/code.py @@ -0,0 +1,11 @@ +import os +import pyperclip as pc + +text = os.environ.get('CM_COPY_TO_CLIPBOARD_TEXT', '') + +add_quotes = os.environ.get('CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES', '') in [True,'True','yes'] + +if add_quotes: + text = '"' + text + '"' + +pc.copy(text) diff --git a/script/copy-to-clipboard/run.bat b/script/copy-to-clipboard/run.bat new file mode 100644 index 0000000000..545178f203 --- /dev/null +++ b/script/copy-to-clipboard/run.bat @@ -0,0 +1,4 @@ +rem native script + +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/copy-to-clipboard/run.sh b/script/copy-to-clipboard/run.sh new file mode 100644 index 0000000000..fa6f579f76 --- /dev/null +++ b/script/copy-to-clipboard/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +test $? 
-eq 0 || exit 1 diff --git a/script/create-conda-env/README.md b/script/create-conda-env/README.md new file mode 100644 index 0000000000..2ff16cb9ca --- /dev/null +++ b/script/create-conda-env/README.md @@ -0,0 +1,150 @@ +Automatically generated README for this automation recipe: **create-conda-env** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=create-conda-env,e39e0b04c86a40f2) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-conda-env)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *create,get,env,conda-env,conda-environment,create-conda-environment* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "create get env conda-env conda-environment create-conda-environment" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=create,get,env,conda-env,conda-environment,create-conda-environment` + +`cm run script --tags=create,get,env,conda-env,conda-environment,create-conda-environment[,variations] ` + +*or* + +`cmr "create get env conda-env conda-environment create-conda-environment"` + +`cmr "create get env conda-env conda-environment create-conda-environment [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'create,get,env,conda-env,conda-environment,create-conda-environment',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="create,get,env,conda-env,conda-environment,create-conda-environment"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=create,get,env,conda-env,conda-environment,create-conda-environment) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "create get env conda-env conda-environment create-conda-environment[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_name.#` + - Environment variables: + - *CM_CONDA_ENV_NAME*: `#` + - Workflow: + +
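+A short sketch of selecting this variation from Python; `my-env` is an illustrative name that ends up in `CM_CONDA_ENV_NAME`, and the script returns an explicit error if the `_name.` variation is omitted:
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'create,get,env,conda-env,conda-environment,create-conda-environment,_name.my-env',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+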
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-conda-env/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,conda + * CM names: `--adr.['conda']...` + - CM script: [get-conda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-conda) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-conda-env/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-conda-env/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-conda-env/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-conda-env/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-conda-env/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-conda-env/_cm.json) + +___ +### Script output +`cmr "create get env conda-env conda-environment create-conda-environment [,variations]" -j` +#### New environment keys (filter) + +* `+LD_LIBRARY_PATH` +* `+PATH` +* `CM_CONDA_BIN_PATH` +* `CM_CONDA_LIB_PATH` +* `CM_CONDA_PREFIX` +* `CONDA_PREFIX` +#### New environment keys auto-detected from customize + +* `CM_CONDA_BIN_PATH` +* `CM_CONDA_LIB_PATH` +* `CM_CONDA_PREFIX` \ No newline at end of file diff --git a/script/create-conda-env/_cm.json b/script/create-conda-env/_cm.json new file mode 100644 index 0000000000..9a2311707f --- /dev/null +++ b/script/create-conda-env/_cm.json @@ -0,0 +1,43 @@ +{ + "alias": "create-conda-env", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "DevOps automation", + "clean_files": [], + "deps": [ + { + "tags": "detect,os" + }, + { + "names": [ + "conda" + ], + "tags": "get,conda" + } + ], + "new_env_keys": [ + "+PATH", + "+LD_LIBRARY_PATH", + "CM_CONDA_PREFIX", + "CONDA_PREFIX", + "CM_CONDA_BIN_PATH", + "CM_CONDA_LIB_PATH" + ], + "tags": [ + "create", + "get", + "env", + "conda-env", + "conda-environment", + "create-conda-environment" + ], + "uid": "e39e0b04c86a40f2", + "variations": { + "name.#": { + "env": { + "CM_CONDA_ENV_NAME": "#" + } + } + } +} diff --git a/script/create-conda-env/customize.py b/script/create-conda-env/customize.py new file mode 100644 index 0000000000..3d4b17e7fd --- /dev/null +++ b/script/create-conda-env/customize.py @@ -0,0 +1,31 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + automation = i['automation'] + run_script_input = i['run_script_input'] + + recursion_spaces = i['recursion_spaces'] + + if env.get('CM_CONDA_ENV_NAME', '') == '': + return {'return':1, 'error': 'Please use "_name." 
variation'} + + return {'return':0} + +def postprocess(i): + env = i['env'] + + conda_prefix = os.getcwd() + env['CONDA_PREFIX'] = conda_prefix + env['CM_CONDA_PREFIX'] = conda_prefix + env['CM_CONDA_BIN_PATH'] = os.path.join(conda_prefix, "bin") + env['CM_CONDA_LIB_PATH'] = os.path.join(conda_prefix, "lib") + + env['+PATH'] = [ env['CM_CONDA_BIN_PATH'] ] + env['+LD_LIBRARY_PATH'] = [ env['CM_CONDA_LIB_PATH'] ] + + return {'return':0} diff --git a/script/create-conda-env/run.sh b/script/create-conda-env/run.sh new file mode 100644 index 0000000000..540dde9b1b --- /dev/null +++ b/script/create-conda-env/run.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +cmd="${CM_CONDA_BIN_WITH_PATH} create -p ${PWD}" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit $? + diff --git a/script/create-fpgaconvnet-app-tinyml/README.md b/script/create-fpgaconvnet-app-tinyml/README.md new file mode 100644 index 0000000000..c7220c0aeb --- /dev/null +++ b/script/create-fpgaconvnet-app-tinyml/README.md @@ -0,0 +1,158 @@ +Automatically generated README for this automation recipe: **create-fpgaconvnet-app-tinyml** + +Category: **TinyML automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=create-fpgaconvnet-app-tinyml,618f3520e98e4728) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-app-tinyml)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *create,app,fpgaconvnet* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "create app fpgaconvnet" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=create,app,fpgaconvnet` + +`cm run script --tags=create,app,fpgaconvnet[,variations] ` + +*or* + +`cmr "create app fpgaconvnet"` + +`cmr "create app fpgaconvnet [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'create,app,fpgaconvnet',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="create,app,fpgaconvnet"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=create,app,fpgaconvnet) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "create app fpgaconvnet[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**benchmark**" +
+ Click here to expand this section. + + * **`_ic`** (default) + - Workflow: + +
+ + + * Group "**board**" +
+ Click here to expand this section. + + * **`_zc706`** (default) + - Environment variables: + - *CM_TINY_BOARD*: `zc706` + - Workflow: + +
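+Variations inside a group are mutually exclusive, and the group defaults listed below apply when no member is selected, so the two calls in this sketch should be equivalent:
+
+```python
+import cmind
+
+common = {'action': 'run', 'automation': 'script', 'out': 'con'}
+
+# Explicit selection of one variation from each group ...
+r1 = cmind.access(dict(common, tags='create,app,fpgaconvnet,_ic,_zc706'))
+
+# ... versus relying on the documented group defaults.
+r2 = cmind.access(dict(common, tags='create,app,fpgaconvnet'))
+```
+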
+ + +#### Default variations + +`_ic,_zc706` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-app-tinyml/_cm.json)*** + * create,fpgaconvnet,config + * CM names: `--adr.['config-generator']...` + - CM script: [create-fpgaconvnet-config-tinyml](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/create-fpgaconvnet-config-tinyml) + * get,xilinx,sdk + * CM names: `--adr.['xilinx-sdk']...` + - CM script: [get-xilinx-sdk](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-xilinx-sdk) + * get,tensorflow + * CM names: `--adr.['tensorflow']...` + - CM script: [install-tensorflow-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-tensorflow-from-src) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-app-tinyml/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-app-tinyml/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-app-tinyml/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-app-tinyml/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-app-tinyml/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-app-tinyml/_cm.json) + +___ +### Script output +`cmr "create app fpgaconvnet [,variations]" -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/create-fpgaconvnet-app-tinyml/_cm.json b/script/create-fpgaconvnet-app-tinyml/_cm.json new file mode 100644 index 0000000000..34573d8342 --- /dev/null +++ b/script/create-fpgaconvnet-app-tinyml/_cm.json @@ -0,0 +1,59 @@ +{ + "alias": "create-fpgaconvnet-app-tinyml", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "category": "TinyML automation", + "deps": [ + { + "names": [ "config-generator" ], + "tags": "create,fpgaconvnet,config" + }, + { + "names": [ "xilinx-sdk" ], + "tags": "get,xilinx,sdk", + "version": "2019.1" + }, + { + "names": [ "tensorflow" ], + "tags": "get,tensorflow" + } + ], + "input_description": {}, + "input_mapping": {}, + "new_env_keys": [], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "create", + "app", + "fpgaconvnet" + ], + "uid": "618f3520e98e4728", + "variations": { + "zc706": { + "env": { + "CM_TINY_BOARD": "zc706" + }, + "add_deps": { + "config-generator": { + "tags": "_zc706" + } + }, + "group": "board", + "default": true + }, + "ic": { + "add_deps": { + "config-generator": { + "tags": "_ic" + } + }, + "group": "benchmark", + "default": true + } + }, + "versions": {} +} diff --git a/script/create-fpgaconvnet-app-tinyml/customize.py b/script/create-fpgaconvnet-app-tinyml/customize.py new file mode 100644 index 0000000000..c139e3a476 --- /dev/null +++ b/script/create-fpgaconvnet-app-tinyml/customize.py @@ -0,0 +1,38 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', 
False) == 'yes')
+
+    network_env_name = env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME']
+    run_dir = env['CM_TINY_FPGACONVNET_'+network_env_name+'_RUN_DIR']
+
+    run_cmd = "cd " + run_dir + " && xsct create_boot_image.tcl"
+
+    env['CM_RUN_CMD'] = run_cmd
+    env['CM_RUN_DIR'] = run_dir
+
+    return {'return':0}
+
+def postprocess(i):
+
+    env = i['env']
+
+    network = env['CM_TINY_NETWORK_NAME']
+    json_location = os.path.join(env['CM_RUN_DIR'], env['CM_TINY_NETWORK_NAME'] + ".json")
+    if os.path.exists(json_location):
+        print(f"JSON configuration file for {network} created at {json_location}")
+    else:
+        return {'return':1, 'error': "JSON configuration file generation failed"}
+
+    return {'return':0}
diff --git a/script/create-fpgaconvnet-app-tinyml/run.sh b/script/create-fpgaconvnet-app-tinyml/run.sh
new file mode 100644
index 0000000000..fe67c233c5
--- /dev/null
+++ b/script/create-fpgaconvnet-app-tinyml/run.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+function exit_if_error() {
+  test $? -eq 0 || exit $?
+}
+
+function run() {
+  echo "Running: "
+  echo "$1"
+  echo ""
+  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+    eval "$1"
+  fi
+  exit_if_error
+}
+
+#Add your run commands here...
+run "${CM_RUN_CMD}"
+
diff --git a/script/create-fpgaconvnet-config-tinyml/README.md b/script/create-fpgaconvnet-config-tinyml/README.md
new file mode 100644
index 0000000000..f60bc75d9d
--- /dev/null
+++ b/script/create-fpgaconvnet-config-tinyml/README.md
@@ -0,0 +1,175 @@
+Automatically generated README for this automation recipe: **create-fpgaconvnet-config-tinyml**
+
+Category: **TinyML automation**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=create-fpgaconvnet-config-tinyml,f6cdad166cfa47bc) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-config-tinyml)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *create,config,fpgaconvnet*
+* Output cached?
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "create config fpgaconvnet" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=create,config,fpgaconvnet`
+
+`cm run script --tags=create,config,fpgaconvnet[,variations] `
+
+*or*
+
+`cmr "create config fpgaconvnet"`
+
+`cmr "create config fpgaconvnet [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'create,config,fpgaconvnet',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="create,config,fpgaconvnet"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=create,config,fpgaconvnet) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "create config fpgaconvnet[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_zc706,ic` + - Environment variables: + - *CM_TINY_NETWORK_NAME*: `zc706-resnet` + - Workflow: + +
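+A sketch of activating this combined variation; selecting both `_zc706` and `_ic` should also trigger the `zc706,ic` entry and therefore set `CM_TINY_NETWORK_NAME` to `zc706-resnet`:
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'create,config,fpgaconvnet,_zc706,_ic',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+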
+ + + * Group "**benchmark**" +
+ Click here to expand this section. + + * **`_ic`** (default) + - Workflow: + +
+ + + * Group "**board**" +
+ Click here to expand this section. + + * **`_zc706`** (default) + - Environment variables: + - *CM_TINY_BOARD*: `zc706` + - Workflow: + +
+ + +#### Default variations + +`_ic,_zc706` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-config-tinyml/_cm.json)*** + * get,python3 + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,ml-model,tiny + * CM names: `--adr.['ml-model']...` + - CM script: [get-ml-model-tiny-resnet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-tiny-resnet) + * get,git,repo,_repo.https://github.com/mlcommons/submissions_tiny_v1.1 + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-config-tinyml/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-config-tinyml/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-config-tinyml/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-config-tinyml/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-config-tinyml/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-fpgaconvnet-config-tinyml/_cm.json) + +___ +### Script output +`cmr "create config fpgaconvnet [,variations]" -j` +#### New environment keys (filter) + +* `CM_TINY_FPGACONVNET*` +#### New environment keys auto-detected from customize + +* `CM_TINY_FPGACONVNET_' + network_env_name + '_CODE_PATH` +* `CM_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR` +* `CM_TINY_FPGACONVNET_CONFIG_FILE_' + network_env_name + '_PATH` +* `CM_TINY_FPGACONVNET_NETWORK_ENV_NAME` +* `CM_TINY_FPGACONVNET_NETWORK_NAME` \ No newline at end of file diff --git a/script/create-fpgaconvnet-config-tinyml/_cm.json b/script/create-fpgaconvnet-config-tinyml/_cm.json new file mode 100644 index 0000000000..491b58e22c --- /dev/null +++ b/script/create-fpgaconvnet-config-tinyml/_cm.json @@ -0,0 +1,58 @@ +{ + "alias": "create-fpgaconvnet-config-tinyml", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "TinyML automation", + "deps": [ + { + "tags": "get,python3" + }, + { + "names": [ "ml-model" ], + "tags": "get,ml-model,tiny" + }, + { + "tags": "get,git,repo,_repo.https://github.com/mlcommons/submissions_tiny_v1.1" + } + ], + "input_description": {}, + "input_mapping": {}, + "new_env_keys": [ + "CM_TINY_FPGACONVNET*" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "create", + "config", + "fpgaconvnet" + ], + "uid": "f6cdad166cfa47bc", + "variations": { + "zc706": { + "env": { + "CM_TINY_BOARD": "zc706" + }, + "group": "board", + "default": true + }, + "ic": { + "add_deps": { + "ml-model": { + "tags": "resnet,_onnx" + } + }, + "group": "benchmark", + "default": true + }, + "zc706,ic": { + "env": { + "CM_TINY_NETWORK_NAME": "zc706-resnet" + } + } + }, + "versions": {} +} diff --git a/script/create-fpgaconvnet-config-tinyml/customize.py b/script/create-fpgaconvnet-config-tinyml/customize.py new file mode 100644 index 0000000000..8590890bb9 --- /dev/null +++ 
b/script/create-fpgaconvnet-config-tinyml/customize.py @@ -0,0 +1,53 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + code_path = os.path.join(env['CM_GIT_REPO_CHECKOUT_PATH'], "closed", "fpgaconvnet", "code") + network_env_name = env['CM_TINY_NETWORK_NAME'].replace("-", "_").upper() + env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME'] = network_env_name + env['CM_TINY_FPGACONVNET_' + network_env_name + '_CODE_PATH'] = code_path + + board = env.get('CM_TINY_BOARD', 'zc706') + + benchmark = env.get('CM_TINY_BENCHMARK', 'ic') + + run_dir = os.path.join(code_path, board, benchmark) + env['CM_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR'] = run_dir + + run_cmd = "cd " + run_dir + " && " + env['CM_PYTHON_BIN_WITH_PATH'] + " " + "create_config.py" + + env['ML_MODEL_FILE_WITH_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + env['CM_RUN_CMD'] = run_cmd + env['CM_RUN_DIR'] = run_dir + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + network = env['CM_TINY_NETWORK_NAME'] + env['CM_TINY_FPGACONVNET_NETWORK_NAME'] = network + network_env_name = env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME'] + + json_location = os.path.join(env['CM_RUN_DIR'], env['CM_TINY_NETWORK_NAME'] + ".json") + if os.path.exists(json_location): + print(f"JSON configuration file for {network} created at {json_location}") + else: + return {'return':1, 'error': "JSON configuration file generation failed"} + + env['CM_TINY_FPGACONVNET_CONFIG_FILE_' + network_env_name + '_PATH'] = json_location + env['CM_GET_DEPENDENT_CACHED_PATH'] = json_location + + return {'return':0} diff --git a/script/create-fpgaconvnet-config-tinyml/run.sh b/script/create-fpgaconvnet-config-tinyml/run.sh new file mode 100644 index 0000000000..fe67c233c5 --- /dev/null +++ b/script/create-fpgaconvnet-config-tinyml/run.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + fi + exit_if_error +} + +#Add your run commands here... 
+run "${CM_RUN_CMD}" + diff --git a/script/create-patch/README-extra.md b/script/create-patch/README-extra.md new file mode 100644 index 0000000000..de783504d0 --- /dev/null +++ b/script/create-patch/README-extra.md @@ -0,0 +1,5 @@ +# Examples + +``` +cmr "create patch" --new=new --old=old --exclude=.git,__pycache_ +``` diff --git a/script/create-patch/README.md b/script/create-patch/README.md new file mode 100644 index 0000000000..c967ad3fb0 --- /dev/null +++ b/script/create-patch/README.md @@ -0,0 +1,137 @@ +Automatically generated README for this automation recipe: **create-patch** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=create-patch,0659dc1f75664c65) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-patch)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *create,patch* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "create patch" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=create,patch` + +`cm run script --tags=create,patch [--input_flags]` + +*or* + +`cmr "create patch"` + +`cmr "create patch " [--input_flags]` + + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'create,patch',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="create,patch"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=create,patch) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "create patch" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+
+Click here to expand this section.
+
+* `--exclude=value` → `CM_CREATE_PATCH_EXCLUDE=value`
+* `--new=value` → `CM_CREATE_PATCH_NEW=value`
+* `--old=value` → `CM_CREATE_PATCH_OLD=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "exclude":...})
+```
+
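+A fuller sketch with all three flags; the directory names are illustrative, and `preprocess()` returns an error if either directory is missing:
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'create,patch',
+                  'out': 'con',
+                  'new': 'new',                    # CM_CREATE_PATCH_NEW
+                  'old': 'old',                    # CM_CREATE_PATCH_OLD
+                  'exclude': '.git,__pycache__'})  # CM_CREATE_PATCH_EXCLUDE
+
+if r['return'] > 0:
+    print(r['error'])
+```
+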
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-patch/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-patch/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-patch/_cm.yaml) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-patch/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-patch/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/create-patch/_cm.yaml) + +___ +### Script output +`cmr "create patch " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/create-patch/_cm.yaml b/script/create-patch/_cm.yaml new file mode 100644 index 0000000000..cbcedb6487 --- /dev/null +++ b/script/create-patch/_cm.yaml @@ -0,0 +1,22 @@ +uid: 0659dc1f75664c65 +alias: create-patch + +category: "DevOps automation" + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: false + +deps: +- tags: detect,os + +input_mapping: + new: CM_CREATE_PATCH_NEW + old: CM_CREATE_PATCH_OLD + exclude: CM_CREATE_PATCH_EXCLUDE + +tags: +- create +- patch + diff --git a/script/create-patch/customize.py b/script/create-patch/customize.py new file mode 100644 index 0000000000..2990d29ff0 --- /dev/null +++ b/script/create-patch/customize.py @@ -0,0 +1,53 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + new_dir = env.get('CM_CREATE_PATCH_NEW', '') + if new_dir == '': + return {'return':1, 'error':'specify NEW directory using --new'} + if not os.path.isdir(new_dir): + return {'return':1, 'error':'NEW directory doesn\'t exist {}'.format(new_dir)} + + old_dir = env.get('CM_CREATE_PATCH_OLD', '') + if old_dir == '': + return {'return':1, 'error':'specify OLD directory using --old'} + if not os.path.isdir(old_dir): + return {'return':1, 'error':'OLD directory doesn\'t exist {}'.format(old_dir)} + + exclude = env.get('CM_CREATE_PATCH_EXCLUDE', '').strip() + x_exclude = '' + + if exclude!='': + for e in exclude.split(','): + x_exclude+=' --exclude={}'.format(e) + + cmd = 'diff -Naur {} {} {} > patch.patch'.format(x_exclude, old_dir, new_dir) + + if not quiet: + print ('') + print ('Running command:') + print ('') + print (cmd) + print ('') + + os.system(cmd) + + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/destroy-terraform/README-extra.md b/script/destroy-terraform/README-extra.md new file mode 100644 index 0000000000..8768e0fc7a --- /dev/null +++ b/script/destroy-terraform/README-extra.md @@ -0,0 +1 @@ +This CM script is automatically called from run-terraform script when `--destroy` option is given. 
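+A sketch of the calling side, assuming the `run-terraform` recipe uses the tags `run,terraform` and forwards a `destroy` input flag; both assumptions come from the script name above and are not shown in this patch:
+
+```python
+import cmind
+
+# Assumption: passing destroy=True makes run-terraform invoke this
+# destroy-terraform script instead of provisioning resources.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'run,terraform',
+                  'out': 'con',
+                  'destroy': True})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+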
diff --git a/script/destroy-terraform/README.md b/script/destroy-terraform/README.md new file mode 100644 index 0000000000..2be7cf8237 --- /dev/null +++ b/script/destroy-terraform/README.md @@ -0,0 +1,123 @@ +Automatically generated README for this automation recipe: **destroy-terraform** + +Category: **Cloud automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=destroy-terraform,3463458d03054856) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/destroy-terraform)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *destroy,terraform,cmd* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "destroy terraform cmd" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=destroy,terraform,cmd` + +`cm run script --tags=destroy,terraform,cmd ` + +*or* + +`cmr "destroy terraform cmd"` + +`cmr "destroy terraform cmd " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'destroy,terraform,cmd', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="destroy,terraform,cmd"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=destroy,terraform,cmd) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "destroy terraform cmd" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags. + + +
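+ +As an illustration (the value is hypothetical; these keys are normally exported by the run-terraform script), a Terraform directory could be overridden as: + +```bash +cmr "destroy terraform cmd" --env.CM_TERRAFORM_RUN_DIR=$HOME/terraform-run +``` +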
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/destroy-terraform/_cm.json)*** + * get,terraform + * CM names: `--adr.['terraform']...` + - CM script: [get-terraform](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-terraform) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/destroy-terraform/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/destroy-terraform/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/destroy-terraform/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/destroy-terraform/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/destroy-terraform/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/destroy-terraform/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/destroy-terraform/_cm.json) + +___ +### Script output +`cmr "destroy terraform cmd " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/destroy-terraform/_cm.json b/script/destroy-terraform/_cm.json new file mode 100644 index 0000000000..663dbbb877 --- /dev/null +++ b/script/destroy-terraform/_cm.json @@ -0,0 +1,20 @@ +{ + "alias": "destroy-terraform", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Cloud automation", + "tags": [ + "destroy", + "terraform", + "cmd" + ], + "deps": [ + { + "names": [ + "terraform" + ], + "tags": "get,terraform" + } + ], + "uid": "3463458d03054856" +} diff --git a/script/destroy-terraform/customize.py b/script/destroy-terraform/customize.py new file mode 100644 index 0000000000..b10640e6d3 --- /dev/null +++ b/script/destroy-terraform/customize.py @@ -0,0 +1,18 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + return {'return':0} + +def postprocess(i): + + return {'return':0} + + diff --git a/script/destroy-terraform/run.bat b/script/destroy-terraform/run.bat new file mode 100644 index 0000000000..648302ca71 --- /dev/null +++ b/script/destroy-terraform/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/script/destroy-terraform/run.sh b/script/destroy-terraform/run.sh new file mode 100644 index 0000000000..9e0ae31ac6 --- /dev/null +++ b/script/destroy-terraform/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash +source ${CM_TERRAFORM_CONFIG_DIR}/credentials.sh +source ${CM_TERRAFORM_CONFIG_DIR}/apply_credentials.sh +cd ${CM_TERRAFORM_RUN_DIR} +terraform destroy --auto-approve +test $? -eq 0 || exit 1 diff --git a/script/detect-cpu/README-extra.md b/script/detect-cpu/README-extra.md new file mode 100644 index 0000000000..c2326c281d --- /dev/null +++ b/script/detect-cpu/README-extra.md @@ -0,0 +1,17 @@ +# Detect CPU +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the host CPU details and exports them in a unified list of environment variables to be reused across the supported operating systems. 
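+ +For example, the detected values can be inspected by running the script with JSON output: + +```bash +cmr "detect cpu detect-cpu info" -j +``` +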
+ +## Exported Variables +* `CM_HOST_CPU_L1I_CACHE_SIZE` +* `CM_HOST_CPU_L2_CACHE_SIZE` +* `CM_HOST_CPU_MEMSIZE` +* `CM_HOST_CPU_SOCKETS` +* `CM_HOST_CPU_THREADS_PER_CORE` +* `CM_HOST_CPU_TOTAL_CORES` +* `CM_HOST_CPU_TOTAL_LOGICAL_CORES` +* `CM_HOST_CPU_TOTAL_PHYSICAL_CORES` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 +3. macOS 12.6 diff --git a/script/detect-cpu/README.md b/script/detect-cpu/README.md new file mode 100644 index 0000000000..fcd2e31cc0 --- /dev/null +++ b/script/detect-cpu/README.md @@ -0,0 +1,130 @@ +Automatically generated README for this automation recipe: **detect-cpu** + +Category: **Platform information** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=detect-cpu,586c8a43320142f7) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-cpu)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *detect,cpu,detect-cpu,info* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "detect cpu detect-cpu info" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=detect,cpu,detect-cpu,info` + +`cm run script --tags=detect,cpu,detect-cpu,info ` + +*or* + +`cmr "detect cpu detect-cpu info"` + +`cmr "detect cpu detect-cpu info " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'detect,cpu,detect-cpu,info', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="detect,cpu,detect-cpu,info"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=detect,cpu,detect-cpu,info) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "detect cpu detect-cpu info" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags. + + +
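+ +For example (a sketch; `CM_QUIET` is only an illustrative key, since this script defines no default environment), such keys can be passed via a JSON input file: + +```bash +echo '{"env": {"CM_QUIET": "yes"}}' > input.json +cm run script --tags=detect,cpu,detect-cpu,info @input.json +``` +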
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-cpu/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-cpu/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-cpu/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-cpu/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-cpu/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-cpu/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-cpu/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-cpu/_cm.json) + +___ +### Script output +`cmr "detect cpu detect-cpu info " -j` +#### New environment keys (filter) + +* `CM_HOST_CPU_*` +* `CM_HOST_DISK_CAPACITY` +* `CM_HOST_MEMORY_CAPACITY` +#### New environment keys auto-detected from customize + +* `CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET` +* `CM_HOST_CPU_SOCKETS` +* `CM_HOST_CPU_THREADS_PER_CORE` +* `CM_HOST_CPU_TOTAL_LOGICAL_CORES` \ No newline at end of file diff --git a/script/detect-cpu/_cm.json b/script/detect-cpu/_cm.json new file mode 100644 index 0000000000..215bad2b21 --- /dev/null +++ b/script/detect-cpu/_cm.json @@ -0,0 +1,31 @@ +{ + "alias": "detect-cpu", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Platform information", + "clean_files": [ + "tmp-lscpu.out", + "tmp-systeminfo.csv", + "tmp-wmic-cpu.csv" + ], + "deps": [ + { + "tags": "detect,os" + } + ], + "new_env_keys": [ + "CM_HOST_CPU_*", + "CM_HOST_MEMORY_CAPACITY", + "CM_HOST_DISK_CAPACITY" + ], + "new_state_keys": [ + "host_device_raw_info" + ], + "tags": [ + "detect", + "cpu", + "detect-cpu", + "info" + ], + "uid": "586c8a43320142f7" +} diff --git a/script/detect-cpu/customize.py b/script/detect-cpu/customize.py new file mode 100644 index 0000000000..7a5586667a --- /dev/null +++ b/script/detect-cpu/customize.py @@ -0,0 +1,172 @@ +from cmind import utils +import os + +lscpu_out = 'tmp-lscpu.out' + +def preprocess(i): + + if os.path.isfile(lscpu_out): + os.remove(lscpu_out) + + return {'return':0} + + +def postprocess(i): + + state = i['state'] + + env = i['env'] + + os_info = i['os_info'] + + automation = i['automation'] + logger = automation.cmind.logger + + if os_info['platform'] == 'windows': + sys = [] + sys1 = [] + cpu = [] + cpu1 = [] + + import csv + + try: + f = 'tmp-systeminfo.csv' + + if not os.path.isfile(f): + print ('WARNING: {} file was not generated!'.format(f)) + else: + keys = {} + j = 0 + with open(f, 'r') as csvf: + for s in csv.reader(csvf): + if j==0: + keys=s + else: + x = {} + for k in range(0, len(s)): + x[keys[k]]=s[k] + + sys.append(x) + + if j==1: + sys1 = x + + j+=1 + + except Exception as e: + logger.warning ('WARNING: problem processing file {} ({})!'.format(f, format(e))) + pass + + try: + f = 'tmp-wmic-cpu.csv' + if not os.path.isfile(f): + logger.warning ('WARNING: {} file was not generated!'.format(f)) + else: + + keys = {} + j = 0 + + 
with open(f, 'r', encoding='utf16') as csvf: + for s in csv.reader(csvf): + if j==1: + keys=s + elif j>1: + x = {} + for k in range(0, len(s)): + x[keys[k]]=s[k] + + cpu.append(x) + + if j==2: + cpu1 = x + + j+=1 + + except Exception as e: + logger.warning ('WARNING: problem processing file {} ({})!'.format(f, format(e))) + pass + + + state['host_device_raw_info']={'sys':sys, 'sys1':sys1, 'cpu':cpu, 'cpu1':cpu1} + + logger.warning ('WARNING: need to unify system and cpu output on Windows') + + return {'return':0} + + + ############################################################################### + # Linux + if not os.path.isfile(lscpu_out): + print ('WARNING: lscpu.out file was not generated!') + + # Currently ignore this error though probably should fail? + # But need to check that is supported on all platforms. + return {'return':0} + + r = utils.load_txt(file_name=lscpu_out) + if r['return']>0: return r + + ss = r['string'] + + #state['cpu_info_raw'] = ss + + # Unifying some CPU info across different platforms + unified_env = { + 'CM_CPUINFO_CPUs':'CM_HOST_CPU_TOTAL_CORES', + 'CM_CPUINFO_L1d_cache': 'CM_HOST_CPU_L1D_CACHE_SIZE', + 'CM_CPUINFO_L1i_cache': 'CM_HOST_CPU_L1I_CACHE_SIZE', + 'CM_CPUINFO_L2_cache': 'CM_HOST_CPU_L2_CACHE_SIZE', + 'CM_CPUINFO_L3_cache': 'CM_HOST_CPU_L3_CACHE_SIZE', + 'CM_CPUINFO_Sockets': 'CM_HOST_CPU_SOCKETS', + 'CM_CPUINFO_NUMA_nodes': 'CM_HOST_CPU_NUMA_NODES', + 'CM_CPUINFO_Cores_per_socket': 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET', + 'CM_CPUINFO_Cores_per_cluster': 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET', + 'CM_CPUINFO_Threads_per_core': 'CM_HOST_CPU_THREADS_PER_CORE', + 'CM_CPUINFO_Architecture': 'CM_HOST_CPU_ARCHITECTURE', + 'CM_CPUINFO_CPU_family': 'CM_HOST_CPU_FAMILY', + 'CM_CPUINFO_CPU_max_MHz': 'CM_HOST_CPU_MAX_MHZ', + 'CM_CPUINFO_Model_name': 'CM_HOST_CPU_MODEL_NAME', + 'CM_CPUINFO_On_line_CPUs_list': 'CM_HOST_CPU_ON_LINE_CPUS_LIST', + 'CM_CPUINFO_Vendor_ID': 'CM_HOST_CPU_VENDOR_ID', + 'CM_CPUINFO_hw_physicalcpu': 'CM_HOST_CPU_TOTAL_PHYSICAL_CORES', + 'CM_CPUINFO_hw_logicalcpu': 'CM_HOST_CPU_TOTAL_CORES', + 'CM_CPUINFO_hw_packages': 'CM_HOST_CPU_SOCKETS', + 'CM_CPUINFO_hw_memsize': 'CM_HOST_CPU_MEMSIZE', + 'CM_CPUINFO_hw_l1icachesize': 'CM_HOST_CPU_L1I_CACHE_SIZE', + 'CM_CPUINFO_hw_l1dcachesize': 'CM_HOST_CPU_L1D_CACHE_SIZE', + 'CM_CPUINFO_hw_l2cachesize': 'CM_HOST_CPU_L2_CACHE_SIZE' + } + + if env['CM_HOST_OS_TYPE'] == 'linux': + vkeys = [ 'Architecture', 'Model name', 'Vendor ID', 'CPU family', 'NUMA node(s)', 'CPU(s)', \ + 'On-line CPU(s) list', 'Socket(s)', 'Core(s) per socket', 'Core(s) per cluster', 'Thread(s) per core', 'L1d cache', 'L1i cache', 'L2 cache', \ + 'L3 cache', 'CPU max MHz' ] + elif env['CM_HOST_OS_FLAVOR'] == 'macos': + vkeys = [ 'hw.physicalcpu', 'hw.logicalcpu', 'hw.packages', 'hw.ncpu', 'hw.memsize', 'hw.l1icachesize', \ + 'hw.l2cachesize' ] + if vkeys: + for s in ss.split('\n'): + v = s.split(':') + key = v[0] + if key in vkeys: + env_key = 'CM_CPUINFO_'+key.replace(" ","_").replace('(','').replace(')','').replace('-','_').replace('.','_') + if env_key in unified_env: + env[unified_env[env_key]]=v[1].strip() + else: + env[env_key] = v[1].strip() + + if env.get('CM_HOST_CPU_SOCKETS','') == '-':#assume as 1 + env['CM_HOST_CPU_SOCKETS'] = '1' + + if env.get('CM_HOST_CPU_TOTAL_CORES', '') != '' and env.get('CM_HOST_CPU_TOTAL_LOGICAL_CORES', '') == '': + env['CM_HOST_CPU_TOTAL_LOGICAL_CORES'] = env['CM_HOST_CPU_TOTAL_CORES'] + + if env.get('CM_HOST_CPU_TOTAL_LOGICAL_CORES','') != '' and 
env.get('CM_HOST_CPU_TOTAL_PHYSICAL_CORES','') != '' and env.get('CM_HOST_CPU_THREADS_PER_CORE','') == '': + env['CM_HOST_CPU_THREADS_PER_CORE'] = str(int(int(env['CM_HOST_CPU_TOTAL_LOGICAL_CORES']) // + int(env['CM_HOST_CPU_TOTAL_PHYSICAL_CORES']))) + + if env.get('CM_HOST_CPU_SOCKETS','') != '' and env.get('CM_HOST_CPU_TOTAL_PHYSICAL_CORES','') != '' and env.get('CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET','') == '': + env['CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET'] = str(int(env['CM_HOST_CPU_TOTAL_PHYSICAL_CORES']) // int(env['CM_HOST_CPU_SOCKETS'])) + + return {'return':0} diff --git a/script/detect-cpu/run.bat b/script/detect-cpu/run.bat new file mode 100644 index 0000000000..32347c87dd --- /dev/null +++ b/script/detect-cpu/run.bat @@ -0,0 +1,2 @@ +rem systeminfo /fo csv > tmp-systeminfo.csv +wmic cpu get /FORMAT:csv > tmp-wmic-cpu.csv diff --git a/script/detect-cpu/run.sh b/script/detect-cpu/run.sh new file mode 100644 index 0000000000..e53c5df44d --- /dev/null +++ b/script/detect-cpu/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +if [[ ${CM_HOST_OS_FLAVOR} == "macos" ]]; then + sysctl -a | grep hw > tmp-lscpu.out +else + lscpu > tmp-lscpu.out + memory_capacity=`free -h --si | grep Mem: | tr -s ' ' | cut -d' ' -f2` + echo "CM_HOST_MEMORY_CAPACITY=$memory_capacity">>tmp-run-env.out + disk_capacity=`df -h --total -l |grep total |tr -s ' '|cut -d' ' -f2` + echo "CM_HOST_DISK_CAPACITY=$disk_capacity">>tmp-run-env.out +fi diff --git a/script/detect-os/README.md b/script/detect-os/README.md new file mode 100644 index 0000000000..5df1e04825 --- /dev/null +++ b/script/detect-os/README.md @@ -0,0 +1,139 @@ +Automatically generated README for this automation recipe: **detect-os** + +Category: **Platform information** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=detect-os,863735b7db8c44fc) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-os)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *detect-os,detect,os,info* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "detect-os detect os info" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=detect-os,detect,os,info` + +`cm run script --tags=detect-os,detect,os,info ` + +*or* + +`cmr "detect-os detect os info"` + +`cmr "detect-os detect os info " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'detect-os,detect,os,info', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="detect-os,detect,os,info"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=detect-os,detect,os,info) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "detect-os detect os info" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags. + + +
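+ +For example, the detected OS details can be printed as JSON: + +```bash +cmr "detect-os detect os info" -j +``` + +On Linux this should populate keys such as `CM_HOST_OS_FLAVOR` and `CM_HOST_OS_PACKAGE_MANAGER` listed in the script output section below. +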
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-os/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-os/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-os/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-os/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-os/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-os/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-os/customize.py)*** + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-os/_cm.json)*** + * get,sys-utils-min + * `if (CM_HOST_OS_TYPE == windows)` + - CM script: [get-sys-utils-min](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-min) + +___ +### Script output +`cmr "detect-os detect os info " -j` +#### New environment keys (filter) + +* `+CM_HOST_OS_*` +* `+PATH` +* `CM_HOST_OS_*` +* `CM_HOST_PLATFORM_*` +* `CM_HOST_PYTHON_*` +* `CM_HOST_SYSTEM_NAME` +* `CM_RUN_STATE_DOCKER` +#### New environment keys auto-detected from customize + +* `CM_HOST_OS_BITS` +* `CM_HOST_OS_MACHINE` +* `CM_HOST_OS_PACKAGE_MANAGER` +* `CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD` +* `CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD` +* `CM_HOST_OS_TYPE` +* `CM_HOST_PYTHON_BITS` +* `CM_HOST_SYSTEM_NAME` \ No newline at end of file diff --git a/script/detect-os/_cm.json b/script/detect-os/_cm.json new file mode 100644 index 0000000000..383b7f9302 --- /dev/null +++ b/script/detect-os/_cm.json @@ -0,0 +1,38 @@ +{ + "alias": "detect-os", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Platform information", + "clean_files": [ + "tmp-run.out" + ], + "new_env_keys": [ + "CM_HOST_OS_*", + "+CM_HOST_OS_*", + "CM_HOST_PLATFORM_*", + "CM_HOST_PYTHON_*", + "CM_HOST_SYSTEM_NAME", + "CM_RUN_STATE_DOCKER", + "+PATH" + ], + "new_state_keys": [ + "os_uname_*" + ], + "post_deps": [ + { + "enable_if_env": { + "CM_HOST_OS_TYPE": [ + "windows" + ] + }, + "tags": "get,sys-utils-min" + } + ], + "tags": [ + "detect-os", + "detect", + "os", + "info" + ], + "uid": "863735b7db8c44fc" +} diff --git a/script/detect-os/customize.py b/script/detect-os/customize.py new file mode 100644 index 0000000000..2f8dd4c76d --- /dev/null +++ b/script/detect-os/customize.py @@ -0,0 +1,100 @@ +from cmind import utils +import os +import subprocess + +def preprocess(i): + + env = i['env'] + state = i['state'] + + os_info = i['os_info'] + + # Update env variables + env['CM_HOST_OS_TYPE'] = os_info['platform'] + env['CM_HOST_OS_BITS'] = os_info['bits'] + env['CM_HOST_PYTHON_BITS'] = os_info['python_bits'] + + # Update state (demo) + # state['os_info'] = os_info + + return {'return':0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + os_info = i['os_info'] + + if os_info['platform'] != 'windows': + if os_info['platform'] == 'linux': + sys_cmd = "ld --verbose | grep SEARCH_DIR " + result = subprocess.check_output(sys_cmd, shell=True).decode("utf-8") + result = result.replace("SEARCH_DIR(\"=", "") + result = 
result.replace("SEARCH_DIR(\"", "") + result = result.replace("\")", "") + result = result.replace(" ", "") + result = result.replace("\n", "") + dirs = result.split(';') + lib_dir = [] + for _dir in dirs: + if _dir != '' and _dir not in lib_dir: + lib_dir.append(_dir) + env['+CM_HOST_OS_DEFAULT_LIBRARY_PATH'] = lib_dir + + r = utils.load_txt(file_name='tmp-run.out', + check_if_exists = True, + split = True) + if r['return']>0: return r + + s = r['list'] + + state['os_uname_machine'] = s[0] + state['os_uname_all'] = s[1] + + env['CM_HOST_OS_MACHINE'] = state['os_uname_machine'] + + import platform + + env['CM_HOST_SYSTEM_NAME'] = platform.node() + + if 'CM_HOST_OS_PACKAGE_MANAGER' not in env: + if env.get('CM_HOST_OS_FLAVOR','') == "ubuntu" or \ + "debian" in env.get('CM_HOST_OS_FLAVOR_LIKE','') or \ + env.get('CM_HOST_OS_FLAVOR','') == "debian": + env['CM_HOST_OS_PACKAGE_MANAGER'] = "apt" + if env.get('CM_HOST_OS_FLAVOR','') == "rhel" or \ + "rhel" in env.get('CM_HOST_OS_FLAVOR_LIKE',''): + env['CM_HOST_OS_PACKAGE_MANAGER'] = "dnf" + if env.get('CM_HOST_OS_FLAVOR','') == "amzn": + env['CM_HOST_OS_PACKAGE_MANAGER'] = "yum" + if env.get('CM_HOST_OS_FLAVOR_LIKE','') == "arch": + env['CM_HOST_OS_PACKAGE_MANAGER'] = "arch" + if env.get('CM_HOST_OS_FLAVOR','') == "macos": + env['CM_HOST_OS_PACKAGE_MANAGER'] = "brew" + if env.get('CM_HOST_OS_FLAVOR','') == "sles": + env['CM_HOST_OS_PACKAGE_MANAGER'] = "zypper" + if env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "apt": + env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "DEBIAN_FRONTEND=noninteractive apt-get install -y" + env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "apt-get update -y" + elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "dnf": + env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "dnf install -y" + env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "dnf update -y" + elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "pacman": + env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "pacman -Sy --noconfirm" + env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "pacman -Syu" + elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "brew": + env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "brew install" + env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "brew update" + elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "yum": + env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "yum install -y --skip-broken" + env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "yum update -y" + elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "zypper": + env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "zypper install -y" + env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "zypper update -y" + + if os.path.exists("/.dockerenv"): + env['CM_RUN_INSIDE_DOCKER'] = "yes" + + return {'return':0} diff --git a/script/detect-os/run.bat b/script/detect-os/run.bat new file mode 100644 index 0000000000..89b468ecc8 --- /dev/null +++ b/script/detect-os/run.bat @@ -0,0 +1 @@ +echo {"detect-os-test":"win"} > tmp-run-state.json diff --git a/script/detect-os/run.sh b/script/detect-os/run.sh new file mode 100644 index 0000000000..9e3c56cd9a --- /dev/null +++ b/script/detect-os/run.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +uname -m > tmp-run.out +uname -a >> tmp-run.out +if test -f "/etc/os-release"; then + echo "CM_HOST_OS_FLAVOR=`cat /etc/os-release | grep '^ID=' | cut -d'=' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out + echo "CM_HOST_OS_FLAVOR_LIKE=`cat /etc/os-release | grep '^ID_LIKE=' | cut -d'=' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out + echo 
"CM_HOST_OS_VERSION=`cat /etc/os-release | grep '^VERSION_ID=' | cut -d'=' -f2 | cut -d'"' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out + echo "CM_HOST_OS_KERNEL_VERSION=`uname -r`" >> tmp-run-env.out + echo "CM_HOST_PLATFORM_FLAVOR=`uname -m`" >> tmp-run-env.out + echo "CM_HOST_OS_GLIBC_VERSION=`ldd --version | tail -n +1 | head -1 | cut -d')' -f2 | cut -d' ' -f2`" >> tmp-run-env.out +else + CM_HOST_OS_FLAVOR=`sw_vers | grep '^ProductName:' | cut -f2 | tr '[:upper:]' '[:lower:]'` + if [ -z ${CM_HOST_OS_FLAVOR} ]; then + CM_HOST_OS_FLAVOR=`sw_vers | grep '^ProductName:' | cut -f3 | tr '[:upper:]' '[:lower:]' ` + fi + echo "CM_HOST_OS_FLAVOR=${CM_HOST_OS_FLAVOR}" >> tmp-run-env.out + echo "CM_HOST_OS_VERSION=`sw_vers | grep '^ProductVersion:' | cut -f2 | tr '[:upper:]' '[:lower:]' `" >> tmp-run-env.out + echo "CM_HOST_OS_KERNEL_VERSION=`uname -r`" >> tmp-run-env.out + echo "CM_HOST_PLATFORM_FLAVOR=`uname -m `" >> tmp-run-env.out +fi diff --git a/script/detect-os/run_config.yml b/script/detect-os/run_config.yml new file mode 100644 index 0000000000..938e3b641b --- /dev/null +++ b/script/detect-os/run_config.yml @@ -0,0 +1,6 @@ +docker: + build: true + docker_os: ubuntu + docker_os_version: "22.04" + +run_with_default_inputs: true #if false the script won't run automatic tests diff --git a/script/detect-sudo/README.md b/script/detect-sudo/README.md new file mode 100644 index 0000000000..b7590ec23b --- /dev/null +++ b/script/detect-sudo/README.md @@ -0,0 +1,122 @@ +Automatically generated README for this automation recipe: **detect-sudo** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=detect-sudo,1d47ffc556e248dc) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-sudo)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *detect,sudo,access* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "detect sudo access" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=detect,sudo,access` + +`cm run script --tags=detect,sudo,access ` + +*or* + +`cmr "detect sudo access"` + +`cmr "detect sudo access " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'detect,sudo,access', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="detect,sudo,access"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=detect,sudo,access) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "detect sudo access" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags. + + +
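+ +For example, sudo availability can be probed as follows (the script may prompt for a password); `CM_SUDO_USER` is set to `yes` when sudo access is confirmed: + +```bash +cmr "detect sudo access" -j +``` +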
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-sudo/_cm.yaml) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-sudo/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-sudo/_cm.yaml) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-sudo/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-sudo/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-sudo/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/detect-sudo/_cm.yaml) + +___ +### Script output +`cmr "detect sudo access " -j` +#### New environment keys (filter) + +* `CM_SUDO_*` +#### New environment keys auto-detected from customize + +* `CM_SUDO_USER` \ No newline at end of file diff --git a/script/detect-sudo/_cm.yaml b/script/detect-sudo/_cm.yaml new file mode 100644 index 0000000000..56c83b8304 --- /dev/null +++ b/script/detect-sudo/_cm.yaml @@ -0,0 +1,17 @@ +uid: 1d47ffc556e248dc +alias: detect-sudo + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: false + +category: DevOps automation + +new_env_keys: + - CM_SUDO_* + +tags: +- detect +- sudo +- access diff --git a/script/detect-sudo/customize.py b/script/detect-sudo/customize.py new file mode 100644 index 0000000000..2f6fa411a4 --- /dev/null +++ b/script/detect-sudo/customize.py @@ -0,0 +1,32 @@ +from cmind import utils +import os, subprocess + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if prompt_sudo() == 0: + env['CM_SUDO_USER'] = "yes" + + return {'return':0} + +def prompt_sudo(): + if os.geteuid() != 0: + msg = "[sudo] password for %u:" + return subprocess.check_call("sudo echo 'Check sudo' -p '%s'" % msg, shell=True) + return -1 + + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/detect-sudo/run.sh b/script/detect-sudo/run.sh new file mode 100644 index 0000000000..3a584c10cf --- /dev/null +++ b/script/detect-sudo/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... 
+# run "$CM_RUN_CMD" diff --git a/script/download-and-extract/README-extra.md b/script/download-and-extract/README-extra.md new file mode 100644 index 0000000000..6573ab848b --- /dev/null +++ b/script/download-and-extract/README-extra.md @@ -0,0 +1,109 @@ +# CM interface to download and extract files in a unified way on any system + +## Download and extract file without CM caching + +### Use internal CM download function + +This script will use [internal CM download function](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/utils/module.py#L157) +to download and extract a given file to the current directory: + +```bash +cmr "download-and-extract file _extract" --url=https://cKnowledge.org/test/coco-2017-val-annotations.zip +``` +or + +```bash +cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip" +``` + +#### Output environment variables + +You can check produced environment variables produced by this CM script by adding the `-j` flag: + +```bash +cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip" -j +``` + +```json + "new_env": { + "CM_DOWNLOAD_DOWNLOADED_PATH": "D:\\Work\\coco-2017-val-annotations.zip", + "CM_EXTRACT_EXTRACTED_PATH": "D:\\Work", + "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Work" + }, +``` + +#### Input flags and equivalent environment variables + +* `--url` or `--env.CM_DAE_URL` - URL to download file +* `--verify` or `--env.CM_VERIFY_SSL` - set to `no` to skip SSL certificate verification +* `--download_path` or `--store` or `--env.CM_DOWNLOAD_PATH` - where to download file +* `--local_path` or `--from` or `--env.CM_DOWNLOAD_LOCAL_FILE_PATH` - where to take file from instead of downloading +* `--extract_path` or `--to` or `--env.CM_EXTRACT_PATH` - where to extract files (--input should have full path then) +* `--extra_folder` or `--env.CM_EXTRACT_TO_FOLDER` - extra directory when extracting file (to avoid messing up current directory) + + +#### Variations + +* `_keep` or `_no-remove-extracted` or `--env.CM_EXTRACT_REMOVE_EXTRACTED=no` - keep archive file (it will be deleted by default) + + + +### Use wget without SSL certificate verification + +```bash +cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no +``` + +### Use curl without SSL certificate verification + +```bash +cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _curl" --verify=no +``` + +### Check MD5SUM + +```bash +cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 +``` + +### Save to another file + +```bash +cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_FILENAME=xyz --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 +``` + +### Save to another place + +```bash +cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --download_path=D:\Work --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 +``` + +### Reuse local file instead of downloading a file + +```bash +cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --local_path="D:\Work\coco-2017-val-annotations.zip" --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -j +``` + + +### Simplified language to download, store and extract file + + 
+```bash +cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --store=$HOME/dir1 --to=$HOME/dir2 +``` + + + +## Download and extract files with CM caching + +You can use all above commands with `--force_cache` and `--extra_cache_tags` flags. +In such case, a given file will be downloaded to CM cache and can be reused by other CM scripts and workflows: + +```bash +cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --force_cache --extra_cache_tags=coco,2017,val,annotations +``` + +You can find it in CM cache using extra cache tags as follows: +```bash +cm show cache "dae file annotations coco 2017 val" +``` diff --git a/script/download-and-extract/README.md b/script/download-and-extract/README.md new file mode 100644 index 0000000000..af0a40906b --- /dev/null +++ b/script/download-and-extract/README.md @@ -0,0 +1,216 @@ +Automatically generated README for this automation recipe: **download-and-extract** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=download-and-extract,c67e81a4ce2649f5) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-and-extract)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *download-and-extract,file* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "download-and-extract file" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=download-and-extract,file` + +`cm run script --tags=download-and-extract,file[,variations] [--input_flags]` + +*or* + +`cmr "download-and-extract file"` + +`cmr "download-and-extract file [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'download-and-extract,file', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
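+ +As a more complete sketch (assuming this repository is pulled and the test URL from README-extra.md is still reachable), variations can be appended to the `tags` string in the same call: + +```python +import cmind + +# run this script with the _extract and _wget variations and an inline _url.# variation +r = cmind.access({'action': 'run', + 'automation': 'script', + 'tags': 'download-and-extract,file,_extract,_wget,_url.https://cKnowledge.org/test/coco-2017-val-annotations.zip', + 'env': {'CM_VERIFY_SSL': 'no'}, + 'out': 'con'}) + +if r['return'] > 0: + print(r['error']) +``` +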
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="download-and-extract,file"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=download-and-extract,file) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "download-and-extract file[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_extract` + - Environment variables: + - *CM_DAE_EXTRACT_DOWNLOADED*: `yes` + - Workflow: + * `_keep` + - Environment variables: + - *CM_EXTRACT_REMOVE_EXTRACTED*: `no` + - Workflow: + * `_no-remove-extracted` + - Environment variables: + - *CM_EXTRACT_REMOVE_EXTRACTED*: `no` + - Workflow: + * `_url.#` + - Environment variables: + - *CM_DAE_URL*: `#` + - Workflow: + +
+ + + * Group "**download-tool**" +
+ Click here to expand this section. + + * **`_cmutil`** (default) + - Workflow: + * `_curl` + - Workflow: + * `_gdown` + - Workflow: + * `_rclone` + - Workflow: + * `_torrent` + - Environment variables: + - *CM_DAE_DOWNLOAD_USING_TORRENT*: `yes` + - *CM_TORRENT_DOWNLOADED_FILE_NAME*: `<<>>` + - *CM_TORRENT_DOWNLOADED_PATH_ENV_KEY*: `CM_DAE_FILEPATH` + - *CM_TORRENT_WAIT_UNTIL_COMPLETED*: `yes` + - Workflow: + 1. ***Read "prehook_deps" on other CM scripts*** + * download,torrent + - CM script: [download-torrent](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-torrent) + * `_wget` + - Workflow: + +
+ + +#### Default variations + +`_cmutil` + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--download_path=value` → `CM_DOWNLOAD_PATH=value` +* `--extra_folder=value` → `CM_EXTRACT_TO_FOLDER=value` +* `--extract_path=value` → `CM_EXTRACT_PATH=value` +* `--from=value` → `CM_DOWNLOAD_LOCAL_FILE_PATH=value` +* `--local_path=value` → `CM_DOWNLOAD_LOCAL_FILE_PATH=value` +* `--store=value` → `CM_DOWNLOAD_PATH=value` +* `--to=value` → `CM_EXTRACT_PATH=value` +* `--url=value` → `CM_DAE_URL=value` +* `--verify=value` → `CM_VERIFY_SSL=value` + +**The above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "download_path":...}) +``` + +
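+ +For example (the local paths are hypothetical), the mapped flags replace the raw `--env.*` form: + +```bash +cmr "download-and-extract file _extract _wget _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip" --store=$HOME/downloads --to=$HOME/extracted --verify=no +``` +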
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-and-extract/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-and-extract/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-and-extract/_cm.json)*** + * download,file + * `if (CM_DAE_DOWNLOAD_USING_TORRENT not in ['yes', 'True'])` + * CM names: `--adr.['download-script']...` + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + 1. ***Run native script if exists*** + 1. ***Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-and-extract/_cm.json)*** + * extract,file + * `if (CM_DAE_EXTRACT_DOWNLOADED in ['yes', 'True'])` + * CM names: `--adr.['extract-script']...` + - CM script: [extract-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/extract-file) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-and-extract/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-and-extract/_cm.json) + +___ +### Script output +`cmr "download-and-extract file [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `<<>>` +* `<<>>` +* `CM_DOWNLOAD_DOWNLOADED_PATH*` +* `CM_EXTRACT_EXTRACTED_PATH` +* `CM_GET_DEPENDENT_CACHED_PATH` +#### New environment keys auto-detected from customize + +* `CM_GET_DEPENDENT_CACHED_PATH` \ No newline at end of file diff --git a/script/download-and-extract/_cm.json b/script/download-and-extract/_cm.json new file mode 100644 index 0000000000..a8d6cccf20 --- /dev/null +++ b/script/download-and-extract/_cm.json @@ -0,0 +1,162 @@ +{ + "alias": "download-and-extract", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "can_force_cache": true, + "category": "DevOps automation", + "deps": [], + "input_description": {}, + "input_mapping": { + "download_path": "CM_DOWNLOAD_PATH", + "extra_folder": "CM_EXTRACT_TO_FOLDER", + "extract_path": "CM_EXTRACT_PATH", + "from": "CM_DOWNLOAD_LOCAL_FILE_PATH", + "local_path": "CM_DOWNLOAD_LOCAL_FILE_PATH", + "store": "CM_DOWNLOAD_PATH", + "to": "CM_EXTRACT_PATH", + "url": "CM_DAE_URL", + "verify": "CM_VERIFY_SSL" + }, + "new_env_keys": [ + "CM_DOWNLOAD_DOWNLOADED_PATH*", + "CM_EXTRACT_EXTRACTED_PATH", + "<<>>", + "<<>>", + "CM_GET_DEPENDENT_CACHED_PATH" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [ + { + "enable_if_env": { + "CM_DAE_EXTRACT_DOWNLOADED": [ + "yes", + "True" + ] + }, + "names": [ + "extract-script" + ], + "tags": "extract,file", + "update_tags_from_env_with_prefix": { + "_path.": [ + "CM_DOWNLOAD_DOWNLOADED_PATH", + "CM_TORRENT_DOWNLOADED_PATH" + ] + } + } + ], + "prehook_deps": [ + { + "names": [ + "download-script" + ], + "skip_if_env": { + "CM_DAE_DOWNLOAD_USING_TORRENT": [ + "yes", + "True" + ] + }, + "tags": "download,file", + "update_tags_from_env_with_prefix": { + "_url.": [ + "CM_DAE_URL" + ] + } + } + ], + "tags": [ + "dae", + "file", + "download-and-extract" + ], + "tags_help": "download-and-extract file", + "uid": "c67e81a4ce2649f5", + "variations": { + "cmutil": { + "add_deps_recursive": { + "download-script": { + "tags": 
"_cmutil" + } + }, + "default": true, + "group": "download-tool" + }, + "curl": { + "add_deps_recursive": { + "download-script": { + "tags": "_wget" + } + }, + "group": "download-tool" + }, + "extract": { + "env": { + "CM_DAE_EXTRACT_DOWNLOADED": "yes" + } + }, + "rclone": { + "add_deps_recursive": { + "download-script": { + "tags": "_rclone" + } + }, + "group": "download-tool" + }, + "gdown": { + "add_deps_recursive": { + "download-script": { + "tags": "_gdown" + } + }, + "group": "download-tool" + }, + "keep": { + "env": { + "CM_EXTRACT_REMOVE_EXTRACTED": "no" + } + }, + "no-remove-extracted": { + "env": { + "CM_EXTRACT_REMOVE_EXTRACTED": "no" + } + }, + "torrent": { + "env": { + "CM_DAE_DOWNLOAD_USING_TORRENT": "yes", + "CM_TORRENT_DOWNLOADED_FILE_NAME": "<<>>", + "CM_TORRENT_DOWNLOADED_PATH_ENV_KEY": "CM_DAE_FILEPATH", + "CM_TORRENT_WAIT_UNTIL_COMPLETED": "yes" + }, + "group": "download-tool", + "new_env_keys": [ + "CM_TORRENT_DOWNLOADED_PATH" + ], + "prehook_deps": [ + { + "tags": "download,torrent", + "update_tags_from_env_with_prefix": { + "_torrent.": [ + "CM_DAE_TORRENT_PATH" + ] + } + } + ] + }, + "url.#": { + "env": { + "CM_DAE_URL": "#" + } + }, + "wget": { + "add_deps_recursive": { + "download-script": { + "tags": "_wget" + } + }, + "group": "download-tool" + } + }, + "versions": {} +} diff --git a/script/download-and-extract/customize.py b/script/download-and-extract/customize.py new file mode 100644 index 0000000000..db27947e60 --- /dev/null +++ b/script/download-and-extract/customize.py @@ -0,0 +1,61 @@ +from cmind import utils +import os +import hashlib + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if i['input'].get('force_cache'): + extra_cache_tags = i['input'].get('extra_cache_tags', '') + r = automation.update_deps({ + 'deps': meta['prehook_deps'] + meta['posthook_deps'], + 'update_deps':{ + 'download-script': { + 'extra_cache_tags': extra_cache_tags, + 'force_cache': True + }, + 'extract-script':{ + 'extra_cache_tags': extra_cache_tags, + 'force_cache': True + } + } + }) + if r['return']>0: return r + + if env.get('CM_DOWNLOAD_LOCAL_FILE_PATH'): + filepath = env['CM_DOWNLOAD_LOCAL_FILE_PATH'] + + if not os.path.exists(filepath): + return {'return':1, 'error':'Local file {} doesn\'t exist'.format(filepath)} + + env['CM_EXTRACT_REMOVE_EXTRACTED']='no' + + return {'return':0} + +def postprocess(i): + + env = i['env'] + filepath = env.get('CM_EXTRACT_EXTRACTED_PATH', '') + if filepath == '': + filepath = env.get('CM_DOWNLOAD_DOWNLOADED_PATH', '') + + if filepath == '': + return {'return':1, 'error': 'No extracted path set in "CM_EXTRACT_EXTRACTED_PATH"'} + if not os.path.exists(filepath): + return {'return':1, 'error': 'Extracted path doesn\'t exist: {}'.format(filepath)} + + if env.get('CM_DAE_FINAL_ENV_NAME'): + env[env['CM_DAE_FINAL_ENV_NAME']] = filepath + + env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath + + return {'return':0} diff --git a/script/download-and-extract/tests/download-and-extract-file.bat b/script/download-and-extract/tests/download-and-extract-file.bat new file mode 100644 index 0000000000..0688461de6 --- /dev/null +++ b/script/download-and-extract/tests/download-and-extract-file.bat @@ -0,0 +1 @@ +cmr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract _no-remove-extracted" -j --env.CM_VERIFY_SSL=False 
--env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 diff --git a/script/download-and-extract/tests/download-and-extract-file2.bat b/script/download-and-extract/tests/download-and-extract-file2.bat new file mode 100644 index 0000000000..af344b9276 --- /dev/null +++ b/script/download-and-extract/tests/download-and-extract-file2.bat @@ -0,0 +1 @@ +cmr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract" -j --env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 diff --git a/script/download-file/README-extra.md b/script/download-file/README-extra.md new file mode 100644 index 0000000000..b645b04190 --- /dev/null +++ b/script/download-file/README-extra.md @@ -0,0 +1,98 @@ +# CM interface to download files in a unified way on any system + +## Download file without CM caching + +### Use internal CM download function + +This script will use [internal CM download function](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/utils/module.py#L157) +to download a given file to the current directory: + +```bash +cmr "download file" --url=https://cKnowledge.org/test/coco-2017-val-annotations.zip +``` +or + +```bash +cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip" +``` + +#### Output environment variables + +You can check produced environment variables produced by this CM script by adding the `-j` flag: + +```bash +cmr "download file" _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip -j +``` + +```json + "new_env": { + "CM_DOWNLOAD_DOWNLOADED_PATH": "D:\\Downloads\\coco-2017-val-annotations.zip", + "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Downloads\\coco-2017-val-annotations.zip" + }, +``` + +#### Input flags and equivalent environment variables + +* `--url` or `--env.CM_DAE_URL` - URL to download file +* `--download_path` or `--to` or `--env.CM_DOWNLOAD_PATH` - where to download file +* `--local_path` or `--from` or `--env.CM_DOWNLOAD_LOCAL_FILE_PATH` - where to take file from instead of downloading +* `--verify` or `--env.CM_VERIFY_SSL` - set to `no` to skip SSL certificate verification + + +### Use wget without SSL certificate verification + +```bash +cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no +``` + +### Use curl without SSL certificate verification + +```bash +cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _curl" --verify=no +``` + +### Check MD5SUM + +```bash +cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 +``` + +### Save to another file + +```bash +cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_FILENAME=xyz --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 +``` + +### Save to another place + +```bash +cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --download_path=D:\Work --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 +``` + +### Reuse local file instead of downloading a file + +```bash +cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --local_path="D:\Work\coco-2017-val-annotations.zip" --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -j +``` + +Output environment variables produced by this CM 
script: +```json + "new_env": { + "CM_DOWNLOAD_DOWNLOADED_PATH": "D:\\Work\\coco-2017-val-annotations.zip", + "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Work\\coco-2017-val-annotations.zip" + } +``` + +## Download file with CM caching + +You can use all above commands with `--force_cache` and `--extra_cache_tags` flags. +In such case, a given file will be downloaded to CM cache and can be reused by other CM scripts and workflows: + +```bash +cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --force_cache --extra_cache_tags=coco,2017,val,annotations +``` + +You can find it in CM cache using extra cache tags as follows: +```bash +cm show cache "download file annotations coco 2017 val" +``` diff --git a/script/download-file/README.md b/script/download-file/README.md new file mode 100644 index 0000000000..ba19cd3b6c --- /dev/null +++ b/script/download-file/README.md @@ -0,0 +1,204 @@ +Automatically generated README for this automation recipe: **download-file** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=download-file,9cdc8dc41aae437e) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-file)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *download,file* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "download file" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=download,file` + +`cm run script --tags=download,file[,variations] [--input_flags]` + +*or* + +`cmr "download file"` + +`cmr "download file [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'download,file',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="download,file"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=download,file) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "download file[variations]" [--input_flags]`
+
+___
+### Customization
+
+
+#### Variations
+
+ * *No group (any variation can be selected)*
+
+ Click here to expand this section. + + * `_url.#` + - Environment variables: + - *CM_DOWNLOAD_URL*: `#` + - Workflow: + +
+ + + * Group "**download-tool**" +
+ Click here to expand this section. + + * **`_cmutil`** (default) + - Environment variables: + - *CM_DOWNLOAD_TOOL*: `cmutil` + - Workflow: + * `_curl` + - Environment variables: + - *CM_DOWNLOAD_TOOL*: `curl` + - Workflow: + * `_gdown` + - Environment variables: + - *CM_DOWNLOAD_TOOL*: `gdown` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_package.gdown + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_rclone` + - Environment variables: + - *CM_DOWNLOAD_TOOL*: `rclone` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,rclone + - CM script: [get-rclone](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-rclone) + * `_wget` + - Environment variables: + - *CM_DOWNLOAD_TOOL*: `wget` + - Workflow: + +
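+
+For example (an illustrative command, not from the generated docs; the Google Drive file id is a placeholder), the download tool can be switched by selecting one of the variations above:
+
+```bash
+cmr "download file _gdown _url.https://drive.google.com/uc?id=<FILE_ID>" -j
+```
+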
+ + +#### Default variations + +`_cmutil` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--download_path=value`  →  `CM_DOWNLOAD_PATH=value`
+* `--from=value`  →  `CM_DOWNLOAD_LOCAL_FILE_PATH=value`
+* `--local_path=value`  →  `CM_DOWNLOAD_LOCAL_FILE_PATH=value`
+* `--md5sum=value`  →  `CM_DOWNLOAD_CHECKSUM=value`
+* `--store=value`  →  `CM_DOWNLOAD_PATH=value`
+* `--url=value`  →  `CM_DOWNLOAD_URL=value`
+* `--verify=value`  →  `CM_VERIFY_SSL=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "download_path":...})
+```
+
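+
+As a fuller illustration (a minimal sketch; the URL reuses the test file from README-extra.md and assumes this repository is pulled), the same flags become dictionary keys in the Python API:
+
+```python
+import cmind as cm
+
+# "url" and "verify" map to CM_DOWNLOAD_URL and CM_VERIFY_SSL (see the table above)
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'download,file,_wget',
+               'url': 'https://cKnowledge.org/test/coco-2017-val-annotations.zip',
+               'verify': 'no',
+               'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```
+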
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_RCLONE_COPY_USING: `sync` + +
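+
+For example (an illustrative override; `copy` is a standard rclone mode alongside the default `sync` used by this script, and the remote path is a placeholder):
+
+```bash
+cmr "download file _rclone _url.<RCLONE_REMOTE_OR_URL>" --env.CM_RCLONE_COPY_USING=copy
+```
+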
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-file/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-file/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-file/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-file/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-file/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-file/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-file/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-file/_cm.json) + +___ +### Script output +`cmr "download file [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `<<>>` +* `CM_DOWNLOAD_DOWNLOADED_PATH` +* `CM_GET_DEPENDENT_CACHED_PATH` +#### New environment keys auto-detected from customize + +* `CM_DOWNLOAD_DOWNLOADED_PATH` +* `CM_GET_DEPENDENT_CACHED_PATH` \ No newline at end of file diff --git a/script/download-file/_cm.json b/script/download-file/_cm.json new file mode 100644 index 0000000000..9b960e7f60 --- /dev/null +++ b/script/download-file/_cm.json @@ -0,0 +1,93 @@ +{ + "alias": "download-file", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "can_force_cache": true, + "category": "DevOps automation", + "deps": [ + { + "tags": "detect,os" + } + ], + "default_env": { + "CM_RCLONE_COPY_USING": "sync" + }, + "input_description": {}, + "input_mapping": { + "download_path": "CM_DOWNLOAD_PATH", + "from": "CM_DOWNLOAD_LOCAL_FILE_PATH", + "local_path": "CM_DOWNLOAD_LOCAL_FILE_PATH", + "store": "CM_DOWNLOAD_PATH", + "url": "CM_DOWNLOAD_URL", + "verify": "CM_VERIFY_SSL", + "md5sum": "CM_DOWNLOAD_CHECKSUM" + }, + "new_env_keys": [ + "CM_DOWNLOAD_DOWNLOADED_PATH", + "<<>>", + "CM_GET_DEPENDENT_CACHED_PATH" + ], + "new_state_keys": [], + "post_deps": [], + "prehook_deps": [], + "tags": [ + "download", + "file", + "download-file" + ], + "tags_help": "download file", + "uid": "9cdc8dc41aae437e", + "variations": { + "cmutil": { + "default": true, + "env": { + "CM_DOWNLOAD_TOOL": "cmutil" + }, + "group": "download-tool" + }, + "curl": { + "default_env": { + "CM_DOWNLOAD_CURL_EMULATE_BROWSER": "no" + }, + "env": { + "CM_DOWNLOAD_TOOL": "curl" + }, + "group": "download-tool" + }, + "gdown": { + "deps": [ + { + "tags": "get,generic-python-lib,_package.gdown" + } + ], + "env": { + "CM_DOWNLOAD_TOOL": "gdown" + }, + "group": "download-tool" + }, + "rclone": { + "deps": [ + { + "tags": "get,rclone" + } + ], + "env": { + "CM_DOWNLOAD_TOOL": "rclone" + }, + "group": "download-tool" + }, + "url.#": { + "env": { + "CM_DOWNLOAD_URL": "#" + } + }, + "wget": { + "env": { + "CM_DOWNLOAD_TOOL": "wget" + }, + "group": "download-tool" + } + }, + "versions": {} +} diff --git a/script/download-file/customize.py b/script/download-file/customize.py new file mode 100644 index 0000000000..9c68d96083 --- /dev/null +++ 
b/script/download-file/customize.py @@ -0,0 +1,167 @@ +from cmind import utils +import os +import hashlib + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + tool = env.get('CM_DOWNLOAD_TOOL', '') + + if env.get('CM_DOWNLOAD_LOCAL_FILE_PATH'): + filepath = env['CM_DOWNLOAD_LOCAL_FILE_PATH'] + + if not os.path.exists(filepath): + return {'return':1, 'error':'Local file {} doesn\'t exist'.format(filepath)} + + env['CM_DOWNLOAD_CMD'] = "" + + env['CM_DOWNLOAD_FILENAME'] = filepath + + if not quiet: + print ('') + print ('Using local file: {}'.format(filepath)) + else: + url = env.get('CM_DOWNLOAD_URL','') + + if url=='': + return {'return':1, 'error': 'please specify URL using --url={URL} or --env.CM_DOWNLOAD_URL={URL}'} + + print ('') + print ('Downloading from {}'.format(url)) + + if '&' in url and tool != "cmutil": + if os_info['platform'] == 'windows': + url = '"'+url+'"' + else: + url = url.replace('&','\\&') + + extra_download_options = env.get('CM_DOWNLOAD_EXTRA_OPTIONS', '') + + verify_ssl = env.get('CM_VERIFY_SSL', "True") + if str(verify_ssl).lower() in [ "no", "false" ]: + verify_ssl = False + if tool == 'wget': + extra_download_options += " --no-check-certificate" + else: + verify_ssl = True + + if env.get('CM_DOWNLOAD_PATH', '') != '': + download_path = env['CM_DOWNLOAD_PATH'] + if not os.path.exists(download_path): + os.makedirs(download_path, exist_ok = True) + os.chdir(download_path) + + if env.get('CM_DOWNLOAD_FILENAME', '') == '': + urltail = os.path.basename(env['CM_DOWNLOAD_URL']) + urlhead = os.path.dirname(env['CM_DOWNLOAD_URL']) + if "." in urltail and "/" in urlhead: + # Check if ? after filename + j = urltail.find('?') + if j>0: + urltail=urltail[:j] + env['CM_DOWNLOAD_FILENAME'] = urltail + elif env.get('CM_DOWNLOAD_TOOL', '') == "rclone": + env['CM_DOWNLOAD_FILENAME'] = urltail + else: + env['CM_DOWNLOAD_FILENAME'] = "index.html" + + if tool == "cmutil": + print ('') + + cm = automation.cmind + for i in range(1,5): + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', + 'url':url, + 'verify': verify_ssl}) + if r['return'] == 0: break + oldurl = url + url = env.get('CM_DOWNLOAD_URL'+str(i),'') + if url == '': + break + print(f"Download from {oldurl} failed, trying from {url}") + + if r['return']>0: return r + + env['CM_DOWNLOAD_CMD'] = "" + env['CM_DOWNLOAD_FILENAME'] = r['filename'] + + elif tool == "wget": + if env.get('CM_DOWNLOAD_FILENAME', '') != '': + extra_download_options +=' -O '+env['CM_DOWNLOAD_FILENAME']+' ' + env['CM_DOWNLOAD_CMD'] = f"wget -nc {extra_download_options} {url}" + + elif tool == "curl": + if env.get('CM_DOWNLOAD_FILENAME', '') != '': + extra_download_options +=' --output '+env['CM_DOWNLOAD_FILENAME']+' ' + + env['CM_DOWNLOAD_CMD'] = f"curl {extra_download_options} {url}" + + + elif tool == "gdown": + env['CM_DOWNLOAD_CMD'] = f"gdown {extra_download_options} {url}" + + elif tool == "rclone": + if env.get('CM_RCLONE_CONFIG_CMD', '') != '': + env['CM_DOWNLOAD_CONFIG_CMD'] = env['CM_RCLONE_CONFIG_CMD'] + rclone_copy_using = env.get('CM_RCLONE_COPY_USING', 'sync') + env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {url} {os.path.join(os.getcwd(), env['CM_DOWNLOAD_FILENAME'])} -P" + + filename = env['CM_DOWNLOAD_FILENAME'] + env['CM_DOWNLOAD_DOWNLOADED_FILENAME'] = filename + + filename = os.path.basename(env['CM_DOWNLOAD_FILENAME']) + filepath = os.path.join(os.getcwd(), filename) + + 
env['CM_DOWNLOAD_DOWNLOADED_PATH'] = filepath + + #verify checksum if file already present + if env.get('CM_DOWNLOAD_CHECKSUM', '') != '': + x='*' if os_info['platform'] == 'windows' else '' + env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{} | md5sum -c".format(env.get('CM_DOWNLOAD_CHECKSUM'), x, env['CM_DOWNLOAD_FILENAME']) + else: + env['CM_DOWNLOAD_CHECKSUM_CMD'] = "" + + if os_info['platform'] == 'windows': + # Check that if empty CMD, should add "" + for x in ['CM_DOWNLOAD_CMD', 'CM_DOWNLOAD_CHECKSUM_CMD']: + env[x+'_USED']='YES' if env.get(x,'')!='' else 'NO' + + return {'return':0} + +def postprocess(i): + + automation = i['automation'] + + env = i['env'] + + filepath = env['CM_DOWNLOAD_DOWNLOADED_PATH'] + + if not os.path.exists(filepath): + return {'return':1, 'error': 'Downloaded path {} does not exist. Probably CM_DOWNLOAD_FILENAME is not set and CM_DOWNLOAD_URL given is not pointing to a file'.format(filepath)} + + if env.get('CM_DOWNLOAD_RENAME_FILE', '') != '': + file_dir = os.path.dirname(filepath) + new_file_name = env['CM_DOWNLOAD_RENAME_FILE'] + new_file_path = os.path.join(file_dir, new_file_name) + os.rename(filepath, new_file_path) + filepath = new_file_path + + + if env.get('CM_DOWNLOAD_FINAL_ENV_NAME','') != '': + env[env['CM_DOWNLOAD_FINAL_ENV_NAME']] = filepath + + env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath + + # Since may change directory, check if need to clean some temporal files + automation.clean_some_tmp_files({'env':env}) + + return {'return':0} diff --git a/script/download-file/run.bat b/script/download-file/run.bat new file mode 100644 index 0000000000..b341a01e40 --- /dev/null +++ b/script/download-file/run.bat @@ -0,0 +1,56 @@ +rem Download file + +rem If MD5 is wrong, download again! + +rem Next line allows ERRORLEVEL inside if statements! +setlocal enabledelayedexpansion + +if NOT "%CM_DOWNLOAD_CONFIG_CMD%" == "" ( + echo. + echo %CM_DOWNLOAD_CONFIG_CMD% + echo. + %CM_DOWNLOAD_CONFIG_CMD% + IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL! +) + +set require_download=1 + +if not "%CM_DOWNLOAD_LOCAL_FILE_PATH%" == "" ( + set require_download=0 +) + +if "%CM_DOWNLOAD_TOOL%" == "cmutil" ( + set require_download=0 +) + + +if exist "%CM_DOWNLOAD_DOWNLOADED_PATH%" ( + if "%CM_DOWNLOAD_CHECKSUM_CMD_USED%" == "YES" ( + echo. + echo %CM_DOWNLOAD_CHECKSUM_CMD% + cmd /c %CM_DOWNLOAD_CHECKSUM_CMD% + IF !ERRORLEVEL! NEQ 0 ( + if NOT "%CM_DOWNLOAD_LOCAL_FILE_PATH%" == "" exit 1 + if "%CM_DOWNLOAD_CMD_USED%" == "NO" exit 1 + ) else ( + set require_download=0 + ) + ) +) + +if "!require_download!" == "1" ( + echo. + del /Q %CM_DOWNLOAD_FILENAME% + + echo. + echo %CM_DOWNLOAD_CMD% + cmd /c %CM_DOWNLOAD_CMD% + IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL! + + if "%CM_DOWNLOAD_CHECKSUM_CMD_USED%" == "YES" ( + echo. + echo %CM_DOWNLOAD_CHECKSUM_CMD% + cmd /c %CM_DOWNLOAD_CHECKSUM_CMD% + IF !ERRORLEVEL! NEQ 0 EXIT 1 + ) +) diff --git a/script/download-file/run.sh b/script/download-file/run.sh new file mode 100644 index 0000000000..6d922fc7e0 --- /dev/null +++ b/script/download-file/run.sh @@ -0,0 +1,53 @@ +#!/bin/bash + + +if [[ -n ${CM_DOWNLOAD_CONFIG_CMD} ]]; then + echo "" + echo "${CM_DOWNLOAD_CONFIG_CMD}" + eval "${CM_DOWNLOAD_CONFIG_CMD}" + test $? -eq 0 || exit $? 
+fi + +require_download=1 + +if [[ "${CM_DOWNLOAD_LOCAL_FILE_PATH}" != "" ]]; then + require_download=0 +fi + +if [[ ${CM_DOWNLOAD_TOOL} == "cmutil" ]]; then + require_download=0 +fi + +if [ -e ${CM_DOWNLOAD_DOWNLOADED_PATH} ]; then + if [[ "${CM_DOWNLOAD_CHECKSUM_CMD}" != "" ]]; then + echo "" + echo "${CM_DOWNLOAD_CHECKSUM_CMD}" + eval "${CM_DOWNLOAD_CHECKSUM_CMD}" + if [ $? -ne 0 ]; then + # checksum not supposed to fail for locally given file + if [[ "${CM_DOWNLOAD_LOCAL_FILE_PATH}" != "" ]]; then + exit 1 + fi + else + require_download="0" + fi + fi +fi + +if [[ ${require_download} == "1" ]]; then + echo "" + rm -f ${CM_DOWNLOAD_FILENAME} + + echo "" + echo "${CM_DOWNLOAD_CMD}" + eval "${CM_DOWNLOAD_CMD}" + test $? -eq 0 || exit $? + + if [[ "${CM_DOWNLOAD_CHECKSUM_CMD}" != "" ]]; then + echo "" + echo "${CM_DOWNLOAD_CHECKSUM_CMD}" + eval "${CM_DOWNLOAD_CHECKSUM_CMD}" + fi +fi + +test $? -eq 0 || exit $? diff --git a/script/download-file/tests/download-file.bat b/script/download-file/tests/download-file.bat new file mode 100644 index 0000000000..4421502823 --- /dev/null +++ b/script/download-file/tests/download-file.bat @@ -0,0 +1,2 @@ +cmr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget" -j --env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 + diff --git a/script/download-file/tests/download-file2.bat b/script/download-file/tests/download-file2.bat new file mode 100644 index 0000000000..2032bc177a --- /dev/null +++ b/script/download-file/tests/download-file2.bat @@ -0,0 +1 @@ +cmr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _cmutil" -j --env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 diff --git a/script/download-torrent/README.md b/script/download-torrent/README.md new file mode 100644 index 0000000000..9128bf3cf6 --- /dev/null +++ b/script/download-torrent/README.md @@ -0,0 +1,157 @@ +Automatically generated README for this automation recipe: **download-torrent** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=download-torrent,69b752c5618e45bb) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-torrent)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *download,torrent,download-torrent* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "download torrent download-torrent" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=download,torrent,download-torrent`
+
+`cm run script --tags=download,torrent,download-torrent[,variations] [--input_flags]`
+
+*or*
+
+`cmr "download torrent download-torrent"`
+
+`cmr "download torrent download-torrent [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'download,torrent,download-torrent',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="download,torrent,download-torrent"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=download,torrent,download-torrent) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "download torrent download-torrent[variations]" [--input_flags]`
+
+___
+### Customization
+
+
+#### Variations
+
+ * *No group (any variation can be selected)*
+
+ Click here to expand this section. + + * `_torrent.#` + - Environment variables: + - *CM_TORRENT_FILE*: `#` + - Workflow: + +
+ + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--wait=value`  →  `CM_TORRENT_WAIT_UNTIL_COMPLETED=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "wait":...})
+```
+
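+
+For example (an illustrative command; the `.torrent` path is a placeholder), the following would add a torrent and block until the download reaches 100%:
+
+```bash
+cmr "download torrent download-torrent _torrent.<PATH_TO_TORRENT_FILE>" --wait=yes -j
+```
+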
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_TORRENT_WAIT_UNTIL_COMPLETED: `no` + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-torrent/_cm.json)*** + * get,generic-sys-util,_transmission + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-torrent/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-torrent/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-torrent/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-torrent/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-torrent/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/download-torrent/_cm.json) + +___ +### Script output +`cmr "download torrent download-torrent [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `<<>>` +* `CM_TORRENT_DOWNLOADED_PATH` +#### New environment keys auto-detected from customize + +* `CM_TORRENT_DOWNLOADED_PATH` \ No newline at end of file diff --git a/script/download-torrent/_cm.json b/script/download-torrent/_cm.json new file mode 100644 index 0000000000..47e8a113fd --- /dev/null +++ b/script/download-torrent/_cm.json @@ -0,0 +1,41 @@ +{ + "alias": "download-torrent", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "DevOps automation", + "deps": [ + { + "tags": "get,generic-sys-util,_transmission" + } + ], + "input_description": {}, + "input_mapping": { + "wait": "CM_TORRENT_WAIT_UNTIL_COMPLETED" + }, + "default_env": { + "CM_TORRENT_WAIT_UNTIL_COMPLETED": "no" + }, + "new_env_keys": [ + "CM_TORRENT_DOWNLOADED_PATH", + "<<>>" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "download", + "torrent", + "download-torrent" + ], + "uid": "69b752c5618e45bb", + "variations": { + "torrent.#": { + "env": { + "CM_TORRENT_FILE": "#" + } + } + }, + "versions": {} +} diff --git a/script/download-torrent/customize.py b/script/download-torrent/customize.py new file mode 100644 index 0000000000..52b57f253b --- /dev/null +++ b/script/download-torrent/customize.py @@ -0,0 +1,33 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if not env.get('CM_TORRENT_DOWNLOADED_FILE_NAME'): + return {'return':1, 'error': 'CM_TORRENT_DOWNLOADED_FILE_NAME is not set' } + + return {'return':0} + +def postprocess(i): + + env = i['env'] + torrent_downloaded_path = os.path.join(env['CM_TORRENT_DOWNLOADED_DIR'], env['CM_TORRENT_DOWNLOADED_NAME']) + env['CM_TORRENT_DOWNLOADED_PATH'] = torrent_downloaded_path + + if 'CM_TORRENT_DOWNLOADED_PATH_ENV_KEY' in env: + key = env['CM_TORRENT_DOWNLOADED_PATH_ENV_KEY'] + env[key] = torrent_downloaded_path + + env['CM_GET_DEPENDENT_CACHED_PATH'] = torrent_downloaded_path + + return {'return':0} diff --git a/script/download-torrent/run.sh b/script/download-torrent/run.sh new file mode 
100644 index 0000000000..c3d639ff10 --- /dev/null +++ b/script/download-torrent/run.sh @@ -0,0 +1,34 @@ +#!/bin/bash +chmod 777 ${PWD} +#transmission-remote --no-auth --download-dir ${PWD} -a ${CM_TORRENT_FILE} +cmd="transmission-remote --download-dir ${PWD} -a ${CM_TORRENT_FILE}" +echo $cmd +eval $cmd +test $? -eq 0 || exit $? + +cmd="transmission-remote -l" +echo $cmd +eval $cmd +test $? -eq 0 || exit $? + +if [[ ${CM_TORRENT_WAIT_UNTIL_COMPLETED} == "yes" ]]; then + while true; + do + out=`transmission-remote -l |grep ${CM_TORRENT_DOWNLOADED_FILE_NAME} | grep "100%"` + if [[ -z $out ]]; then + transmission-remote -l + sleep 6 + else + break + fi + done +fi + +id=`transmission-remote -l |grep ${CM_TORRENT_DOWNLOADED_FILE_NAME} |tr -s ' ' | cut -d' ' -f2` +test $? -eq 0 || exit $? +location=`transmission-remote -t${id} -i |grep Location |cut -d':' -f2 |tr -d ' '` +test $? -eq 0 || exit $? +echo "CM_TORRENT_DOWNLOADED_DIR=$location">> tmp-run-env.out +name=`transmission-remote -t${id} -i |grep Name |cut -d':' -f2 |tr -d ' '` +test $? -eq 0 || exit $? +echo "CM_TORRENT_DOWNLOADED_NAME=$name">> tmp-run-env.out diff --git a/script/dump-pip-freeze/README.md b/script/dump-pip-freeze/README.md new file mode 100644 index 0000000000..b0e1a0a372 --- /dev/null +++ b/script/dump-pip-freeze/README.md @@ -0,0 +1,121 @@ +Automatically generated README for this automation recipe: **dump-pip-freeze** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=dump-pip-freeze,33eb0a8006664cae) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/dump-pip-freeze)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *dump,pip,freeze* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "dump pip freeze" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=dump,pip,freeze` + +`cm run script --tags=dump,pip,freeze ` + +*or* + +`cmr "dump pip freeze"` + +`cmr "dump pip freeze " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'dump,pip,freeze',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="dump,pip,freeze"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=dump,pip,freeze) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "dump pip freeze" `
+
+___
+### Customization
+
+#### Default environment
+
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
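+
+Note that this recipe reports its result through the CM *state* rather than the environment: `customize.py` (below) parses the saved `pip freeze` output into a dictionary stored under `state['pip_freeze']`, as declared in `new_state_keys` in `_cm.yaml`.
+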
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/dump-pip-freeze/_cm.yaml)*** + * get,python + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/dump-pip-freeze/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/dump-pip-freeze/_cm.yaml) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/dump-pip-freeze/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/dump-pip-freeze/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/dump-pip-freeze/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/dump-pip-freeze/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/dump-pip-freeze/_cm.yaml) + +___ +### Script output +`cmr "dump pip freeze " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/dump-pip-freeze/_cm.yaml b/script/dump-pip-freeze/_cm.yaml new file mode 100644 index 0000000000..39acd5eee3 --- /dev/null +++ b/script/dump-pip-freeze/_cm.yaml @@ -0,0 +1,16 @@ +alias: dump-pip-freeze +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +tags: +- dump +- pip +- freeze +new_state_keys: + - pip_freeze +deps: + - tags: get,python + names: + - python + - python3 +uid: 33eb0a8006664cae diff --git a/script/dump-pip-freeze/customize.py b/script/dump-pip-freeze/customize.py new file mode 100644 index 0000000000..eb5eeab8af --- /dev/null +++ b/script/dump-pip-freeze/customize.py @@ -0,0 +1,52 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + if env.get('CM_DUMP_RAW_PIP_FREEZE_FILE_PATH', '') == '': + env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join(os.getcwd(), "tmp-pip-freeze") + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + state = i['state'] + + os_info = i['os_info'] + + automation = i['automation'] + + pip_freeze = {} + pip_freeze_file = env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] + if not os.path.isfile(pip_freeze_file): + # If was not created, sometimes issues on Windows + # There is another workaround + if os_info['platform'] == 'windows': + r = automation.cmind.access({'action':'system', + 'automation':'utils', + 'cmd':'py -m pip freeze', + 'stdout':pip_freeze_file}) + # skip output + + if os.path.isfile(pip_freeze_file): + with open(pip_freeze_file, "r") as f: + for line in f.readlines(): + if "==" in line: + split = line.split("==") + pip_freeze[split[0]] = split[1].strip() + + + state['pip_freeze'] = pip_freeze + + return {'return':0} diff --git a/script/dump-pip-freeze/dump.py b/script/dump-pip-freeze/dump.py new file mode 100644 index 0000000000..1d7f7ab853 --- /dev/null +++ b/script/dump-pip-freeze/dump.py @@ -0,0 +1,21 @@ +import os +from pip._internal.operations import freeze + +pip_freeze_out = 
os.environ.get('CM_DUMP_RAW_PIP_FREEZE_FILE_PATH', 'tmp-pip-freeze')
+
+if os.path.isfile(pip_freeze_out):
+    os.remove(pip_freeze_out)
+
+pkgs = freeze.freeze()
+
+x = ''
+
+try:
+    for pkg in pkgs:
+        x += pkg + '\n'
+except:
+    # Ignore packages that pip fails to serialize
+    pass
+
+if len(x) > 0:
+    with open(pip_freeze_out, "w") as f:
+        f.write(x)
diff --git a/script/dump-pip-freeze/run.bat b/script/dump-pip-freeze/run.bat
new file mode 100644
index 0000000000..b323ddc22e
--- /dev/null
+++ b/script/dump-pip-freeze/run.bat
@@ -0,0 +1,4 @@
+if not "%CM_FAKE_RUN%" == "yes" (
+  %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\dump.py
+  IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+)
diff --git a/script/dump-pip-freeze/run.sh b/script/dump-pip-freeze/run.sh
new file mode 100644
index 0000000000..a1cdb52eb4
--- /dev/null
+++ b/script/dump-pip-freeze/run.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+
+
+function exit_if_error() {
+  test $? -eq 0 || exit $?
+}
+
+function run() {
+  echo "Running: "
+  echo "$1"
+  echo ""
+  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+    eval "$1"
+    exit_if_error
+  fi
+}
+
+#Add your run commands here...
+# run "$CM_RUN_CMD"
+run "${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/dump.py"
diff --git a/script/extract-file/README-extra.md b/script/extract-file/README-extra.md
new file mode 100644
index 0000000000..fbd8ccaf4b
--- /dev/null
+++ b/script/extract-file/README-extra.md
@@ -0,0 +1,115 @@
+# CM interface to extract files in a unified way on any system
+
+## Extract files without CM caching
+
+You can use this script to extract `.tar`, `.gz`, `.zip`, `.bz2`, `.tar.gz` and `.tgz` files.
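+
+(The extraction tool is selected from the file extension in `customize.py`, shown later in this patch: `unzip` for `.zip`, `tar` for `.tar`, `.tar.gz` and `.tar.xz`, and `gzip -d` for plain `.gz`.)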
+
+Before using further examples, you can download `coco-2017-val-annotations.zip` using CM:
+```bash
+cmr "download file" --url=https://cKnowledge.org/test/coco-2017-val-annotations.zip
+```
+
+Extract this archive into the current directory while keeping the archive file:
+
+```bash
+cmr "extract file _keep" --input=coco-2017-val-annotations.zip
+```
+
+or
+
+```bash
+cmr "extract file _keep _path.coco-2017-val-annotations.zip"
+```
+
+You can remove `_keep` to delete the archive after extracting files:
+
+```bash
+cmr "extract file" --input=coco-2017-val-annotations.zip
+```
+
+#### Output environment variables
+
+You can check the environment variables produced by this CM script by adding the `-j` flag:
+
+```bash
+cmr "extract file _keep" --input=coco-2017-val-annotations.zip -j
+```
+
+```json
+ "new_env": {
+ "CM_EXTRACT_EXTRACTED_PATH": "D:\\Work99.3 readme\\xyz",
+ "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Work99.3 readme\\xyz"
+ },
+```
+
+#### Input flags and equivalent environment variables
+
+* `--input` or `--env.CM_EXTRACT_FILEPATH` - input file
+* `--extract_path` or `--to` or `--env.CM_EXTRACT_PATH` - where to extract files (`--input` should then contain the full path)
+* `--extra_folder` or `--env.CM_EXTRACT_TO_FOLDER` - extra directory to extract into (to avoid cluttering the current directory)
+
+#### Variations
+
+* `_keep` or `_no-remove-extracted` or `--env.CM_EXTRACT_REMOVE_EXTRACTED=no` - keep the archive file (it will be deleted by default)
+
+
+
+### Extract to a specific folder
+
+Note that you need to provide a full path to the archive file if you want to extract it to some directory:
+
+```bash
+cmr "extract file _keep" --input="$PWD/coco-2017-val-annotations.zip" --extract_path="$HOME/cm-test"
+```
+
+### Add extra folder to extracted files
+
+You can add an extra directory when extracting files to avoid cluttering the current directory:
+
+```bash
+cmr "extract file _keep" --input=coco-2017-val-annotations.zip --extra_folder=xyz
+```
+
+
+
+
+## Extract 1 file and test MD5SUM without CM caching
+
+You can use this script to extract a single archived file (a model, captions, etc.) and check its MD5SUM.
+
+To test this CM script, download `captions_val2017.json.gz`:
+```bash
+cmr "download file _url.https://cKnowledge.org/test/captions_val2017.json.gz"
+```
+
+Then extract it and check its MD5SUM as follows:
+
+```bash
+cmr "extract file _keep _path.captions_val2017.json.gz" --env.CM_EXTRACT_EXTRACTED_CHECKSUM=b7bec29ab7bd8971ae4cafc2390a658f -j
+```
+
+
+### Force another filename during extract
+
+Some workflows may need a different filename than the original. You can change it as follows:
+```bash
+cmr "extract file _keep _path.captions_val2017.json.gz" --env.CM_EXTRACT_EXTRACTED_FILENAME=new-file.json --env.CM_EXTRACT_EXTRACTED_CHECKSUM=b7bec29ab7bd8971ae4cafc2390a658f
+```
+
+
+
+
+## Extract file(s) to CM cache
+
+You can use all of the above commands with the `--force_cache` and `--extra_cache_tags` flags.
+In this case, the file(s) will be extracted to the CM cache and can be reused by other CM scripts and workflows.
+Note that you need to provide a full path to the archive file.
+
+```bash
+cmr "extract file _keep" --input=$HOME/coco-2017-val-annotations.zip --force_cache --extra_cache_tags=coco,2017,val,annotations
+```
+
+You can find it in the CM cache using the extra cache tags as follows:
+```bash
+cm show cache "extract file annotations coco 2017 val"
+```
diff --git a/script/extract-file/README.md b/script/extract-file/README.md
new file mode 100644
index 0000000000..9b7fcc0704
--- /dev/null
+++ b/script/extract-file/README.md
@@ -0,0 +1,170 @@
+Automatically generated README for this automation recipe: **extract-file**
+
+Category: **DevOps automation**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=extract-file,3f0b76219d004817) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/extract-file)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *extract,file*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "extract file" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=extract,file`
+
+`cm run script --tags=extract,file[,variations] [--input_flags]`
+
+*or*
+
+`cmr "extract file"`
+
+`cmr "extract file [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'extract,file',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="extract,file"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=extract,file) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "extract file[variations]" [--input_flags]`
+
+___
+### Customization
+
+
+#### Variations
+
+ * *No group (any variation can be selected)*
+
+ Click here to expand this section. + + * `_keep` + - Environment variables: + - *CM_EXTRACT_REMOVE_EXTRACTED*: `no` + - Workflow: + * `_no-remove-extracted` + - Environment variables: + - *CM_EXTRACT_REMOVE_EXTRACTED*: `no` + - Workflow: + * `_path.#` + - Environment variables: + - *CM_EXTRACT_FILEPATH*: `#` + - Workflow: + +
+ + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--extra_folder=value`  →  `CM_EXTRACT_TO_FOLDER=value`
+* `--extract_path=value`  →  `CM_EXTRACT_PATH=value`
+* `--input=value`  →  `CM_EXTRACT_FILEPATH=value`
+* `--to=value`  →  `CM_EXTRACT_PATH=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "extra_folder":...})
+```
+
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/extract-file/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/extract-file/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/extract-file/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/extract-file/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/extract-file/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/extract-file/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/extract-file/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/extract-file/_cm.json) + +___ +### Script output +`cmr "extract file [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `<<>>` +* `CM_EXTRACT_EXTRACTED_PATH` +* `CM_GET_DEPENDENT_CACHED_PATH` +#### New environment keys auto-detected from customize + +* `CM_EXTRACT_EXTRACTED_PATH` +* `CM_GET_DEPENDENT_CACHED_PATH` \ No newline at end of file diff --git a/script/extract-file/_cm.json b/script/extract-file/_cm.json new file mode 100644 index 0000000000..0f1c4aae93 --- /dev/null +++ b/script/extract-file/_cm.json @@ -0,0 +1,53 @@ +{ + "alias": "extract-file", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "can_force_cache": true, + "category": "DevOps automation", + "deps": [ + { + "tags": "detect,os" + } + ], + "input_description": {}, + "input_mapping": { + "extra_folder": "CM_EXTRACT_TO_FOLDER", + "extract_path": "CM_EXTRACT_PATH", + "input": "CM_EXTRACT_FILEPATH", + "to": "CM_EXTRACT_PATH" + }, + "new_env_keys": [ + "CM_EXTRACT_EXTRACTED_PATH", + "<<>>", + "CM_GET_DEPENDENT_CACHED_PATH" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "extract", + "file" + ], + "tags_help": "extract file", + "uid": "3f0b76219d004817", + "variations": { + "keep": { + "env": { + "CM_EXTRACT_REMOVE_EXTRACTED": "no" + } + }, + "no-remove-extracted": { + "env": { + "CM_EXTRACT_REMOVE_EXTRACTED": "no" + } + }, + "path.#": { + "env": { + "CM_EXTRACT_FILEPATH": "#" + } + } + }, + "versions": {} +} diff --git a/script/extract-file/customize.py b/script/extract-file/customize.py new file mode 100644 index 0000000000..6bbd2fe753 --- /dev/null +++ b/script/extract-file/customize.py @@ -0,0 +1,197 @@ +from cmind import utils +import os +import hashlib + +def preprocess(i): + + variation_tags = i.get('variation_tags',[]) + + os_info = i['os_info'] + + windows = os_info['platform'] == 'windows' + +# xsep = '^&^&' if windows else '&&' + xsep = '&&' + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + filename = env.get('CM_EXTRACT_FILEPATH','') + if filename == '': + return {'return': 1, 'error': 'Extract with no download requested and CM_EXTRACT_FILEPATH is not set'} + + env['CM_EXTRACT_FILENAME'] = filename + + # Check if extract to some path outside CM 
cache (to reuse large files later if cache is cleaned) + extract_path = env.get('CM_EXTRACT_PATH', '') + if extract_path != '': + if not os.path.exists(extract_path): + os.makedirs(extract_path, exist_ok = True) + + os.chdir(extract_path) + + # By default remove archive after extraction + remove_extracted = False if env.get('CM_EXTRACT_REMOVE_EXTRACTED','').lower() == 'no' else True + + if filename.endswith(".zip"): + env['CM_EXTRACT_TOOL'] = "unzip" + elif filename.endswith(".tar.gz"): + if windows: + x = '"' if ' ' in filename else '' + env['CM_EXTRACT_CMD0'] = 'gzip -d ' + x + filename + x + filename = filename[:-3] # leave only .tar + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf' + env['CM_EXTRACT_TOOL'] = 'tar ' + else: + env['CM_EXTRACT_TOOL_OPTIONS'] = ' --skip-old-files -xvzf ' + env['CM_EXTRACT_TOOL'] = 'tar ' + elif filename.endswith(".tar.xz"): + if windows: + x = '"' if ' ' in filename else '' + env['CM_EXTRACT_CMD0'] = 'xz -d ' + x + filename + x + filename = filename[:-3] # leave only .tar + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf' + env['CM_EXTRACT_TOOL'] = 'tar ' + else: + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvJf' + env['CM_EXTRACT_TOOL'] = 'tar ' + elif filename.endswith(".tar"): + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf' + env['CM_EXTRACT_TOOL'] = 'tar ' + elif filename.endswith(".gz"): + # Check target filename + extracted_filename = env.get('CM_EXTRACT_EXTRACTED_FILENAME','') + if extracted_filename == '': + extracted_filename = os.path.basename(filename)[:-3] + env['CM_EXTRACT_EXTRACTED_FILENAME'] = extracted_filename + + x = '-c' if windows else '-k' + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d '+ (x + ' ' if not remove_extracted else '') + ' > ' + extracted_filename + ' < ' + + env['CM_EXTRACT_TOOL'] = 'gzip ' + elif env.get('CM_EXTRACT_UNZIP','') == 'yes': + env['CM_EXTRACT_TOOL'] = 'unzip ' + elif env.get('CM_EXTRACT_UNTAR','') == 'yes': + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf' + env['CM_EXTRACT_TOOL'] = 'tar ' + elif env.get('CM_EXTRACT_GZIP','') == 'yes': + env['CM_EXTRACT_CMD'] = 'gzip ' + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d '+ ('-k ' if not remove_extracted else '') + else: + return {'return': 1, 'error': 'Neither CM_EXTRACT_UNZIP nor CM_EXTRACT_UNTAR is yes'} + + env['CM_EXTRACT_PRE_CMD'] = '' + + extract_to_folder = env.get('CM_EXTRACT_TO_FOLDER', '') + + # Check if extract to additional folder in the current directory (or external path) + # to avoid messing up other files and keep clean directory structure + # particularly if archive has many sub-directories and files + if extract_to_folder != '': + if 'tar ' in env['CM_EXTRACT_TOOL']: + x = '' if windows else '-p' + + #env['CM_EXTRACT_TOOL_OPTIONS'] = ' --one-top-level='+ env['CM_EXTRACT_TO_FOLDER'] + env.get('CM_EXTRACT_TOOL_OPTIONS', '') + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -C '+ extract_to_folder + ' ' + env.get('CM_EXTRACT_TOOL_OPTIONS', '') + env['CM_EXTRACT_PRE_CMD'] = 'mkdir '+x+' '+ extract_to_folder + ' ' + xsep + ' ' + env['CM_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder + + elif 'unzip' in env['CM_EXTRACT_TOOL']: + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d '+ extract_to_folder + env['CM_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder + + + x = '"' if ' ' in filename else '' + env['CM_EXTRACT_CMD'] = env['CM_EXTRACT_PRE_CMD'] + env['CM_EXTRACT_TOOL'] + ' ' + \ + env.get('CM_EXTRACT_TOOL_EXTRA_OPTIONS', '') + \ + ' ' + env.get('CM_EXTRACT_TOOL_OPTIONS', '')+ ' '+ x + filename + x + + print ('') + print ('Current directory: {}'.format(os.getcwd())) + print ('Command line: 
"{}"'.format(env['CM_EXTRACT_CMD'])) + print ('') + + final_file = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '') + + if final_file!='': + if env.get('CM_EXTRACT_EXTRACTED_CHECKSUM_FILE', '') != '': + env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = ("cd {} " + xsep + " md5sum -c {}").format(final_file, env.get('CM_EXTRACT_EXTRACTED_CHECKSUM_FILE')) + elif env.get('CM_EXTRACT_EXTRACTED_CHECKSUM', '') != '': + x='*' if os_info['platform'] == 'windows' else '' + env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = "echo {} {}{} | md5sum -c".format(env.get('CM_EXTRACT_EXTRACTED_CHECKSUM'), x, env['CM_EXTRACT_EXTRACTED_FILENAME']) + else: + env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = "" + else: + env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = "" + +# Not needed - can be simpler with cmd /c {empty} +# if os_info['platform'] == 'windows': +# # Check that if empty CMD, should add "" +# for x in ['CM_EXTRACT_CMD', 'CM_EXTRACT_EXTRACTED_CHECKSUM_CMD']: +# env[x+'_USED']='YES' if env.get(x,'')!='' else 'NO' + + + # If force cache, add filepath to tag unless _path is used ... + path_tag = 'path.'+filename + + add_extra_cache_tags = [] + if path_tag not in variation_tags: + add_extra_cache_tags.append(path_tag) + + return {'return':0, 'add_extra_cache_tags':add_extra_cache_tags} + + +def postprocess(i): + + automation = i['automation'] + + env = i['env'] + + extract_to_folder = env.get('CM_EXTRACT_TO_FOLDER', '') + extract_path = env.get('CM_EXTRACT_PATH', '') + + extracted_file = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '') + + # Preparing filepath + # Can be either full extracted filename (such as model) or folder + + if extracted_file != '': + filename = os.path.basename(extracted_file) + +# We do not use this env variable anymore +# folderpath = env.get('CM_EXTRACT_EXTRACT_TO_PATH', '') + folderpath = extract_path if extract_path!='' else os.getcwd() + + filepath = os.path.join(folderpath, filename) + else: + + filepath = os.getcwd() # Extracted to the root cache folder + + if not os.path.exists(filepath): + return {'return':1, 'error': 'Path {} was not created or doesn\'t exist'.format(filepath)} +# return {'return':1, 'error': 'CM_EXTRACT_EXTRACTED_FILENAME and CM_EXTRACT_TO_FOLDER are not set'} + + env['CM_EXTRACT_EXTRACTED_PATH'] = filepath + + # Set external environment variable with the final path + if env.get('CM_EXTRACT_FINAL_ENV_NAME', '')!='': + env[env['CM_EXTRACT_FINAL_ENV_NAME']] = filepath + + # Detect if this file will be deleted or moved + env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath + + # Check if need to remove archive after extraction + if env.get('CM_EXTRACT_REMOVE_EXTRACTED','').lower() != 'no': + archive_filepath=env.get('CM_EXTRACT_FILEPATH','') + if archive_filepath!='' and os.path.isfile(archive_filepath): + os.remove(archive_filepath) + + # Since may change directory, check if need to clean some temporal files + automation.clean_some_tmp_files({'env':env}) + + return {'return':0} diff --git a/script/extract-file/run.bat b/script/extract-file/run.bat new file mode 100644 index 0000000000..530ebbd2c3 --- /dev/null +++ b/script/extract-file/run.bat @@ -0,0 +1,39 @@ +rem Extract file + +rem If MD5 is wrong, extrat again! + +rem Next line allows ERRORLEVEL inside if statements! +setlocal enabledelayedexpansion + +set require_extract=1 + +if exist "%CM_EXTRACT_EXTRACTED_FILENAME%" ( + set require_extract=0 + + echo. + echo %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD% + cmd /c %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD% + IF !ERRORLEVEL! 
NEQ 0 ( + set require_extract=1 + del /Q %CM_EXTRACT_EXTRACTED_FILENAME% + ) +) + +if "!require_extract!" == "1" ( + if not "%CM_EXTRACT_CMD0%" == "" ( + echo. + echo %CM_EXTRACT_CMD0% + cmd /c %CM_EXTRACT_CMD0% + IF !ERRORLEVEL! NEQ 0 EXIT 1 + ) + + echo. + echo %CM_EXTRACT_CMD% + cmd /c %CM_EXTRACT_CMD% + IF !ERRORLEVEL! NEQ 0 EXIT 1 + + echo. + echo %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD% + cmd /c %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD% + IF !ERRORLEVEL! NEQ 0 EXIT 1 +) diff --git a/script/extract-file/run.sh b/script/extract-file/run.sh new file mode 100644 index 0000000000..4ee4f8512b --- /dev/null +++ b/script/extract-file/run.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +if [ -e "${CM_EXTRACT_EXTRACTED_FILENAME}" ] ; then + CMD=${CM_EXTRACT_EXTRACTED_CHECKSUM_CMD} + echo "" + echo "${CMD}" + eval "${CMD}" + test $? -eq 0 && exit 0 +fi + +CMD=${CM_EXTRACT_CMD} +echo "" +echo "${CMD}" +eval "${CMD}" +test $? -eq 0 || exit $? + +CMD=${CM_EXTRACT_EXTRACTED_CHECKSUM_CMD} +echo "" +echo "${CMD}" +eval "${CMD}" +test $? -eq 0 || exit $? diff --git a/script/fail/README-extra.md b/script/fail/README-extra.md new file mode 100644 index 0000000000..582991f6d2 --- /dev/null +++ b/script/fail/README-extra.md @@ -0,0 +1 @@ +# CM script diff --git a/script/fail/README.md b/script/fail/README.md new file mode 100644 index 0000000000..6a84f0efa8 --- /dev/null +++ b/script/fail/README.md @@ -0,0 +1,134 @@ +Automatically generated README for this automation recipe: **fail** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=fail,3aaee82e19d243cd) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/fail)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *fail,filter* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "fail filter" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=fail,filter` + +`cm run script --tags=fail,filter[,variations] ` + +*or* + +`cmr "fail filter"` + +`cmr "fail filter [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'fail,filter',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="fail,filter"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=fail,filter) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "fail filter[variations]" `
+
+___
+### Customization
+
+
+#### Variations
+
+ * *No group (any variation can be selected)*
+
+ Click here to expand this section. + + * `_windows` + - Environment variables: + - *CM_FAIL_WINDOWS*: `True` + - Workflow: + +
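+
+For example (an illustrative command composed from the tags and variation above), the following run fails on Windows and succeeds elsewhere, which lets workflows filter out unsupported platforms:
+
+```bash
+cmr "fail filter _windows"
+```
+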
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/fail/_cm.yaml) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/fail/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/fail/_cm.yaml) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/fail/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/fail/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/fail/_cm.yaml) + +___ +### Script output +`cmr "fail filter [,variations]" -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/fail/_cm.yaml b/script/fail/_cm.yaml new file mode 100644 index 0000000000..9c5d8fcfc8 --- /dev/null +++ b/script/fail/_cm.yaml @@ -0,0 +1,18 @@ +uid: 3aaee82e19d243cd +alias: fail + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "DevOps automation" + +cache: false + +tags: +- fail +- filter + +variations: + windows: + env: + CM_FAIL_WINDOWS: true diff --git a/script/fail/customize.py b/script/fail/customize.py new file mode 100644 index 0000000000..855c39b5bb --- /dev/null +++ b/script/fail/customize.py @@ -0,0 +1,28 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + # Checking conditions + if env.get('CM_FAIL_WINDOWS','').lower()=='true': + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'CM detected fail condition: running on Windows'} + + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/flash-tinyml-binary/README-extra.md b/script/flash-tinyml-binary/README-extra.md new file mode 100644 index 0000000000..1c50fc8e7e --- /dev/null +++ b/script/flash-tinyml-binary/README-extra.md @@ -0,0 +1,16 @@ +This script flashes the ELF binary using Zephyr. 
+## Install +```bash +cm run script --tags=flash,tiny,_[VARIANT],_[MODEL] +``` +where, +* `[VARIANT]` is one of `cmsis_nn`,`native` +* `[MODEL]` is one of `ad`, `ic`, `kws`, `vww` + +We can also pass a known build directory like here: + +```bash +cm run script --tags=flash,tiny --build_dir=[BUILD_DIR] +``` +where, +* `[BUILD_DIR]` is the build folder containing the zephyr folder which in turn contains the built ELF binary diff --git a/script/flash-tinyml-binary/README.md b/script/flash-tinyml-binary/README.md new file mode 100644 index 0000000000..0dc325131b --- /dev/null +++ b/script/flash-tinyml-binary/README.md @@ -0,0 +1,176 @@ +Automatically generated README for this automation recipe: **flash-tinyml-binary** + +Category: **TinyML automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=flash-tinyml-binary,98913babb43f4fcb) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/flash-tinyml-binary)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *flash,tiny,mlperf,mlcommons* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "flash tiny mlperf mlcommons" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=flash,tiny,mlperf,mlcommons` + +`cm run script --tags=flash,tiny,mlperf,mlcommons[,variations] [--input_flags]` + +*or* + +`cmr "flash tiny mlperf mlcommons"` + +`cmr "flash tiny mlperf mlcommons [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'flash,tiny,mlperf,mlcommons',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="flash,tiny,mlperf,mlcommons"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=flash,tiny,mlperf,mlcommons) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "flash tiny mlperf mlcommons[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_NRF` + - Workflow: + * `_NUCLEO` + - Workflow: + * `_ad` + - Workflow: + * `_cmsis_nn` + - Workflow: + * `_ic` + - Workflow: + * `_kws` + - Workflow: + * `_native` + - Workflow: + * `_vww` + - Workflow: + +
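+
+As a minimal sketch (assuming only the variation names listed above), variations can be appended to the base tags from Python; the pairing of `_cmsis_nn` with `_kws` is illustrative only:
+
+```python
+import cmind
+
+# Sketch: variation tags (prefixed with "_") are appended to the script tags.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'flash,tiny,mlperf,mlcommons,_cmsis_nn,_kws',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```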
+ + +#### Script flags mapped to environment +
+
+Click here to expand this section.
+
+* `--build_dir=value`  →  `CM_TINY_BUILD_DIR=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "build_dir":...})
+```
+
+</details>
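+
+As a minimal sketch (not an officially tested example), the same flag can be passed from Python; the build path below is a placeholder:
+
+```python
+import cmind
+
+# Sketch: 'build_dir' maps to CM_TINY_BUILD_DIR per the table above;
+# '/path/to/zephyr/build' is a placeholder, not a tested value.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'flash,tiny,mlperf,mlcommons',
+                  'out': 'con',
+                  'build_dir': '/path/to/zephyr/build'})
+if r['return'] > 0:
+    print(r['error'])
+```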
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+
+
+#### Versions
+Default version: `r1.0`
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/flash-tinyml-binary/_cm.json)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * get,zephyr
+       * CM names: `--adr.['zephyr']...`
+       - CM script: [get-zephyr](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-zephyr)
+     * get,zephyr-sdk
+       * CM names: `--adr.['zephyr-sdk']...`
+       - CM script: [get-zephyr-sdk](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-zephyr-sdk)
+     * reproduce,tiny,mlperf
+       * `if (CM_TINY_BUILD_DIR != on)`
+       - CM script: [reproduce-mlperf-octoml-tinyml-results](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-mlperf-octoml-tinyml-results)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/flash-tinyml-binary/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/flash-tinyml-binary/_cm.json)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/flash-tinyml-binary/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/flash-tinyml-binary/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/flash-tinyml-binary/customize.py)***
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/flash-tinyml-binary/_cm.json)
+
+___
+### Script output
+`cmr "flash tiny mlperf mlcommons [,variations]" [--input_flags] -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
diff --git a/script/flash-tinyml-binary/_cm.json b/script/flash-tinyml-binary/_cm.json
new file mode 100644
index 0000000000..d9ee385bc1
--- /dev/null
+++ b/script/flash-tinyml-binary/_cm.json
@@ -0,0 +1,57 @@
+{
+  "alias": "flash-tinyml-binary",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "category": "TinyML automation",
+  "cache": false,
+  "default_version": "r1.0",
+  "deps": [
+    {
+      "tags": "detect,os"
+    },
+    {
+      "names": [
+        "zephyr"
+      ],
+      "tags": "get,zephyr"
+    },
+    {
+      "names": [
+        "zephyr-sdk"
+      ],
+      "tags": "get,zephyr-sdk"
+    },
+    {
+      "inherit_variation_tags": "True",
+      "skip_if_env": {
+        "CM_TINY_BUILD_DIR": [
+          "on"
+        ]
+      },
+      "tags": "reproduce,tiny,mlperf"
+    }
+  ],
+  "input_mapping": {
+    "build_dir": "CM_TINY_BUILD_DIR"
+  },
+  "local_env_keys": [
+    "CM_*"
+  ],
+  "tags": [
+    "flash",
+    "tiny",
+    "mlperf",
+    "mlcommons"
+  ],
+  "uid": "98913babb43f4fcb",
+  "variations": {
+    "NRF": {},
+    "NUCLEO": {},
+    "ad": {},
+    "cmsis_nn": {},
+    "ic": {},
+    "kws": {},
+    "native": {},
+    "vww": {}
+  }
+}
diff --git a/script/flash-tinyml-binary/customize.py b/script/flash-tinyml-binary/customize.py
new file mode 100644
index 0000000000..a2062be59c
--- /dev/null
+++ b/script/flash-tinyml-binary/customize.py
@@ -0,0 +1,19 @@
+from cmind import utils
+import os
+
+def preprocess(i):
+
+    os_info = i['os_info']
+    env = i['env']
+
+    if os_info['platform'] == 'windows':
+        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+    if 'CM_TINY_BUILD_DIR' not in env:
+        return {'return':1, 'error': 'Please set CM_TINY_BUILD_DIR to the build directory of the model'}
+    return {'return':0}
+
+def postprocess(i):
+
+    env = i['env']
+
+    return {'return':0}
diff --git a/script/flash-tinyml-binary/run.sh b/script/flash-tinyml-binary/run.sh
new file mode 100644
index 0000000000..962dc74d53
--- /dev/null
+++ b/script/flash-tinyml-binary/run.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+build_dir=${CM_TINY_BUILD_DIR}
+cmd="cd ${CM_ZEPHYR_DIR}"
+echo $cmd
+eval $cmd
+cmd="west flash --build-dir ${build_dir}"
+echo $cmd
+eval $cmd
+test $? -eq 0 || exit 1
+
diff --git a/script/generate-mlperf-inference-submission/README-extra.md b/script/generate-mlperf-inference-submission/README-extra.md
new file mode 100644
index 0000000000..0510432d5b
--- /dev/null
+++ b/script/generate-mlperf-inference-submission/README-extra.md
@@ -0,0 +1,12 @@
+# Generate MLPerf Inference Submission Folder
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) takes in an MLPerf Inference results folder (the folder structure is assumed to match the one produced by the MLPerf inference reference implementation) and produces a valid submission folder as required by the [MLPerf Inference submission checker](https://github.com/mlcommons/inference/blob/master/tools/submission/submission-checker.py).
+
+## How To
+```bash
+cm run script --tags=generate,mlperf-inference-submission --results_dir=[MLPERF_RESULT_DIR] --submission_dir=[SUBMISSION_FOLDER]
+```
+
+### Additional Options
+* `[--run_checker]:` Runs the MLPerf Inference submission checker on the produced submission folder
+* `[--skip_truncation]:` If on, skips truncation of the accuracy logs (useful for testing)
+* `[--run_style]:` If set to "valid", indicates that the results folder comes from a full and valid MLPerf inference run and triggers the accuracy truncation script unless the `--skip_truncation` flag is set
diff --git a/script/generate-mlperf-inference-submission/README.md b/script/generate-mlperf-inference-submission/README.md
new file mode 100644
index 0000000000..c29c2ad73c
--- /dev/null
+++ b/script/generate-mlperf-inference-submission/README.md
@@ -0,0 +1,186 @@
+Automatically generated README for this automation recipe: **generate-mlperf-inference-submission**
+
+Category: **MLPerf benchmark support**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=generate-mlperf-inference-submission,5f8ab2d0b5874d53) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-inference-submission)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *generate,submission,mlperf,mlperf-inference,inference,mlcommons,inference-submission,mlperf-inference-submission,mlcommons-inference-submission*
+* Output cached?
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=generate,submission,mlperf,mlperf-inference,inference,mlcommons,inference-submission,mlperf-inference-submission,mlcommons-inference-submission` + +`cm run script --tags=generate,submission,mlperf,mlperf-inference,inference,mlcommons,inference-submission,mlperf-inference-submission,mlcommons-inference-submission [--input_flags]` + +*or* + +`cmr "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission"` + +`cmr "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission " [--input_flags]` + + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'generate,submission,mlperf,mlperf-inference,inference,mlcommons,inference-submission,mlperf-inference-submission,mlcommons-inference-submission',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+</details>
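+
+Since every CM call returns a dictionary with a numeric `return` code (and an `error` string on failure), the check above can be factored into a small helper. The `run_or_raise` wrapper below is a hypothetical convenience, not part of the CM API:
+
+```python
+import cmind
+
+def run_or_raise(**kwargs):
+    # Hypothetical helper: run a CM action and raise on failure
+    # instead of checking r['return'] manually after each call.
+    r = cmind.access(kwargs)
+    if r['return'] > 0:
+        raise RuntimeError(r.get('error', 'unknown CM error'))
+    return r
+```
+
+It takes the same input keys as `cmind.access` above, so the snippet above can be rewritten as a single call.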
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="generate,submission,mlperf,mlperf-inference,inference,mlcommons,inference-submission,mlperf-inference-submission,mlcommons-inference-submission"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=generate,submission,mlperf,mlperf-inference,inference,mlcommons,inference-submission,mlperf-inference-submission,mlcommons-inference-submission) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+
+Click here to expand this section.
+
+* `--analyzer_settings_file=value`  →  `CM_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH=value`
+* `--category=value`  →  `CM_MLPERF_SUBMISSION_CATEGORY=value`
+* `--clean=value`  →  `CM_MLPERF_CLEAN_SUBMISSION_DIR=value`
+* `--dashboard=value`  →  `CM_MLPERF_DASHBOARD=value`
+* `--dashboard_wb_project=value`  →  `CM_MLPERF_DASHBOARD_WANDB_PROJECT=value`
+* `--device=value`  →  `CM_MLPERF_DEVICE=value`
+* `--division=value`  →  `CM_MLPERF_SUBMISSION_DIVISION=value`
+* `--duplicate=value`  →  `CM_MLPERF_DUPLICATE_SCENARIO_RESULTS=value`
+* `--hw_name=value`  →  `CM_HW_NAME=value`
+* `--hw_notes_extra=value`  →  `CM_MLPERF_SUT_HW_NOTES_EXTRA=value`
+* `--infer_scenario_results=value`  →  `CM_MLPERF_DUPLICATE_SCENARIO_RESULTS=value`
+* `--power_settings_file=value`  →  `CM_MLPERF_POWER_SETTINGS_FILE_PATH=value`
+* `--preprocess=value`  →  `CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR=value`
+* `--preprocess_submission=value`  →  `CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR=value`
+* `--results_dir=value`  →  `CM_MLPERF_INFERENCE_RESULTS_DIR_=value`
+* `--run_checker=value`  →  `CM_RUN_SUBMISSION_CHECKER=value`
+* `--run_style=value`  →  `CM_MLPERF_RUN_STYLE=value`
+* `--skip_truncation=value`  →  `CM_SKIP_TRUNCATE_ACCURACY=value`
+* `--submission_dir=value`  →  `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
+* `--submitter=value`  →  `CM_MLPERF_SUBMITTER=value`
+* `--sw_notes_extra=value`  →  `CM_MLPERF_SUT_SW_NOTES_EXTRA=value`
+* `--tar=value`  →  `CM_TAR_SUBMISSION_DIR=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "analyzer_settings_file":...})
+```
+
+</details>
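+
+Combining several of the flags above, a hedged end-to-end sketch could look as follows; both paths are placeholders and every input key comes from the table above:
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'generate,submission,mlperf,mlperf-inference,inference,'
+                          'mlcommons,inference-submission,mlperf-inference-submission,'
+                          'mlcommons-inference-submission',
+                  'out': 'con',
+                  'results_dir': '/path/to/valid_results',  # placeholder
+                  'submission_dir': '/path/to/submission',  # placeholder
+                  'clean': 'yes',
+                  'run_checker': 'yes'})
+if r['return'] > 0:
+    print(r['error'])
+```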
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_RUN_MLPERF_ACCURACY: `on` +* CM_MLPERF_RUN_STYLE: `valid` + +
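+
+As a minimal sketch of updating one of these defaults through the Python API's `env` dictionary (the value `test` is purely illustrative):
+
+```python
+import cmind
+
+# Sketch: override the documented CM_MLPERF_RUN_STYLE default ('valid').
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'generate,submission,mlperf,mlperf-inference,inference,'
+                          'mlcommons,inference-submission,mlperf-inference-submission,'
+                          'mlcommons-inference-submission',
+                  'out': 'con',
+                  'env': {'CM_MLPERF_RUN_STYLE': 'test'}})
+if r['return'] > 0:
+    print(r['error'])
+```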
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-inference-submission/_cm.json)***
+     * get,python3
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * mlcommons,inference,src
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,sut,system-description
+       - CM script: [get-mlperf-inference-sut-description](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-sut-description)
+     * install,pip-package,for-cmind-python,_package.tabulate
+       - CM script: [install-pip-package-for-cmind-python](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pip-package-for-cmind-python)
+     * get,mlperf,inference,utils
+       - CM script: [get-mlperf-inference-utils](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-utils)
+     * get,mlperf,results,dir
+       * `if (CM_MLPERF_INFERENCE_RESULTS_DIR_ != on)`
+       * CM names: `--adr.['get-mlperf-results-dir']...`
+       - CM script: [get-mlperf-inference-results-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-results-dir)
+     * get,mlperf,submission,dir
+       * `if (CM_MLPERF_INFERENCE_SUBMISSION_DIR != on)`
+       * CM names: `--adr.['get-mlperf-submission-dir']...`
+       - CM script: [get-mlperf-inference-submission-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-submission-dir)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-inference-submission/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-inference-submission/_cm.json)
+  1. ***Run native script if exists***
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-inference-submission/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-inference-submission/customize.py)***
+  1.
***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-inference-submission/_cm.json)*** + * accuracy,truncate,mlc + * `if (CM_RUN_MLPERF_ACCURACY == on) AND (CM_SKIP_TRUNCATE_ACCURACY != yes)` + - CM script: [truncate-mlperf-inference-accuracy-log](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) + * preprocess,mlperf,submission + * `if (CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR in ['on', 'True', 'yes', True])` + - CM script: [preprocess-mlperf-inference-submission](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/preprocess-mlperf-inference-submission) + * submission,inference,checker,mlc + * `if (CM_RUN_SUBMISSION_CHECKER == yes)` + * CM names: `--adr.['mlperf-inference-submission-checker', 'submission-checker']...` + - CM script: [run-mlperf-inference-submission-checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) + +___ +### Script output +`cmr "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/generate-mlperf-inference-submission/_cm.json b/script/generate-mlperf-inference-submission/_cm.json new file mode 100644 index 0000000000..0db08b54ce --- /dev/null +++ b/script/generate-mlperf-inference-submission/_cm.json @@ -0,0 +1,124 @@ +{ + "alias": "generate-mlperf-inference-submission", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "MLPerf benchmark support", + "cache": false, + "deps": [ + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "names": [ + "inference-src" + ], + "tags": "mlcommons,inference,src" + }, + { + "tags": "get,sut,system-description" + }, + { + "tags": "install,pip-package,for-cmind-python,_package.tabulate" + }, + { + "tags": "get,mlperf,inference,utils" + }, + { + "tags": "get,mlperf,results,dir", + "names": [ + "get-mlperf-results-dir" + ], + "skip_if_env": { + "CM_MLPERF_INFERENCE_RESULTS_DIR_": [ "on" ] + } + }, + { + "tags": "get,mlperf,submission,dir", + "names": [ + "get-mlperf-submission-dir" + ], + "skip_if_env": { + "CM_MLPERF_INFERENCE_SUBMISSION_DIR": [ "on" ] + } + } + ], + "input_mapping": { + "device": "CM_MLPERF_DEVICE", + "results_dir": "CM_MLPERF_INFERENCE_RESULTS_DIR_", + "run_checker": "CM_RUN_SUBMISSION_CHECKER", + "run_style": "CM_MLPERF_RUN_STYLE", + "skip_truncation": "CM_SKIP_TRUNCATE_ACCURACY", + "submission_dir": "CM_MLPERF_INFERENCE_SUBMISSION_DIR", + "clean": "CM_MLPERF_CLEAN_SUBMISSION_DIR", + "hw_name": "CM_HW_NAME", + "sw_notes_extra": "CM_MLPERF_SUT_SW_NOTES_EXTRA", + "hw_notes_extra": "CM_MLPERF_SUT_HW_NOTES_EXTRA", + "duplicate": "CM_MLPERF_DUPLICATE_SCENARIO_RESULTS", + "dashboard": "CM_MLPERF_DASHBOARD", + "dashboard_wb_project": "CM_MLPERF_DASHBOARD_WANDB_PROJECT", + "division": "CM_MLPERF_SUBMISSION_DIVISION", + "category": "CM_MLPERF_SUBMISSION_CATEGORY", + "analyzer_settings_file": "CM_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH", + "power_settings_file": "CM_MLPERF_POWER_SETTINGS_FILE_PATH", + "infer_scenario_results": "CM_MLPERF_DUPLICATE_SCENARIO_RESULTS", + "submitter": "CM_MLPERF_SUBMITTER", + "preprocess": "CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR", + "preprocess_submission": "CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR", + "tar": "CM_TAR_SUBMISSION_DIR" 
+ }, + "default_env": { + "CM_RUN_MLPERF_ACCURACY": "on", + "CM_MLPERF_RUN_STYLE": "valid" + }, + "post_deps": [ + { + "enable_if_env": { + "CM_RUN_MLPERF_ACCURACY": [ + "on" + ] + }, + "skip_if_env": { + "CM_SKIP_TRUNCATE_ACCURACY": [ + "yes" + ] + }, + "tags": "accuracy,truncate,mlc" + }, + { + "enable_if_env": { + "CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR": [ + "on", "True", "yes", true + ] + }, + "tags": "preprocess,mlperf,submission" + }, + { + "enable_if_env": { + "CM_RUN_SUBMISSION_CHECKER": [ + "yes" + ] + }, + "tags": "submission,inference,checker,mlc", + "names": [ + "mlperf-inference-submission-checker", + "submission-checker" + ] + } + ], + "tags": [ + "generate", + "submission", + "mlperf", + "mlperf-inference", + "inference", + "mlcommons", + "inference-submission", + "mlperf-inference-submission", + "mlcommons-inference-submission" + ], + "uid": "5f8ab2d0b5874d53" +} diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py new file mode 100644 index 0000000000..c29b34e241 --- /dev/null +++ b/script/generate-mlperf-inference-submission/customize.py @@ -0,0 +1,339 @@ +from cmind import utils +import os +import json +import shutil +import cmind +import sys +from tabulate import tabulate +import mlperf_utils + +def preprocess(i): + return {'return': 0} + +def generate_submission(i): + + # Save current user directory + cur_dir=os.getcwd() + env = i['env'] + state = i['state'] + inp=i['input'] + + if env.get('CM_MLPERF_INFERENCE_RESULTS_DIR_', '') == '': + env['CM_MLPERF_INFERENCE_RESULTS_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_DIR'], f"{env['CM_MLPERF_RUN_STYLE']}_results") + else: + env['CM_MLPERF_INFERENCE_RESULTS_DIR'] = env['CM_MLPERF_INFERENCE_RESULTS_DIR_'] + + mlperf_path = env['CM_MLPERF_INFERENCE_SOURCE'] + submission_checker_dir = os.path.join(mlperf_path, "tools", "submission") + sys.path.append(submission_checker_dir) + + results_dir = env['CM_MLPERF_INFERENCE_RESULTS_DIR'] + + if env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '') == '': + from pathlib import Path + user_home = str(Path.home()) + env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] = os.path.join(user_home, "mlperf_submission") + + submission_dir = env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '') + + if env.get('CM_MLPERF_CLEAN_SUBMISSION_DIR','')!='': + print ('=================================================') + print ('Cleaning {} ...'.format(env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'])) + if os.path.exists(env['CM_MLPERF_INFERENCE_SUBMISSION_DIR']): + shutil.rmtree(env['CM_MLPERF_INFERENCE_SUBMISSION_DIR']) + print ('=================================================') + + if not os.path.isdir(submission_dir): + os.makedirs(submission_dir) + + print('* MLPerf inference submission dir: {}'.format(submission_dir)) + print('* MLPerf inference results dir: {}'.format(results_dir)) + results = [f for f in os.listdir(results_dir) if not os.path.isfile(os.path.join(results_dir, f))] + + system_meta_default = state['CM_SUT_META'] + system_meta = {} + if 'CM_MLPERF_SUBMISSION_SYSTEM_TYPE' in env: + system_meta['system_type'] = env['CM_MLPERF_SUBMISSION_SYSTEM_TYPE'] + + if 'CM_MLPERF_SUBMISSION_DIVISION' in env: + system_meta['division'] = env['CM_MLPERF_SUBMISSION_DIVISION'] + + if 'CM_MLPERF_SUBMISSION_CATEGORY' in env: + system_meta['system_type'] = env['CM_MLPERF_SUBMISSION_CATEGORY'].replace("-", ",") + + duplicate= (env.get('CM_MLPERF_DUPLICATE_SCENARIO_RESULTS', 'no') in ["yes", "True"]) + + if 
env.get('CM_MLPERF_SUBMISSION_DIVISION', '') != '': + division = env['CM_MLPERF_SUBMISSION_DIVISION'] + system_meta['division'] = division + else: + division = system_meta_default['division'] + + if division not in ['open','closed']: + return {'return':1, 'error':'"division" must be "open" or "closed"'} + + print('* MLPerf inference division: {}'.format(division)) + + path_submission_root = submission_dir + path_submission_division=os.path.join(path_submission_root, division) + if not os.path.isdir(path_submission_division): + os.makedirs(path_submission_division) + + # Check submitter + if env.get('CM_MLPERF_SUBMITTER'): + submitter = env['CM_MLPERF_SUBMITTER'] + system_meta['submitter'] = submitter + else: + submitter = system_meta_default['submitter'] + env['CM_MLPERF_SUBMITTER'] = submitter + + print('* MLPerf inference submitter: {}'.format(submitter)) + + if 'Collective' not in system_meta_default.get('sw_notes'): + system_meta['sw_notes'] = "Automated by MLCommons CM v{}. ".format(cmind.__version__) + system_meta_default['sw_notes'] + + if env.get('CM_MLPERF_SUT_SW_NOTES_EXTRA','') != '': + sw_notes = f"{system_meta['sw_notes']} {env['CM_MLPERF_SUT_SW_NOTES_EXTRA']}" + system_meta['sw_notes'] = sw_notes + + if env.get('CM_MLPERF_SUT_HW_NOTES_EXTRA','') != '': + hw_notes = f"{system_meta['hw_notes']} {env['CM_MLPERF_SUT_HW_NOTES_EXTRA']}" + system_meta['hw_notes'] = hw_notes + + path_submission=os.path.join(path_submission_division, submitter) + if not os.path.isdir(path_submission): + os.makedirs(path_submission) + + # SUT base + system=env.get('CM_HW_NAME','default').replace(' ','_') + + code_path = os.path.join(path_submission, "code") + + for res in results: + parts = res.split("-") + if len(parts) > 5: #result folder structure used by CM script + system = parts[0] if system == 'default' else system + implementation = parts[1] + device = parts[2] + framework = parts[3] + framework_version = parts[4] + run_config = parts[5] + + print('* System: {}'.format(system)) + print('* Implementation: {}'.format(implementation)) + print('* Device: {}'.format(device)) + print('* Framework: {}'.format(framework)) + print('* Framework Version: {}'.format(framework_version)) + print('* Run Config: {}'.format(run_config)) + + new_res = system + "-" + "-".join(parts[1:]) + + # Override framework and framework versions from the folder name + system_meta_default['framework'] = framework + " " + framework_version + else: + print(parts) + return {'return': 1} + result_path = os.path.join(results_dir, res) + platform_prefix = inp.get('platform_prefix', '') + if platform_prefix: + sub_res = platform_prefix + "-" + new_res + else: + sub_res = new_res + + submission_path = os.path.join(path_submission, "results", sub_res) + measurement_path = os.path.join(path_submission, "measurements", sub_res) + compliance_path = os.path.join(path_submission, "compliance", sub_res) + system_path = os.path.join(path_submission, "systems") + submission_system_path = system_path + if not os.path.isdir(submission_system_path): + os.makedirs(submission_system_path) + system_file = os.path.join(submission_system_path, sub_res+".json") + + models = [f for f in os.listdir(result_path) if not os.path.isfile(os.path.join(result_path, f))] + + results = {} + + for model in models: + results[model] = {} + result_model_path = os.path.join(result_path, model) + submission_model_path = os.path.join(submission_path, model) + measurement_model_path = os.path.join(measurement_path, model) + compliance_model_path = 
os.path.join(compliance_path, model) + code_model_path = os.path.join(code_path, model) + scenarios = [f for f in os.listdir(result_model_path) if not os.path.isfile(os.path.join(result_model_path, f))] + submission_code_path = code_model_path + if not os.path.isdir(submission_code_path): + os.makedirs(submission_code_path) + if not os.path.exists(os.path.join(submission_code_path, "README.md")): + with open(os.path.join(submission_code_path, "README.md"), mode='w') as f: + f.write("TBD") #create an empty README + + print('* MLPerf inference model: {}'.format(model)) + for scenario in scenarios: + results[model][scenario] = {} + result_scenario_path = os.path.join(result_model_path, scenario) + submission_scenario_path = os.path.join(submission_model_path, scenario) + measurement_scenario_path = os.path.join(measurement_model_path, scenario) + compliance_scenario_path = os.path.join(compliance_model_path, scenario) + + if duplicate and scenario=='singlestream': + if not os.path.exists(os.path.join(result_model_path, "offline")): + print('Duplicating results from {} to offline:'.format(scenario)) + shutil.copytree(result_scenario_path, os.path.join(result_model_path, "offline")) + scenarios.append("offline") + if not os.path.exists(os.path.join(result_model_path, "multistream")): + print('Duplicating results from {} to multistream:'.format(scenario)) + shutil.copytree(result_scenario_path, os.path.join(result_model_path, "multistream")) + scenarios.append("multistream") + + modes = [f for f in os.listdir(result_scenario_path) if not os.path.isfile(os.path.join(result_scenario_path, f))] + power_run = False + for mode in modes: + result_mode_path = os.path.join(result_scenario_path, mode) + submission_mode_path = os.path.join(submission_scenario_path, mode) + submission_measurement_path = measurement_scenario_path + submission_compliance_path = os.path.join(compliance_scenario_path, mode) + if mode.startswith("TEST"): + submission_results_path = submission_compliance_path + else: + submission_results_path = submission_mode_path + if os.path.exists(submission_results_path): + shutil.rmtree(submission_results_path) + + if not os.path.isdir(submission_measurement_path): + os.makedirs(submission_measurement_path) + + if mode=='performance': + + if os.path.exists(os.path.join(result_mode_path, "power")): + power_run = True + result_power_path=os.path.join(result_mode_path, 'power') + submission_power_path=os.path.join(submission_mode_path, 'power') + os.makedirs(submission_power_path) + power_files = [] + for f in os.listdir(result_power_path): + power_files.append(f) #Todo add required check from submission_checker + for f in power_files: + shutil.copy(os.path.join(result_power_path, f), os.path.join(submission_power_path, f)) + + analyzer_settings_file = env.get('CM_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH', os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "default_files", "analyzer_table.md")) + power_settings_file = env.get('CM_MLPERF_POWER_SETTINGS_FILE_PATH', os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "default_files", "power_settings.md")) + + shutil.copy(analyzer_settings_file, os.path.join(submission_measurement_path, "analyzer_table.md")) + shutil.copy(power_settings_file, os.path.join(submission_measurement_path, "power_settings.md")) + + result_ranging_path=os.path.join(result_mode_path, 'ranging') + submission_ranging_path=os.path.join(submission_mode_path, 'ranging') + os.makedirs(submission_ranging_path) + ranging_files = [] + for f in os.listdir(result_ranging_path): + 
ranging_files.append(f) #Todo add required check from submission_checker
+                            for f in ranging_files:
+                                shutil.copy(os.path.join(result_ranging_path, f), os.path.join(submission_ranging_path, f))
+
+                        result_mode_path=os.path.join(result_mode_path, 'run_1')
+                        submission_results_path=os.path.join(submission_mode_path, 'run_1')
+
+                    if os.path.exists(os.path.join(result_mode_path, "system_meta.json")):
+                        with open(os.path.join(result_mode_path, "system_meta.json"), "r") as f:
+                            saved_system_meta = json.load(f)
+                            for key in list(saved_system_meta):
+                                if saved_system_meta[key]==None or str(saved_system_meta[key]).strip() == '':
+                                    del(saved_system_meta[key])
+                            system_meta = {**saved_system_meta, **system_meta} #override the saved meta with the user inputs
+                        system_meta = {**system_meta_default, **system_meta} #add any missing fields from the defaults
+
+                    if not os.path.isdir(submission_results_path):
+                        os.makedirs(submission_results_path)
+
+                    #if division == "closed" and not os.path.isdir(submission_compliance_path):
+                    #    os.makedirs(submission_compliance_path)
+
+                    mlperf_inference_conf_path = os.path.join(result_mode_path, "mlperf.conf")
+                    if os.path.exists(mlperf_inference_conf_path):
+                        shutil.copy(mlperf_inference_conf_path, os.path.join(submission_measurement_path, 'mlperf.conf'))
+                    user_conf_path = os.path.join(result_mode_path, "user.conf")
+                    if os.path.exists(user_conf_path):
+                        shutil.copy(user_conf_path, os.path.join(submission_measurement_path, 'user.conf'))
+                    measurements_json_path = os.path.join(result_mode_path, "measurements.json")
+                    if os.path.exists(measurements_json_path):
+                        shutil.copy(measurements_json_path, os.path.join(submission_measurement_path, sub_res+'.json'))
+                    files = []
+                    readme = False
+
+                    for f in os.listdir(result_mode_path):
+                        if mode.startswith("TEST"):
+                            if f.startswith('verify_'):
+                                files.append(f)
+                            elif f == "performance":
+                                compliance_performance_run_path = os.path.join(result_mode_path, f, "run_1")
+                                if os.path.exists(compliance_performance_run_path):
+                                    target = os.path.join(submission_results_path, "performance", "run_1")
+                                    os.makedirs(target)
+                                    for log_file in os.listdir(compliance_performance_run_path):
+                                        if log_file.startswith("mlperf_"):
+                                            shutil.copy(os.path.join(compliance_performance_run_path, log_file), os.path.join(target, log_file))
+                            elif f == "accuracy":
+                                compliance_accuracy_run_path = os.path.join(result_mode_path, f)
+                                if os.path.exists(compliance_accuracy_run_path):
+                                    target = os.path.join(submission_results_path, "accuracy")
+                                    os.makedirs(target)
+                                    for log_file in os.listdir(compliance_accuracy_run_path):
+                                        if log_file.startswith("mlperf_log_accuracy.json") or log_file.endswith("accuracy.txt"):
+                                            shutil.copy(os.path.join(compliance_accuracy_run_path, log_file), os.path.join(target, log_file))
+                        else:
+                            if f.startswith('mlperf_') and not f.endswith('trace.json'):
+                                files.append(f)
+                            elif f == "spl.txt":
+                                files.append(f)
+                            elif f in [ "README.md", "README-extra.md", "cm-version-info.json", "os_info.json", "cpu_info.json", "pip_freeze.json" ] and mode == "performance":
+                                shutil.copy(os.path.join(result_mode_path, f), os.path.join(submission_measurement_path, f))
+                            elif f in [ "console.out" ]:
+                                shutil.copy(os.path.join(result_mode_path, f), os.path.join(submission_measurement_path, mode+"_"+f))
+
+
+                    if mode == "accuracy":
+                        if os.path.exists(os.path.join(result_mode_path, "accuracy.txt")):
+                            files.append("accuracy.txt")
+                        if model == "stable-diffusion-xl" and os.path.exists(os.path.join(result_mode_path, "images")):
shutil.copytree(os.path.join(result_mode_path, "images"), os.path.join(submission_results_path, "images")) + + for f in files: + print(' * ' + f) + p_target = os.path.join(submission_results_path, f) + shutil.copy(os.path.join(result_mode_path, f), p_target) + + + readme_file = os.path.join(submission_measurement_path, "README.md") + if not os.path.exists(readme_file): + with open(readme_file, mode='w') as f: + f.write("TBD") #create an empty README + else: + readme_suffix = "" + result_string, result = mlperf_utils.get_result_string(env['CM_MLPERF_LAST_RELEASE'], model, scenario, result_scenario_path, power_run, sub_res, division, system_file) + + for key in result: + results[model][scenario][key] = result[key] + with open(readme_file, mode='a') as f: + f.write(result_string) + + with open(system_file, "w") as fp: + json.dump(system_meta, fp, indent=2) + + result_table, headers = mlperf_utils.get_result_table(results) + + print(tabulate(result_table, headers = headers, tablefmt="pretty")) + sut_readme_file = os.path.join(measurement_path, "README.md") + with open(sut_readme_file, mode='w') as f: + f.write(tabulate(result_table, headers = headers, tablefmt="github")) + + + return {'return':0} + +def postprocess(i): + + r = generate_submission(i) + if r['return'] > 0: + return r + + return {'return':0} diff --git a/script/generate-mlperf-inference-submission/default_files/analyzer_table.md b/script/generate-mlperf-inference-submission/default_files/analyzer_table.md new file mode 100644 index 0000000000..fee88895c6 --- /dev/null +++ b/script/generate-mlperf-inference-submission/default_files/analyzer_table.md @@ -0,0 +1,3 @@ +| Vendor | Model | Firmware | Config | Interface | Wiring/topology | Number of channels used | Which channel(s) | +|----------|--------|----------|-----------------|-----------|-----------------|-------------------------|------------------| +| Yokogawa | WT310E | 1.04 | Single channel | USB | 1P2W | 1 | 1 | diff --git a/script/generate-mlperf-inference-submission/default_files/power_settings.md b/script/generate-mlperf-inference-submission/default_files/power_settings.md new file mode 100644 index 0000000000..a00bcf9925 --- /dev/null +++ b/script/generate-mlperf-inference-submission/default_files/power_settings.md @@ -0,0 +1 @@ +No special setting for power management is done. Out-of-the-box OS settings are used. 
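
A note on the result-folder layout consumed by `generate_submission()` in the customize.py above: any folder name with more than five dash-separated fields is parsed as `system-implementation-device-framework-framework_version-run_config`. A minimal sketch of that convention (the folder name itself is hypothetical):

```python
# Hypothetical folder name following the convention parsed above.
res = "my_server-reference-cpu-onnxruntime-v1.17.1-default_config"
parts = res.split("-")
if len(parts) > 5:
    system, implementation, device, framework, framework_version, run_config = parts[:6]
    print(f"system={system}, implementation={implementation}, device={device}")
    print(f"framework={framework} {framework_version}, run_config={run_config}")
```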
diff --git a/script/generate-mlperf-inference-user-conf/README.md b/script/generate-mlperf-inference-user-conf/README.md new file mode 100644 index 0000000000..c06cd2f223 --- /dev/null +++ b/script/generate-mlperf-inference-user-conf/README.md @@ -0,0 +1,200 @@ +Automatically generated README for this automation recipe: **generate-mlperf-inference-user-conf** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=generate-mlperf-inference-user-conf,3af4475745964b93) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-inference-user-conf)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *generate,mlperf,inference,user-conf,inference-user-conf* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "generate mlperf inference user-conf inference-user-conf" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=generate,mlperf,inference,user-conf,inference-user-conf` + +`cm run script --tags=generate,mlperf,inference,user-conf,inference-user-conf [--input_flags]` + +*or* + +`cmr "generate mlperf inference user-conf inference-user-conf"` + +`cmr "generate mlperf inference user-conf inference-user-conf " [--input_flags]` + + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'generate,mlperf,inference,user-conf,inference-user-conf',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="generate,mlperf,inference,user-conf,inference-user-conf"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=generate,mlperf,inference,user-conf,inference-user-conf) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "generate mlperf inference user-conf inference-user-conf" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+
+Click here to expand this section.
+
+* `--count=value`  →  `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+* `--hw_name=value`  →  `CM_HW_NAME=value`
+* `--mode=value`  →  `CM_MLPERF_LOADGEN_MODE=value`
+* `--multistream_target_latency=value`  →  `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+* `--num_threads=value`  →  `CM_NUM_THREADS=value`
+* `--offline_target_qps=value`  →  `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+* `--output_dir=value`  →  `OUTPUT_BASE_DIR=value`
+* `--performance_sample_count=value`  →  `CM_MLPERF_PERFORMANCE_SAMPLE_COUNT=value`
+* `--power=value`  →  `CM_MLPERF_POWER=value`
+* `--regenerate_files=value`  →  `CM_REGENERATE_MEASURE_FILES=value`
+* `--rerun=value`  →  `CM_RERUN=value`
+* `--scenario=value`  →  `CM_MLPERF_LOADGEN_SCENARIO=value`
+* `--server_target_qps=value`  →  `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+* `--singlestream_target_latency=value`  →  `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+* `--target_latency=value`  →  `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+* `--target_qps=value`  →  `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+* `--test_query_count=value`  →  `CM_TEST_QUERY_COUNT=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "count":...})
+```
+
+</details>
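+
+A short sketch combining a few of the flags above from Python; the QPS value is illustrative only:
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'generate,mlperf,inference,user-conf,inference-user-conf',
+                  'out': 'con',
+                  'scenario': 'Offline',
+                  'mode': 'performance',
+                  'target_qps': '100'})  # illustrative value
+if r['return'] > 0:
+    print(r['error'])
+```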
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MLPERF_LOADGEN_MODE: `accuracy` +* CM_MLPERF_LOADGEN_SCENARIO: `Offline` +* CM_OUTPUT_FOLDER_NAME: `test_results` +* CM_MLPERF_RUN_STYLE: `test` +* CM_TEST_QUERY_COUNT: `10` +* CM_FAST_FACTOR: `5` +* CM_MLPERF_QUANTIZATION: `False` + +
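+
+To illustrate how these defaults interact, here is a sketch of the `user.conf` fragment the script emits in the default `test` run style, where the query count comes from `CM_TEST_QUERY_COUNT` (default `10` above); the model and scenario names are illustrative:
+
+```python
+ml_model_name, scenario = "resnet50", "Offline"  # illustrative names
+query_count = "10"                               # CM_TEST_QUERY_COUNT default
+
+user_conf = ""
+user_conf += f"{ml_model_name}.{scenario}.max_query_count = {query_count}\n"
+user_conf += f"{ml_model_name}.{scenario}.min_query_count = {query_count}\n"
+user_conf += f"{ml_model_name}.{scenario}.min_duration = 0\n"
+print(user_conf)
+```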
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-inference-user-conf/_cm.yaml)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * detect,cpu
+       - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu)
+     * get,python
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,mlperf,results,dir
+       * `if (OUTPUT_BASE_DIR != on)`
+       * CM names: `--adr.['get-mlperf-results-dir']...`
+       - CM script: [get-mlperf-inference-results-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-results-dir)
+     * get,mlcommons,inference,src
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,sut,configs
+       - CM script: [get-mlperf-inference-sut-configs](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-sut-configs)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-inference-user-conf/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-inference-user-conf/_cm.yaml)
+  1. ***Run native script if exists***
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-inference-user-conf/_cm.yaml)
+  1. Run "postprocess" function from customize.py
+  1.
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-inference-user-conf/_cm.yaml) + +___ +### Script output +`cmr "generate mlperf inference user-conf inference-user-conf " [--input_flags] -j` +#### New environment keys (filter) + +* `CM_HW_*` +* `CM_LOGS_DIR` +* `CM_MAX_EXAMPLES` +* `CM_MLPERF_*` +* `CM_SUT_*` +#### New environment keys auto-detected from customize + +* `CM_LOGS_DIR` +* `CM_MAX_EXAMPLES` +* `CM_MLPERF_ACCURACY_RESULTS_DIR` +* `CM_MLPERF_COMPLIANCE_RUN_POSTPONED` +* `CM_MLPERF_CONF` +* `CM_MLPERF_INFERENCE_AUDIT_PATH` +* `CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR` +* `CM_MLPERF_INFERENCE_MIN_DURATION` +* `CM_MLPERF_LOADGEN_LOGS_DIR` +* `CM_MLPERF_LOADGEN_MODE` +* `CM_MLPERF_LOADGEN_QUERY_COUNT` +* `CM_MLPERF_LOADGEN_SCENARIO` +* `CM_MLPERF_LOADGEN_TARGET_LATENCY` +* `CM_MLPERF_LOADGEN_TARGET_QPS` +* `CM_MLPERF_OUTPUT_DIR` +* `CM_MLPERF_POWER_LOG_DIR` +* `CM_MLPERF_RANGING_USER_CONF` +* `CM_MLPERF_RUN_STYLE` +* `CM_MLPERF_SKIP_RUN` +* `CM_MLPERF_TESTING_USER_CONF` +* `CM_MLPERF_USER_CONF` +* `CM_MLPERF_USE_MAX_DURATION` \ No newline at end of file diff --git a/script/generate-mlperf-inference-user-conf/_cm.yaml b/script/generate-mlperf-inference-user-conf/_cm.yaml new file mode 100644 index 0000000000..36f591df4a --- /dev/null +++ b/script/generate-mlperf-inference-user-conf/_cm.yaml @@ -0,0 +1,92 @@ +# Identification of this CM script +alias: generate-mlperf-inference-user-conf +uid: 3af4475745964b93 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "MLPerf benchmark support" + +developers: "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin)" + +# User-friendly tags to find this CM script +tags: + - generate + - mlperf + - inference + - user-conf + - inference-user-conf + +# Default environment +default_env: + CM_MLPERF_LOADGEN_MODE: accuracy + CM_MLPERF_LOADGEN_SCENARIO: Offline + CM_OUTPUT_FOLDER_NAME: test_results + CM_MLPERF_RUN_STYLE: test + CM_TEST_QUERY_COUNT: '10' + CM_FAST_FACTOR: '5' + CM_MLPERF_QUANTIZATION: off + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + hw_name: CM_HW_NAME + mode: CM_MLPERF_LOADGEN_MODE + num_threads: CM_NUM_THREADS + output_dir: OUTPUT_BASE_DIR + power: CM_MLPERF_POWER + regenerate_files: CM_REGENERATE_MEASURE_FILES + rerun: CM_RERUN + scenario: CM_MLPERF_LOADGEN_SCENARIO + test_query_count: CM_TEST_QUERY_COUNT + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + performance_sample_count: CM_MLPERF_PERFORMANCE_SAMPLE_COUNT + +# Env keys which are exposed to higher level scripts +new_env_keys: + - CM_MLPERF_* + - CM_LOGS_DIR + - CM_HW_* + - CM_SUT_* + - CM_MAX_EXAMPLES + +new_state_keys: + - CM_SUT_* + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Detect/install python + - tags: get,python + names: + - python + - python3 + + - tags: get,mlperf,results,dir + names: + - get-mlperf-results-dir + skip_if_env: + OUTPUT_BASE_DIR: + - "on" + + 
######################################################################## + # Install MLPerf inference dependencies + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + # Get SUT configs (System Under Test) + - tags: get,sut,configs diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py new file mode 100644 index 0000000000..87e10eeeed --- /dev/null +++ b/script/generate-mlperf-inference-user-conf/customize.py @@ -0,0 +1,446 @@ +from cmind import utils +import os +import json +import shutil +import subprocess +import cmind as cm +import sys + +def preprocess(i): + + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + script_path = i['run_script_input']['path'] + + rerun = True if env.get("CM_RERUN","")!='' else False + + env['CM_MLPERF_SKIP_RUN'] = env.get('CM_MLPERF_SKIP_RUN', "no") + + mlperf_path = env['CM_MLPERF_INFERENCE_SOURCE'] + submission_checker_dir = os.path.join(mlperf_path, "tools", "submission") + sys.path.append(submission_checker_dir) + + version = env.get('CM_MLPERF_INFERENCE_VERSION', "4.0") + + required_files = [] + required_files = get_checker_files() + + if 'CM_MLPERF_LOADGEN_SCENARIO' not in env: + env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline" + + if 'CM_MLPERF_LOADGEN_MODE' not in env: + print("\nNo mode given. Using accuracy as default\n") + env['CM_MLPERF_LOADGEN_MODE'] = "accuracy" + + + if env.get('OUTPUT_BASE_DIR', '') == '': + env['OUTPUT_BASE_DIR'] = env.get('CM_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd()) + + if 'CM_NUM_THREADS' not in env: + if 'CM_MINIMIZE_THREADS' in env: + env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // \ + (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + else: + env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') + + + print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + + if 'CM_MLPERF_CONF' not in env: + env['CM_MLPERF_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + + + RUN_CMD = "" + state['RUN'] = {} + test_list = ["TEST01", "TEST04", "TEST05"] + if env['CM_MODEL'] in ["rnnt", "bert-99", "bert-99.9", "dlrm-v2-99", "dlrm-v2-99.9", "3d-unet-99", "3d-unet-99.9"]: + test_list.remove("TEST04") + if "gpt-" in env['CM_MODEL']: + test_list.remove("TEST05") + test_list.remove("TEST04") + test_list.remove("TEST01") + + scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] + state['RUN'][scenario] = {} + + model_full_name = env.get('CM_ML_MODEL_FULL_NAME', env['CM_MODEL']) + + if model_full_name != env['CM_MODEL']: + if 'model_mapping' not in state['CM_SUT_CONFIG']: + state['CM_SUT_CONFIG']['model_mappings'] = {} + state['CM_SUT_CONFIG']['model_mappings'][model_full_name] = env['CM_MODEL'] + + if model_full_name not in i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']]: + i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']][model_full_name] = {} + + if scenario not in i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']][model_full_name]: + i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']][model_full_name][scenario] = {} + + + conf = i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']][model_full_name][scenario] + + mode = env['CM_MLPERF_LOADGEN_MODE'] + + user_conf = '' + if env['CM_MLPERF_RUN_STYLE'] == "fast": + fast_factor = int(env['CM_FAST_FACTOR']) + else: + fast_factor = 1 + + ml_model_name = env['CM_MODEL'] + if 'bert' in ml_model_name: + ml_model_name = "bert" + if 'dlrm' in ml_model_name: + 
ml_model_name = "dlrm-v2" + if '3d-unet' in ml_model_name: + ml_model_name = "3d-unet" + if 'gptj' in ml_model_name: + ml_model_name = "gptj" + + query_count = None + + value = None + if scenario in [ 'Offline', 'Server' ]: + metric = "target_qps" + tolerance = 1.01 + #value = env.get('CM_MLPERF_LOADGEN_SERVER_TARGET_QPS') if scenario == "Server" else env.get('CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS') + value = env.get('CM_MLPERF_LOADGEN_TARGET_QPS') + elif scenario in [ 'SingleStream', 'MultiStream' ]: + metric = "target_latency" + value = env.get('CM_MLPERF_LOADGEN_TARGET_LATENCY') + if value: + if scenario == "SingleStream" and (1000/float(value) * 660 < 100): + env['CM_MLPERF_USE_MAX_DURATION'] = 'no' + elif scenario == "MultiStream" and (1000/float(value) * 660 < 662): + env['CM_MLPERF_USE_MAX_DURATION'] = 'no' + if env.get('CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in [ "yes", "1", "true" ] and env.get('CM_MLPERF_USE_MAX_DURATION', "yes").lower() not in [ "no", "false", "0"]: + tolerance = 0.4 #much lower because we have max_duration + else: + tolerance = 0.9 + else: + return {'return': 1, 'error': 'Invalid scenario: {}'.format(scenario)} + + if value: + metric_value = value + conf[metric] = value + else: + if metric in conf: + print("Original configuration value {} {}".format(conf[metric], metric)) + metric_value = str(float(conf[metric]) * tolerance) #some tolerance + print("Adjusted configuration value {} {}".format(metric_value, metric)) + else: + #if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": + if metric == "target_qps": + if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": + print("In find performance mode: using 1 as target_qps") + else: + print("No target_qps specified. Using 1 as target_qps") + conf[metric] = 1 + if metric == "target_latency": + if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": + print("In find performance mode: using 0.5ms as target_latency") + else: + print("No target_latency specified. Using default") + if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() in [ "no", "false", "0" ] or env.get('CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() in [ "yes", "1", "true" ]: + # Total number of queries needed is a multiple of dataset size. So we dont use max_duration and so we need to be careful with the input latency + if '3d-unet' in env['CM_MODEL']: + conf[metric] = 400 + elif 'gptj' in env['CM_MODEL']: + conf[metric] = 1000 + else: + conf[metric] = 100 + else: + conf[metric] = 0.5 + metric_value = conf[metric] + #else: + # return {'return': 1, 'error': f"Config details missing for SUT:{env['CM_SUT_NAME']}, Model:{env['CM_MODEL']}, Scenario: {scenario}. 
Please input {metric} value"} + + #Pass the modified performance metrics to the implementation + if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": + if metric == "target_latency" and env.get('CM_MLPERF_LOADGEN_TARGET_LATENCY', '') == '': + env['CM_MLPERF_LOADGEN_TARGET_LATENCY'] = conf[metric] + elif metric == "target_qps" and env.get('CM_MLPERF_LOADGEN_TARGET_QPS', '') == '': + env['CM_MLPERF_LOADGEN_TARGET_QPS'] = conf[metric] + + + if env['CM_MLPERF_RUN_STYLE'] == "fast": + if scenario == "Offline": + metric_value = float(metric_value) / fast_factor + if scenario in [ "SingleStream", "MultiStream" ]: + metric_value = float(metric_value) * fast_factor + + elif env['CM_MLPERF_RUN_STYLE'] == "test": + if scenario == "Offline": + metric_value = 1 + if scenario in [ "SingleStream" ]: + metric_value = 1000 + + elif env['CM_MLPERF_RUN_STYLE'] == "valid": + if scenario == "Offline": + required_min_queries_offline = {} + required_min_queries_offline = get_required_min_queries_offline(env['CM_MODEL'], version) + + + if mode == "compliance" and scenario == "Server": #Adjust the server_target_qps + test = env.get("CM_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01") + if test == "TEST01": + metric_value = str(float(metric_value) * float(env.get("CM_MLPERF_TEST01_SERVER_ADJUST_FACTOR", 0.96))) + if test == "TEST05": + metric_value = str(float(metric_value) * float(env.get("CM_MLPERF_TEST05_SERVER_ADJUST_FACTOR", 0.97))) + if test == "TEST04": + metric_value = str(float(metric_value) * float(env.get("CM_MLPERF_TEST04_SERVER_ADJUST_FACTOR", 0.97))) + + conf[metric] = metric_value + user_conf += ml_model_name + "." + scenario + "." + metric + " = " + str(metric_value) + "\n" + + if env.get('CM_MLPERF_PERFORMANCE_SAMPLE_COUNT', '') != '': + performance_sample_count = env['CM_MLPERF_PERFORMANCE_SAMPLE_COUNT'] + user_conf += ml_model_name + ".*.performance_sample_count_override = " + performance_sample_count + "\n" + + log_mode = mode + if 'CM_MLPERF_POWER' in env and mode == "performance": + log_mode = "performance_power" + + env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'] = os.path.join(env['OUTPUT_BASE_DIR'], env['CM_OUTPUT_FOLDER_NAME']) + + sut_name = env.get('CM_SUT_NAME', env['CM_MLPERF_BACKEND'] + "-" + env['CM_MLPERF_DEVICE']) + OUTPUT_DIR = os.path.join(env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name, \ + model_full_name, scenario.lower(), mode) + + if 'CM_MLPERF_POWER' in env and mode == "performance": + env['CM_MLPERF_POWER_LOG_DIR'] = os.path.join(OUTPUT_DIR, "tmp_power") + + if mode == "accuracy": + pass + elif mode == "performance": + OUTPUT_DIR = os.path.join(OUTPUT_DIR, "run_1") + elif mode == "compliance": + test = env.get("CM_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01") + OUTPUT_DIR = os.path.join(env['OUTPUT_BASE_DIR'], env['CM_OUTPUT_FOLDER_NAME'], sut_name, model_full_name, scenario.lower(), test) + if test == "TEST01": + audit_path = os.path.join(test, ml_model_name) + else: + audit_path = test + + audit_full_path = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", audit_path, "audit.config") + env['CM_MLPERF_INFERENCE_AUDIT_PATH'] = audit_full_path + #copy the audit conf to the run directory incase the implementation is not supporting the audit-conf path + if not os.path.exists(OUTPUT_DIR): + os.makedirs(OUTPUT_DIR) + shutil.copyfile(audit_full_path, os.path.join(OUTPUT_DIR, "audit.config")) + + env['CM_MLPERF_OUTPUT_DIR'] = OUTPUT_DIR + env['CM_LOGS_DIR'] = OUTPUT_DIR + env['CM_MLPERF_LOADGEN_LOGS_DIR'] = OUTPUT_DIR + + if mode == "accuracy": + 
output_dir = env['CM_MLPERF_OUTPUT_DIR'] + env['CM_MLPERF_ACCURACY_RESULTS_DIR'] = output_dir + else: + env['CM_MLPERF_ACCURACY_RESULTS_DIR'] = '' + + run_exists = run_files_exist(log_mode, OUTPUT_DIR, required_files, env) + + if 'CM_MLPERF_POWER' in env and env.get('CM_MLPERF_SHORT_RANGING_RUN', '') != 'no' and env['CM_MLPERF_RUN_STYLE'] == "valid" and mode == "performance": + short_ranging = True + else: + short_ranging = False + + if short_ranging: + import copy + ranging_user_conf = copy.deepcopy(user_conf) + ranging_user_conf += ml_model_name + "." + scenario + ".min_duration = 300000" + "\n" + + if env['CM_MLPERF_RUN_STYLE'] == "test": + query_count = env.get('CM_TEST_QUERY_COUNT', "5") + user_conf += ml_model_name + "." + scenario + ".max_query_count = " + query_count + "\n" + user_conf += ml_model_name + "." + scenario + ".min_query_count = " + query_count + "\n" + user_conf += ml_model_name + "." + scenario + ".min_duration = 0" + "\n" + #else: + # user_conf += ml_model_name + "." + scenario + ".min_duration = 20000" + "\n" + # user_conf += ml_model_name + "." + scenario + ".max_duration = 20000 \n " + + elif env['CM_MLPERF_RUN_STYLE'] == "fast": + if scenario == "Server": + target_qps = conf['target_qps'] + query_count = str(int((660/fast_factor) * (float(target_qps)))) + user_conf += ml_model_name + "." + scenario + ".max_query_count = " + query_count + "\n" + + else: + if scenario == "MultiStream" or scenario == "SingleStream": + if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() not in [ "no", "false", "0" ] and env.get('CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in [ "yes", "1", "true" ]: + user_conf += ml_model_name + "." + scenario + ".max_duration = 660000 \n" + elif env.get('CM_MLPERF_INFERENCE_MIN_DURATION','') != '': + user_conf += ml_model_name + "." + scenario + ".min_duration = " + env['CM_MLPERF_INFERENCE_MIN_DURATION'] +" \n" + if scenario == "MultiStream": + user_conf += ml_model_name + "." + scenario + ".min_query_count = "+ env.get('CM_MLPERF_INFERENCE_MULTISTREAM_MIN_QUERY_COUNT', "662") + "\n" + if short_ranging: + ranging_user_conf += ml_model_name + "." + scenario + ".max_duration = 300000 \n " + elif scenario == "SingleStream_old": + query_count = str(max(int((1000 / float(conf['target_latency'])) * 660), 64)) + user_conf += ml_model_name + "." + scenario + ".max_query_count = " + str(int(query_count)+40) + "\n" + #user_conf += ml_model_name + "." + scenario + ".min_query_count = " + query_count + "\n" + if short_ranging: + ranging_user_conf += ml_model_name + "." + scenario + ".max_query_count = " + str(int(query_count)+40) + "\n" + elif scenario == "Offline": + query_count = int(float(conf['target_qps']) * 660) + query_count = str(max(query_count, required_min_queries_offline)) + + #user_conf += ml_model_name + "." + scenario + ".max_query_count = " + str(int(query_count)+40) + "\n" + if short_ranging: + ranging_query_count = str(int(float(conf['target_qps']) * 300)) + ranging_user_conf += ml_model_name + "." + scenario + ".max_query_count = " + str(ranging_query_count) + "\n" + ranging_user_conf += ml_model_name + "." 
+ scenario + ".min_query_count = 0 \n" + + if query_count: + env['CM_MAX_EXAMPLES'] = query_count #needed for squad accuracy checker + + + import uuid + from pathlib import Path + key = uuid.uuid4().hex + user_conf_path = os.path.join(script_path, "tmp", key+".conf") + user_conf_file = Path(user_conf_path) + user_conf_file.parent.mkdir(exist_ok=True, parents=True) + user_conf_file.write_text(user_conf) + + if short_ranging: + ranging_user_conf_path = os.path.join(script_path, "tmp", "ranging_"+key+".conf") + ranging_user_conf_file = Path(ranging_user_conf_path) + ranging_user_conf_file.write_text(ranging_user_conf) + + + if (env.get('CM_MLPERF_LOADGEN_QUERY_COUNT','') == '') and query_count and ((mode != "accuracy") or (env['CM_MLPERF_RUN_STYLE'] != "valid")): + env['CM_MLPERF_LOADGEN_QUERY_COUNT'] = query_count + + if not run_exists or rerun: + + print("Output Dir: '" + OUTPUT_DIR + "'") + print(user_conf) + if env.get('CM_MLPERF_POWER','') == "yes" and os.path.exists(env.get('CM_MLPERF_POWER_LOG_DIR', '')): + shutil.rmtree(env['CM_MLPERF_POWER_LOG_DIR']) + else: + if not env.get('CM_MLPERF_COMPLIANCE_RUN_POSTPONED', False): + print("Run files exist, skipping run...\n") + env['CM_MLPERF_SKIP_RUN'] = "yes" + + if not run_exists or rerun or not measure_files_exist(OUTPUT_DIR, \ + required_files[4]) or env.get("CM_MLPERF_LOADGEN_COMPLIANCE", "") == "yes" or env.get("CM_REGENERATE_MEASURE_FILES", False): + + env['CM_MLPERF_TESTING_USER_CONF'] = os.path.join(os.path.dirname(user_conf_path), key+".conf")# user_conf_path + env['CM_MLPERF_RANGING_USER_CONF'] = os.path.join(os.path.dirname(user_conf_path), "ranging_"+key+".conf")# ranging_user_conf_path for a shorter run + + if short_ranging: + env['CM_MLPERF_USER_CONF'] = "\${CM_MLPERF_USER_CONF}" + else: + env['CM_MLPERF_USER_CONF'] = os.path.join(os.path.dirname(user_conf_path), key+".conf")# user_conf_path + else: + print(f"Measure files exist at {OUTPUT_DIR}. 
Skipping regeneration...\n") + env['CM_MLPERF_USER_CONF'] = '' + + os.makedirs(OUTPUT_DIR, exist_ok=True) + + return {'return':0} + +def run_files_exist(mode, OUTPUT_DIR, run_files, env): + import submission_checker as checker + from log_parser import MLPerfLog + + is_valid = True + + file_loc = {"accuracy": 0, "performance": 1, "power": 2, "performance_power": 3, "measure": 4, "compliance": 1} + + required_files = run_files[file_loc[mode]] + if mode == "performance_power": + for file_ in run_files[2]: + file_path = os.path.join(os.path.dirname(OUTPUT_DIR), "power", file_) + if (not os.path.exists(file_path) or os.stat(file_path).st_size == 0): + return False + required_files += run_files[1] #We need performance files too in the run directory + + for file_ in required_files: + file_path = os.path.join(OUTPUT_DIR, file_) + if (not os.path.exists(file_path) or os.stat(file_path).st_size == 0) and file_ != "accuracy.txt": + return False + + if file_ == "mlperf_log_detail.txt" and "performance" in mode: + mlperf_log = MLPerfLog(file_path) + if ( + "result_validity" not in mlperf_log.get_keys() + or mlperf_log["result_validity"] != "VALID" + ): + return False + + if mode == "compliance": + #If a performance run followed the last compliance run, compliance check needs to be redone + RESULT_DIR = os.path.split(OUTPUT_DIR)[0] + COMPLIANCE_DIR = OUTPUT_DIR + OUTPUT_DIR = os.path.dirname(COMPLIANCE_DIR) + + #If reference test result is invalid, don't do compliance run + file_path = os.path.join(RESULT_DIR, "performance", "run_1", "mlperf_log_detail.txt") + mlperf_log = MLPerfLog(file_path) + if ( + "result_validity" not in mlperf_log.get_keys() + or mlperf_log["result_validity"] != "VALID" + ): + env['CM_MLPERF_COMPLIANCE_RUN_POSTPONED'] = True + return True + + test = env['CM_MLPERF_LOADGEN_COMPLIANCE_TEST'] + + SCRIPT_PATH = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test, "run_verification.py") + cmd = env['CM_PYTHON_BIN'] + " " + SCRIPT_PATH + " -r " + RESULT_DIR + " -c " + COMPLIANCE_DIR + " -o "+ OUTPUT_DIR + print(cmd) + os.system(cmd) + + is_valid = checker.check_compliance_perf_dir(COMPLIANCE_DIR) + + if not is_valid and 'Stream' in env['CM_MLPERF_LOADGEN_SCENARIO']: + env['CM_MLPERF_USE_MAX_DURATION'] = 'no' # We have the determined latency, compliance test failed, so lets not use max duration + env['CM_MLPERF_INFERENCE_MIN_DURATION'] = '990000' # Try a longer run + + return is_valid + + if "power" in mode and env.get('CM_MLPERF_SKIP_POWER_CHECKS', 'no').lower() not in [ "yes", "true", "on" ]: + from power.power_checker import check as check_power_more + try: + is_valid = check_power_more(os.path.dirname(OUTPUT_DIR)) == 0 + except: + is_valid = False + return is_valid + + return is_valid + +def measure_files_exist(OUTPUT_DIR, run_files): + for file in run_files: + file_path = os.path.join(OUTPUT_DIR, file) + if not os.path.exists(file_path): + return False + return True + +def get_checker_files(): + import submission_checker as checker + + REQUIRED_ACC_FILES = checker.REQUIRED_ACC_FILES + REQUIRED_PERF_FILES = checker.REQUIRED_PERF_FILES + REQUIRED_POWER_FILES = checker.REQUIRED_POWER_FILES + REQUIRED_PERF_POWER_FILES = checker.REQUIRED_PERF_POWER_FILES + REQUIRED_MEASURE_FILES = checker.REQUIRED_MEASURE_FILES + return REQUIRED_ACC_FILES, REQUIRED_PERF_FILES, REQUIRED_POWER_FILES, REQUIRED_PERF_POWER_FILES, REQUIRED_MEASURE_FILES + +def get_required_min_queries_offline(model, version): + + import submission_checker as checker + + version_split = 
version.split(".") + if int(version[0]) < 4: + return 24756 + + REQUIRED_MIN_QUERIES = checker.OFFLINE_MIN_SPQ_SINCE_V4 + mlperf_model = model + mlperf_model = mlperf_model.replace("resnet50", "resnet") + + return REQUIRED_MIN_QUERIES[mlperf_model] diff --git a/script/generate-mlperf-tiny-report/README-extra.md b/script/generate-mlperf-tiny-report/README-extra.md new file mode 100644 index 0000000000..cf2e3366a3 --- /dev/null +++ b/script/generate-mlperf-tiny-report/README-extra.md @@ -0,0 +1,55 @@ +# About + +This portable CM script run submission checker and generates summary report for all Tiny MLPerf results +using [these native scripts](https://github.com/mlcommons/submissions_tiny_v1.1/pull/51). + +## Usage + +We have tested this portable CM script on Ubuntu and Windows. + +Install [MLCommons CM framework](https://github.com/mlcommons/ck/blob/master/docs/installation.md). + +Pull the MLCommons CK repository with automation recipes for interoperable MLOps: +```bash +cm pull repo mlcommons@ck +``` + +Install repositories with raw MLPerf inference benchmark results: +```bash +cmr "get git repo _repo.https://github.com/mlcommons/tiny_results_v0.7" --extra_cache_tags=mlperf-tiny-results,version-0.7 +cmr "get git repo _repo.https://github.com/mlcommons/tiny_results_v1.0" --extra_cache_tags=mlperf-tiny-results,version-1.0 +``` + +You can also add private results to compare submissions locally before they become public: +```bash +cmr "get git repo _repo.https://github.com/mlcommons/submissions_tiny_v1.1" --extra_cache_tags=mlperf-tiny-results,version-1.1-private +``` + +You can use a specific checkout/branch as follows: +```bash +cm run script "get git repo _repo.https://github.com/mlcommons/submissions_tiny_v1.1" \ + --extra_cache_tags=mlperf-tiny-results,version-1.1-private,generate_final_report \ + --depth="" \ + --branch=generate_final_report +``` + + +Now run this script: +```bash +cmr "generate mlperf-tiny report" +``` + +It will create `summary-{TinyMLPerf version}.csv' report in your current directory. + +You can also specify a version of a repository here: + +```bash +cmr "generate mlperf-tiny report" --repo_tags=1.1-private +``` + +These results are also available in the [public CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny,all). + +# Contact us + +This project is maintained by the [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce). +Join our [Discord server](https://discord.gg/JjWNWXKxwT) to ask questions, provide your feedback and participate in further developments. 
diff --git a/script/generate-mlperf-tiny-report/README.md b/script/generate-mlperf-tiny-report/README.md new file mode 100644 index 0000000000..20d12a419d --- /dev/null +++ b/script/generate-mlperf-tiny-report/README.md @@ -0,0 +1,147 @@ +Automatically generated README for this automation recipe: **generate-mlperf-tiny-report** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=generate-mlperf-tiny-report,709c3f3f9b3e4783) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-report)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *generate,mlperf,tiny,mlperf-tiny,report* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "generate mlperf tiny mlperf-tiny report" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=generate,mlperf,tiny,mlperf-tiny,report` + +`cm run script --tags=generate,mlperf,tiny,mlperf-tiny,report [--input_flags]` + +*or* + +`cmr "generate mlperf tiny mlperf-tiny report"` + +`cmr "generate mlperf tiny mlperf-tiny report " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'generate,mlperf,tiny,mlperf-tiny,report',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
</details>
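+
+For this particular script, a complete minimal call could look as follows.
+This is a sketch: `repo_tags` is the documented input flag mapped to
+`CM_IMPORT_TINYMLPERF_REPO_TAGS`, and its value must match the `version-*`
+tag used when caching the results repository:
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'generate,mlperf,tiny,mlperf-tiny,report',
+                  'repo_tags': '1.1-private',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```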
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="generate,mlperf,tiny,mlperf-tiny,report"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=generate,mlperf,tiny,mlperf-tiny,report) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "generate mlperf tiny mlperf-tiny report" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--repo_tags=value` → `CM_IMPORT_TINYMLPERF_REPO_TAGS=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r = cm.access({..., "repo_tags": ...})
+```
+
</details>
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_IMPORT_TINYMLPERF_REPO_TAGS: `1.1-private` + +
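+
+For example, the default repository tag can be overridden through a minimal
+`@input.json` passed on the command line (a sketch, e.g.
+`cmr "generate mlperf-tiny report" @input.json`; the value `1.0` assumes
+TinyMLPerf v1.0 results were cached with the `version-1.0` tag as shown in the
+[notes](README-extra.md)):
+
+```json
+{
+  "env": {
+    "CM_IMPORT_TINYMLPERF_REPO_TAGS": "1.0"
+  }
+}
+```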
+
+___
+### Dependencies on other CM scripts
+
+
+ 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-report/_cm.yaml)***
+   * detect,os
+     - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+   * get,sys-utils-cm
+     - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm)
+   * get,python3
+     * CM names: `--adr.['python', 'python3']...`
+     - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+   * get,generic-python-lib,_xlsxwriter
+     - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+   * get,generic-python-lib,_pandas
+     - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+ 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-report/customize.py)***
+ 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-report/_cm.yaml)
+ 1. ***Run native script if exists***
+   * [run_submission_checker.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-report/run_submission_checker.bat)
+   * [run_submission_checker.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-report/run_submission_checker.sh)
+ 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-report/_cm.yaml)
+ 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-report/customize.py)***
+ 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-report/_cm.yaml)
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-report/_cm.yaml) + +___ +### Script output +`cmr "generate mlperf tiny mlperf-tiny report " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/generate-mlperf-tiny-report/_cm.yaml b/script/generate-mlperf-tiny-report/_cm.yaml new file mode 100644 index 0000000000..3af0906f73 --- /dev/null +++ b/script/generate-mlperf-tiny-report/_cm.yaml @@ -0,0 +1,42 @@ +# Identification of this CM script +alias: generate-mlperf-tiny-report +uid: 709c3f3f9b3e4783 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "MLPerf benchmark support" + +developers: "[Grigori Fursin](https://cKnowledge.org/gfursin)" + +default_env: + CM_IMPORT_TINYMLPERF_REPO_TAGS: "1.1-private" + +# User-friendly tags to find this CM script +tags: + - generate + - mlperf + - tiny + - mlperf-tiny + - report + +input_mapping: + repo_tags: CM_IMPORT_TINYMLPERF_REPO_TAGS + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect python3 + - tags: get,python3 + names: + - python + - python3 + + - tags: get,generic-python-lib,_xlsxwriter + - tags: get,generic-python-lib,_pandas diff --git a/script/generate-mlperf-tiny-report/customize.py b/script/generate-mlperf-tiny-report/customize.py new file mode 100644 index 0000000000..59b16019fb --- /dev/null +++ b/script/generate-mlperf-tiny-report/customize.py @@ -0,0 +1,83 @@ +import cmind as cm +from cmind import utils + +import os +import subprocess +import json +import shutil + +def preprocess(i): + + env = i['env'] + + cur_dir = os.getcwd() + + # Query cache for results dirs + env_repo_tags=env.get('CM_IMPORT_TINYMLPERF_REPO_TAGS','').strip() + xtags='' if env_repo_tags =='' else ',version-'+env_repo_tags + + r = cm.access({'action':'find', + 'automation':'cache,541d6f712a6b464e', + 'tags':'get,repo,mlperf-tiny-results'+xtags}) + if r['return']>0: return r + + lst = r['list'] + + if len(lst)==0: + return {'return':1, 'error':'no repository with TinyMLPerf results found'} + + for c in lst: + path = os.path.join(c.path, 'repo') + + if os.path.isdir(path): + meta = c.meta + + tags = meta['tags'] + + version = '' + for t in tags: + if t.startswith('version-'): + version = 'v'+t[8:] + break + + # Run local script + run_script_input = i['run_script_input'] + automation = i['automation'] + + env['CM_TINYMLPERF_REPO_PATH'] = path + env['CM_TINYMLPERF_CURRENT_DIR'] = cur_dir + env['CM_TINYMLPERF_REPO_VERSION'] = version + + print ('') + print ('Repo path: {}'.format(path)) + + r = automation.run_native_script({'run_script_input':run_script_input, + 'env':env, + 'script_name':'run_submission_checker'}) + if r['return']>0: + return r + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + path = env['CM_TINYMLPERF_REPO_PATH'] + cur_dir = env['CM_TINYMLPERF_CURRENT_DIR'] + version = env['CM_TINYMLPERF_REPO_VERSION'] + + for ext in ['.csv', '.xlsx']: + + p1 = os.path.join (path, 'summary'+ext) + p2 = os.path.join (cur_dir, 'summary-{}{}'.format(version,ext)) + + if not os.path.isfile(p1): + return {'return':1, 'error':'summary.csv file was not created'} + + if os.path.isfile(p2): + os.remove(p2) + + shutil.copy(p1,p2) + + return {'return':0} diff --git a/script/generate-mlperf-tiny-report/run_submission_checker.bat 
b/script/generate-mlperf-tiny-report/run_submission_checker.bat new file mode 100644 index 0000000000..5d9a6fbaf1 --- /dev/null +++ b/script/generate-mlperf-tiny-report/run_submission_checker.bat @@ -0,0 +1,10 @@ +cd %CM_TINYMLPERF_REPO_PATH% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo. +%CM_PYTHON_BIN_WITH_PATH% submission_checker.py --input . +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo. +%CM_PYTHON_BIN_WITH_PATH% generate_final_report.py --input summary.csv +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/generate-mlperf-tiny-report/run_submission_checker.sh b/script/generate-mlperf-tiny-report/run_submission_checker.sh new file mode 100644 index 0000000000..d858c9b223 --- /dev/null +++ b/script/generate-mlperf-tiny-report/run_submission_checker.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +cd ${CM_TINYMLPERF_REPO_PATH} +test $? -eq 0 || exit $? + +echo "" +${CM_PYTHON_BIN_WITH_PATH} submission_checker.py --input . +test $? -eq 0 || exit $? + +echo "" +${CM_PYTHON_BIN_WITH_PATH} generate_final_report.py --input summary.csv +test $? -eq 0 || exit $? diff --git a/script/generate-mlperf-tiny-submission/README-extra.md b/script/generate-mlperf-tiny-submission/README-extra.md new file mode 100644 index 0000000000..6b36716198 --- /dev/null +++ b/script/generate-mlperf-tiny-submission/README-extra.md @@ -0,0 +1,3 @@ +# Generate MLPerf Tiny Submission Folder from a Results Directory + +This is a work in progress script. diff --git a/script/generate-mlperf-tiny-submission/README.md b/script/generate-mlperf-tiny-submission/README.md new file mode 100644 index 0000000000..7bf2c9aca3 --- /dev/null +++ b/script/generate-mlperf-tiny-submission/README.md @@ -0,0 +1,409 @@ +Automatically generated README for this automation recipe: **generate-mlperf-tiny-submission** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=generate-mlperf-tiny-submission,04289b9fc07b42b6) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-submission)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *generate,submission,mlperf,mlperf-tiny,tiny,mlcommons,tiny-submission,mlperf-tiny-submission,mlcommons-tiny-submission* +* Output cached? 
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=generate,submission,mlperf,mlperf-tiny,tiny,mlcommons,tiny-submission,mlperf-tiny-submission,mlcommons-tiny-submission` + +`cm run script --tags=generate,submission,mlperf,mlperf-tiny,tiny,mlcommons,tiny-submission,mlperf-tiny-submission,mlcommons-tiny-submission ` + +*or* + +`cmr "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission"` + +`cmr "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'generate,submission,mlperf,mlperf-tiny,tiny,mlcommons,tiny-submission,mlperf-tiny-submission,mlcommons-tiny-submission',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="generate,submission,mlperf,mlperf-tiny,tiny,mlcommons,tiny-submission,mlperf-tiny-submission,mlcommons-tiny-submission"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=generate,submission,mlperf,mlperf-tiny,tiny,mlcommons,tiny-submission,mlperf-tiny-submission,mlcommons-tiny-submission) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+
+___
+### Dependencies on other CM scripts
+
+
+ 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-submission/_cm.json)***
+   * get,python3
+     * CM names: `--adr.['python', 'python3']...`
+     - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+   * get,sut,system-description
+     - CM script: [get-mlperf-inference-sut-description](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-sut-description)
+ 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-submission/customize.py)***
+ 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-submission/_cm.json)
+ 1. ***Run native script if exists***
+ 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-submission/_cm.json)
+ 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-submission/customize.py)***
+ 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-mlperf-tiny-submission/_cm.json)***
+   *
+     * `if (CM_MLPERF_RUN_STYLE == valid)`
+     - CM script: [test-script1](https://github.com/gfursin/cm-tests/tree/master/script/test-script1)
+     - CM script: [test-script2](https://github.com/gfursin/cm-tests/tree/master/script/test-script2)
+     - CM script: [test-script3](https://github.com/gfursin/cm-tests/tree/master/script/test-script3)
+     - CM script: [test-script4](https://github.com/gfursin/cm-tests/tree/master/script/test-script4)
+     - CM script: [test-script5](https://github.com/gfursin/cm-tests/tree/master/script/test-script5)
+     - CM script: [app-generate-image-dalle-mini-jax-py](https://github.com/cknowledge/tests/tree/master/cm/script/app-generate-image-dalle-mini-jax-py)
+     - CM script: [app-generate-image-stable-diffusion2-pytorch-cuda-py](https://github.com/cknowledge/tests/tree/master/cm/script/app-generate-image-stable-diffusion2-pytorch-cuda-py)
+     - CM script: [app-image-classification-onnx-py-ck](https://github.com/cknowledge/tests/tree/master/cm/script/app-image-classification-onnx-py-ck)
+     - CM script: [app-image-corner-detection-old](https://github.com/cknowledge/tests/tree/master/cm/script/app-image-corner-detection-old)
+     - CM script: [app-ipol-demo](https://github.com/cknowledge/tests/tree/master/cm/script/app-ipol-demo)
+     - CM script: [app-stable-diffusion-pytorch-cuda-py](https://github.com/cknowledge/tests/tree/master/cm/script/app-stable-diffusion-pytorch-cuda-py)
+     - CM script: [not-needed--get-android-cmdline-tools](https://github.com/cknowledge/tests/tree/master/cm/script/not-needed--get-android-cmdline-tools)
+     - CM script: [not-needed--install-android-cmdline-tools](https://github.com/cknowledge/tests/tree/master/cm/script/not-needed--install-android-cmdline-tools)
+     - CM script: [gui-llm](https://github.com/cknowledge/cm-private/tree/master/script/gui-llm)
+     - CM script: [run-refiners-hello-world](https://github.com/cknowledge/cm-reproduce/tree/master/script/run-refiners-hello-world)
+     - CM script: [reproduce-ieee-acm-micro2023-paper-22](https://github.com/ctuning/cm4research/tree/master/script/reproduce-ieee-acm-micro2023-paper-22)
+     - CM script:
[reproduce-ieee-acm-micro2023-paper-28](https://github.com/ctuning/cm4research/tree/master/script/reproduce-ieee-acm-micro2023-paper-28) + - CM script: [reproduce-ieee-acm-micro2023-paper-33](https://github.com/ctuning/cm4research/tree/master/script/reproduce-ieee-acm-micro2023-paper-33) + - CM script: [reproduce-ieee-acm-micro2023-paper-38](https://github.com/ctuning/cm4research/tree/master/script/reproduce-ieee-acm-micro2023-paper-38) + - CM script: [reproduce-ieee-acm-micro2023-paper-5](https://github.com/ctuning/cm4research/tree/master/script/reproduce-ieee-acm-micro2023-paper-5) + - CM script: [reproduce-ieee-acm-micro2023-paper-8](https://github.com/ctuning/cm4research/tree/master/script/reproduce-ieee-acm-micro2023-paper-8) + - CM script: [reproduce-ieee-acm-micro2023-paper-85](https://github.com/ctuning/cm4research/tree/master/script/reproduce-ieee-acm-micro2023-paper-85) + - CM script: [reproduce-ieee-acm-micro2023-paper-87](https://github.com/ctuning/cm4research/tree/master/script/reproduce-ieee-acm-micro2023-paper-87) + - CM script: [reproduce-ieee-acm-micro2023-paper-96](https://github.com/ctuning/cm4research/tree/master/script/reproduce-ieee-acm-micro2023-paper-96) + - CM script: [reproduce-ipol-paper-2022-439a](https://github.com/ctuning/cm4research/tree/master/script/reproduce-ipol-paper-2022-439a) + - CM script: [reproduce-neurips-paper-2022-arxiv-2204.09656](https://github.com/ctuning/cm4research/tree/master/script/reproduce-neurips-paper-2022-arxiv-2204.09656) + - CM script: [run-how-to-run-server](https://github.com/how-to-run/server/tree/master/script/run-how-to-run-server) + - CM script: [app-mlperf-inference-nvidia](https://github.com/cknowledge/cm-tests/tree/master/script/app-mlperf-inference-nvidia) + - CM script: [get-axs](https://github.com/cknowledge/cm-tests/tree/master/script/get-axs) + - CM script: [process-mlperf-inference-results](https://github.com/mlcommons/cm4mlperf-results/tree/master/script/process-mlperf-inference-results) + - CM script: [activate-python-venv](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/activate-python-venv) + - CM script: [add-custom-nvidia-system](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/add-custom-nvidia-system) + - CM script: [app-image-classification-onnx-py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-image-classification-onnx-py) + - CM script: [app-image-classification-tf-onnx-cpp](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-image-classification-tf-onnx-cpp) + - CM script: [app-image-classification-torch-py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-image-classification-torch-py) + - CM script: [app-image-classification-tvm-onnx-py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-image-classification-tvm-onnx-py) + - CM script: [app-image-corner-detection](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-image-corner-detection) + - CM script: [app-loadgen-generic-python](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-loadgen-generic-python) + - CM script: [app-mlperf-inference](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference) + - CM script: [app-mlperf-inference-ctuning-cpp-tflite](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-ctuning-cpp-tflite) + - CM script: [app-mlperf-inference-dummy](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-dummy) + - CM script: 
[app-mlperf-inference-intel](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-intel) + - CM script: [app-mlperf-inference-mlcommons-cpp](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-mlcommons-cpp) + - CM script: [app-mlperf-inference-mlcommons-python](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-mlcommons-python) + - CM script: [app-mlperf-inference-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-nvidia) + - CM script: [app-mlperf-inference-qualcomm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-qualcomm) + - CM script: [app-mlperf-training-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-training-nvidia) + - CM script: [app-mlperf-training-reference](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-training-reference) + - CM script: [app-stable-diffusion-onnx-py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-stable-diffusion-onnx-py) + - CM script: [benchmark-any-mlperf-inference-implementation](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-any-mlperf-inference-implementation) + - CM script: [benchmark-program](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-program) + - CM script: [benchmark-program-mlperf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-program-mlperf) + - CM script: [build-docker-image](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/build-docker-image) + - CM script: [build-dockerfile](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/build-dockerfile) + - CM script: [build-mlperf-inference-server-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/build-mlperf-inference-server-nvidia) + - CM script: [calibrate-model-for.qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/calibrate-model-for.qaic) + - CM script: [compile-model-for.qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/compile-model-for.qaic) + - CM script: [compile-program](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/compile-program) + - CM script: [convert-csv-to-md](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/convert-csv-to-md) + - CM script: [convert-ml-model-huggingface-to-onnx](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/convert-ml-model-huggingface-to-onnx) + - CM script: [copy-to-clipboard](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/copy-to-clipboard) + - CM script: [create-conda-env](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/create-conda-env) + - CM script: [create-fpgaconvnet-app-tinyml](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/create-fpgaconvnet-app-tinyml) + - CM script: [create-fpgaconvnet-config-tinyml](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/create-fpgaconvnet-config-tinyml) + - CM script: [create-patch](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/create-patch) + - CM script: [destroy-terraform](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/destroy-terraform) + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + - CM script: [detect-sudo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-sudo) + - CM 
script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract) + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + - CM script: [download-torrent](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-torrent) + - CM script: [dump-pip-freeze](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/dump-pip-freeze) + - CM script: [extract-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/extract-file) + - CM script: [fail](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/fail) + - CM script: [flash-tinyml-binary](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/flash-tinyml-binary) + - CM script: [generate-mlperf-inference-submission](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/generate-mlperf-inference-submission) + - CM script: [generate-mlperf-inference-user-conf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/generate-mlperf-inference-user-conf) + - CM script: [generate-mlperf-tiny-report](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/generate-mlperf-tiny-report) + - CM script: [generate-mlperf-tiny-submission](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/generate-mlperf-tiny-submission) + - CM script: [generate-nvidia-engine](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/generate-nvidia-engine) + - CM script: [get-android-sdk](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-android-sdk) + - CM script: [get-aocl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-aocl) + - CM script: [get-aws-cli](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-aws-cli) + - CM script: [get-bazel](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-bazel) + - CM script: [get-bert-squad-vocab](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-bert-squad-vocab) + - CM script: [get-blis](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-blis) + - CM script: [get-brew](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-brew) + - CM script: [get-ck](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ck) + - CM script: [get-ck-repo-mlops](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ck-repo-mlops) + - CM script: [get-cl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cl) + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + - CM script: [get-cmsis_5](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmsis_5) + - CM script: [get-compiler-flags](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-compiler-flags) + - CM script: [get-compiler-rust](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-compiler-rust) + - CM script: [get-conda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-conda) + - CM script: [get-croissant](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-croissant) + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + - CM script: [get-cuda-devices](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda-devices) + - CM script: [get-cudnn](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cudnn) + - CM script: [get-dataset-cifar10](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-cifar10) + - CM script: 
[get-dataset-cnndm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-cnndm) + - CM script: [get-dataset-coco](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-coco) + - CM script: [get-dataset-coco2014](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-coco2014) + - CM script: [get-dataset-cognata](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-cognata) + - CM script: [get-dataset-criteo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-criteo) + - CM script: [get-dataset-imagenet-aux](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-aux) + - CM script: [get-dataset-imagenet-calibration](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-calibration) + - CM script: [get-dataset-imagenet-helper](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-helper) + - CM script: [get-dataset-imagenet-train](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-train) + - CM script: [get-dataset-imagenet-val](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-val) + - CM script: [get-dataset-kits19](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-kits19) + - CM script: [get-dataset-librispeech](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-librispeech) + - CM script: [get-dataset-openimages](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openimages) + - CM script: [get-dataset-openimages-annotations](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openimages-annotations) + - CM script: [get-dataset-openimages-calibration](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openimages-calibration) + - CM script: [get-dataset-openorca](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openorca) + - CM script: [get-dataset-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-squad) + - CM script: [get-dataset-squad-vocab](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-squad-vocab) + - CM script: [get-dlrm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dlrm) + - CM script: [get-dlrm-data-mlperf-inference](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dlrm-data-mlperf-inference) + - CM script: [get-docker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-docker) + - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc) + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + - CM script: [get-github-cli](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-github-cli) + - CM script: [get-go](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-go) + - CM script: [get-google-saxml](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-google-saxml) + - CM script: [get-google-test](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-google-test) + - CM script: 
[get-ipol-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ipol-src) + - CM script: [get-java](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-java) + - CM script: [get-javac](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-javac) + - CM script: [get-lib-armnn](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-lib-armnn) + - CM script: [get-lib-dnnl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-lib-dnnl) + - CM script: [get-lib-protobuf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-lib-protobuf) + - CM script: [get-lib-qaic-api](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-lib-qaic-api) + - CM script: [get-llvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-llvm) + - CM script: [get-microtvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-microtvm) + - CM script: [get-ml-model-3d-unet-kits19](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-3d-unet-kits19) + - CM script: [get-ml-model-abtf-ssd-pytorch](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-abtf-ssd-pytorch) + - CM script: [get-ml-model-bert-base-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-bert-base-squad) + - CM script: [get-ml-model-bert-large-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-bert-large-squad) + - CM script: [get-ml-model-dlrm-terabyte](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-dlrm-terabyte) + - CM script: [get-ml-model-efficientnet-lite](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-efficientnet-lite) + - CM script: [get-ml-model-gptj](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-gptj) + - CM script: [get-ml-model-huggingface-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-huggingface-zoo) + - CM script: [get-ml-model-llama2](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-llama2) + - CM script: [get-ml-model-mobilenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-mobilenet) + - CM script: [get-ml-model-neuralmagic-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-neuralmagic-zoo) + - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50) + - CM script: [get-ml-model-retinanet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet) + - CM script: [get-ml-model-retinanet-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet-nvidia) + - CM script: [get-ml-model-rnnt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-rnnt) + - CM script: [get-ml-model-stable-diffusion](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-stable-diffusion) + - CM script: [get-ml-model-tiny-resnet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-tiny-resnet) + - CM script: [get-ml-model-using-imagenet-from-model-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-using-imagenet-from-model-zoo) + - CM script: [get-mlperf-inference-intel-scratch-space](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-intel-scratch-space) + - CM script: 
[get-mlperf-inference-loadgen](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-loadgen) + - CM script: [get-mlperf-inference-nvidia-common-code](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-nvidia-common-code) + - CM script: [get-mlperf-inference-nvidia-scratch-space](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-nvidia-scratch-space) + - CM script: [get-mlperf-inference-results](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-results) + - CM script: [get-mlperf-inference-results-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-results-dir) + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + - CM script: [get-mlperf-inference-submission-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-submission-dir) + - CM script: [get-mlperf-inference-sut-configs](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-sut-configs) + - CM script: [get-mlperf-inference-sut-description](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-sut-description) + - CM script: [get-mlperf-inference-utils](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-utils) + - CM script: [get-mlperf-logging](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-logging) + - CM script: [get-mlperf-power-dev](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-power-dev) + - CM script: [get-mlperf-tiny-eembc-energy-runner-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-tiny-eembc-energy-runner-src) + - CM script: [get-mlperf-tiny-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-tiny-src) + - CM script: [get-mlperf-training-nvidia-code](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-training-nvidia-code) + - CM script: [get-mlperf-training-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-training-src) + - CM script: [get-nvidia-docker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-nvidia-docker) + - CM script: [get-nvidia-mitten](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-nvidia-mitten) + - CM script: [get-onnxruntime-prebuilt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-onnxruntime-prebuilt) + - CM script: [get-openssl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-openssl) + - CM script: [get-preprocessed-dataset-criteo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-criteo) + - CM script: [get-preprocesser-script-generic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocesser-script-generic) + - CM script: [get-preprocessed-dataset-imagenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-imagenet) + - CM script: [get-preprocessed-dataset-kits19](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-kits19) + - CM script: [get-preprocessed-dataset-librispeech](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-librispeech) + - CM script: [get-preprocessed-dataset-openimages](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-openimages) + - CM script: 
[get-preprocessed-dataset-openorca](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-openorca) + - CM script: [get-preprocessed-dataset-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-squad) + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + - CM script: [get-qaic-apps-sdk](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-qaic-apps-sdk) + - CM script: [get-qaic-platform-sdk](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-qaic-platform-sdk) + - CM script: [get-qaic-software-kit](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-qaic-software-kit) + - CM script: [get-rclone](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-rclone) + - CM script: [get-rocm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-rocm) + - CM script: [get-spec-ptd](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-spec-ptd) + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + - CM script: [get-sys-utils-min](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-min) + - CM script: [get-target-device](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-target-device) + - CM script: [get-tensorrt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tensorrt) + - CM script: [get-terraform](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-terraform) + - CM script: [get-tvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tvm) + - CM script: [get-tvm-model](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tvm-model) + - CM script: [get-xilinx-sdk](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-xilinx-sdk) + - CM script: [get-zendnn](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-zendnn) + - CM script: [get-zephyr](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-zephyr) + - CM script: [get-zephyr-sdk](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-zephyr-sdk) + - CM script: [gui](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/gui) + - CM script: [import-mlperf-inference-to-experiment](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/import-mlperf-inference-to-experiment) + - CM script: [import-mlperf-tiny-to-experiment](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/import-mlperf-tiny-to-experiment) + - CM script: [import-mlperf-training-to-experiment](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/import-mlperf-training-to-experiment) + - CM script: [install-aws-cli](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-aws-cli) + - CM script: [install-bazel](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-bazel) + - CM script: [install-cmake-prebuilt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-cmake-prebuilt) + - CM script: [install-cuda-package-manager](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-cuda-package-manager) + - CM script: [install-cuda-prebuilt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-cuda-prebuilt) + - CM script: [install-gcc-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-gcc-src) + - CM script: 
[install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + - CM script: [install-gflags](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-gflags) + - CM script: [install-github-cli](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-github-cli) + - CM script: [install-ipex-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-ipex-from-src) + - CM script: [install-llvm-prebuilt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-llvm-prebuilt) + - CM script: [install-llvm-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-llvm-src) + - CM script: [install-mlperf-logging-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-mlperf-logging-from-src) + - CM script: [install-nccl-libs](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-nccl-libs) + - CM script: [install-numactl-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-numactl-from-src) + - CM script: [install-onednn-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-onednn-from-src) + - CM script: [install-onnxruntime-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-onnxruntime-from-src) + - CM script: [install-openssl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-openssl) + - CM script: [install-pip-package-for-cmind-python](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pip-package-for-cmind-python) + - CM script: [install-python-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-python-src) + - CM script: [install-python-venv](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-python-venv) + - CM script: [install-pytorch-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pytorch-from-src) + - CM script: [install-pytorch-kineto-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pytorch-kineto-from-src) + - CM script: [install-qaic-compute-sdk-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-qaic-compute-sdk-from-src) + - CM script: [install-rocm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-rocm) + - CM script: [install-tensorflow-for-c](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-tensorflow-for-c) + - CM script: [install-tensorflow-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-tensorflow-from-src) + - CM script: [install-terraform-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-terraform-from-src) + - CM script: [install-tflite-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-tflite-from-src) + - CM script: [install-torchvision-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-torchvision-from-src) + - CM script: [install-tpp-pytorch-extension](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-tpp-pytorch-extension) + - CM script: [install-transformers-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-transformers-from-src) + - CM script: [launch-benchmark](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/launch-benchmark) + - CM script: [prepare-training-data-bert](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/prepare-training-data-bert) + - CM 
script: [prepare-training-data-resnet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/prepare-training-data-resnet) + - CM script: [preprocess-mlperf-inference-submission](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/preprocess-mlperf-inference-submission) + - CM script: [print-croissant-desc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/print-croissant-desc) + - CM script: [print-hello-world](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/print-hello-world) + - CM script: [print-hello-world-java](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/print-hello-world-java) + - CM script: [print-hello-world-javac](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/print-hello-world-javac) + - CM script: [print-hello-world-py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/print-hello-world-py) + - CM script: [print-python-version](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/print-python-version) + - CM script: [process-ae-users](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/process-ae-users) + - CM script: [process-mlperf-accuracy](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/process-mlperf-accuracy) + - CM script: [prune-bert-models](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/prune-bert-models) + - CM script: [publish-results-to-dashboard](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/publish-results-to-dashboard) + - CM script: [pull-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/pull-git-repo) + - CM script: [push-csv-to-spreadsheet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/push-csv-to-spreadsheet) + - CM script: [push-mlperf-inference-results-to-github](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/push-mlperf-inference-results-to-github) + - CM script: [remote-run-commands](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/remote-run-commands) + - CM script: [reproduce-ipol-paper-2022-439](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-ipol-paper-2022-439) + - CM script: [reproduce-mlperf-octoml-tinyml-results](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-mlperf-octoml-tinyml-results) + - CM script: [reproduce-mlperf-training-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-mlperf-training-nvidia) + - CM script: [run-docker-container](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-docker-container) + - CM script: [run-mlperf-inference-app](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-app) + - CM script: [run-mlperf-inference-mobilenet-models](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-mobilenet-models) + - CM script: [run-mlperf-inference-submission-checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) + - CM script: [run-mlperf-power-client](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-power-client) + - CM script: [run-mlperf-power-server](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-power-server) + - CM script: [run-mlperf-training-submission-checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-training-submission-checker) + - CM script: [run-python](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-python) + - CM script: 
[run-terraform](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-terraform) + - CM script: [save-mlperf-inference-implementation-state](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/save-mlperf-inference-implementation-state) + - CM script: [set-device-settings-qaic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-device-settings-qaic) + - CM script: [set-echo-off-win](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-echo-off-win) + - CM script: [set-performance-mode](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-performance-mode) + - CM script: [set-sqlite-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-sqlite-dir) + - CM script: [tar-my-folder](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/tar-my-folder) + - CM script: [test-abtf-ssd-pytorch](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/test-abtf-ssd-pytorch) + - CM script: [test-download-and-extract-artifacts](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/test-download-and-extract-artifacts) + - CM script: [test-mlperf-inference-retinanet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/test-mlperf-inference-retinanet) + - CM script: [test-onnxruntime-cpp](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/test-onnxruntime-cpp) + - CM script: [test-set-sys-user-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/test-set-sys-user-cm) + - CM script: [truncate-mlperf-inference-accuracy-log](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) + - CM script: [upgrade-python-pip](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/upgrade-python-pip) + - CM script: [wrapper-reproduce-octoml-tinyml-submission](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/wrapper-reproduce-octoml-tinyml-submission) + +___ +### Script output +`cmr "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/generate-mlperf-tiny-submission/_cm.json b/script/generate-mlperf-tiny-submission/_cm.json new file mode 100644 index 0000000000..3ecbde6c30 --- /dev/null +++ b/script/generate-mlperf-tiny-submission/_cm.json @@ -0,0 +1,40 @@ +{ + "alias": "generate-mlperf-tiny-submission", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "MLPerf benchmark support", + "cache": false, + "deps": [ + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "tags": "get,sut,system-description" + } + ], + "post_deps": [ + { + "enable_if_env": { + "CM_MLPERF_RUN_STYLE": [ + "valid" + ] + } + } + ], + "tags": [ + "generate", + "submission", + "mlperf", + "mlperf-tiny", + "tiny", + "mlcommons", + "tiny-submission", + "mlperf-tiny-submission", + "mlcommons-tiny-submission" + ], + "uid": "04289b9fc07b42b6" +} diff --git a/script/generate-mlperf-tiny-submission/customize.py b/script/generate-mlperf-tiny-submission/customize.py new file mode 100644 index 0000000000..026c6d623f --- /dev/null +++ b/script/generate-mlperf-tiny-submission/customize.py @@ -0,0 +1,157 @@ +from cmind import utils +import os +import json +import shutil + +def preprocess(i): + return generate_submission(i) + + +############################################################################## + +def generate_submission(i): + + # Save current 
user directory + cur_dir=os.getcwd() + env = i['env'] + state = i['state'] + inp=i['input'] + results_dir = env['CM_MLPERF_RESULTS_DIR'] + + if 'CM_MLPERF_SUBMISSION_DIR' not in env: + env['CM_MLPERF_SUBMISSION_DIR'] = os.path.join(cur_dir, "results") + submission_dir = env['CM_MLPERF_SUBMISSION_DIR'] + if not os.path.isdir(submission_dir): + os.makedirs(submission_dir) + + print('* MLPerf tiny submission dir: {}'.format(submission_dir)) + print('* MLPerf tiny results dir: {}'.format(results_dir)) + results = [f for f in os.listdir(results_dir) if not os.path.isfile(os.path.join(results_dir, f))] + + division=inp.get('division','open') + + if division not in ['open','closed']: + return {'return':1, 'error':'"division" must be "open" or "closed"'} + system_meta = state['CM_SUT_META'] + division = system_meta['division'] + + print('* MLPerf tiny division: {}'.format(division)) + + path_submission_root = submission_dir + path_submission_division=os.path.join(path_submission_root, division) + if not os.path.isdir(path_submission_division): + os.makedirs(path_submission_division) + + # Check submitter + submitter = system_meta['submitter'] + env['CM_MLPERF_SUBMITTER'] = submitter + + print('* MLPerf tiny submitter: {}'.format(submitter)) + + path_submission=os.path.join(path_submission_division, submitter) + if not os.path.isdir(path_submission): + os.makedirs(path_submission) + + # SUT base + system=i.get('system','default') + + code_path = os.path.join(path_submission, "code") + for res in results: + parts = res.split("-") + backend = parts[0] + target = parts[1] + framework = backend + + print('* Target: {}'.format(target)) + print('* Framework: {}'.format(framework)) + result_path = os.path.join(results_dir, res) + platform_prefix = inp.get('platform_prefix', '') + if platform_prefix: + sub_res = platform_prefix + "-" + res + else: + sub_res = res + submission_path = os.path.join(path_submission, "results", sub_res) + measurement_path = os.path.join(path_submission, "measurements", sub_res) + compliance_path = os.path.join(path_submission, "compliance", sub_res) + system_path = os.path.join(path_submission, "systems") + submission_system_path = system_path + if not os.path.isdir(submission_system_path): + os.makedirs(submission_system_path) + system_file = os.path.join(submission_system_path, sub_res+".json") + with open(system_file, "w") as fp: + json.dump(system_meta, fp, indent=2) + + models = [f for f in os.listdir(result_path) if not os.path.isfile(os.path.join(result_path, f))] + for model in models: + result_model_path = os.path.join(result_path, model) + submission_model_path = os.path.join(submission_path, model) + measurement_model_path = os.path.join(measurement_path, model) + compliance_model_path = os.path.join(compliance_path, model) + code_model_path = os.path.join(code_path, model) + scenarios = [f for f in os.listdir(result_model_path) if not os.path.isfile(os.path.join(result_model_path, f))] + submission_code_path = code_model_path + if not os.path.isdir(submission_code_path): + os.makedirs(submission_code_path) + if not os.path.exists(os.path.join(submission_code_path, "README.md")): + with open(os.path.join(submission_code_path, "README.md"), mode='w'): pass #create an empty README + + print('* MLPerf inference model: {}'.format(model)) + for scenario in scenarios: + result_scenario_path = os.path.join(result_model_path, scenario) + submission_scenario_path = os.path.join(submission_model_path, scenario) + measurement_scenario_path = os.path.join(measurement_model_path, 
scenario)
+                compliance_scenario_path = os.path.join(compliance_model_path, scenario)
+
+                modes = [f for f in os.listdir(result_scenario_path) if not os.path.isfile(os.path.join(result_scenario_path, f))]
+                for mode in modes:
+                    result_mode_path = os.path.join(result_scenario_path, mode)
+                    submission_mode_path = os.path.join(submission_scenario_path, mode)
+                    submission_results_path = submission_mode_path
+                    submission_measurement_path = measurement_scenario_path
+                    submission_compliance_path = os.path.join(compliance_scenario_path, mode)
+                    if mode=='performance':
+                        result_mode_path = os.path.join(result_mode_path, 'run_1')
+                        submission_results_path = os.path.join(submission_mode_path, 'run_1')
+                    if not os.path.isdir(submission_results_path):
+                        os.makedirs(submission_results_path)
+                    if not os.path.isdir(submission_measurement_path):
+                        os.makedirs(submission_measurement_path)
+                    if not os.path.isdir(submission_compliance_path):
+                        os.makedirs(submission_compliance_path)
+                    mlperf_inference_conf_path = os.path.join(result_mode_path, "mlperf.conf")
+                    if os.path.exists(mlperf_inference_conf_path):
+                        shutil.copy(mlperf_inference_conf_path, os.path.join(submission_measurement_path, 'mlperf.conf'))
+                    user_conf_path = os.path.join(result_mode_path, "user.conf")
+                    if os.path.exists(user_conf_path):
+                        shutil.copy(user_conf_path, os.path.join(submission_measurement_path, 'user.conf'))
+                    measurements_json_path = os.path.join(result_mode_path, "measurements.json")
+                    # Check the measurements file itself (not user.conf) before copying it
+                    if os.path.exists(measurements_json_path):
+                        shutil.copy(measurements_json_path, os.path.join(submission_measurement_path, sub_res+'.json'))
+                    files = []
+                    readme = False
+                    for f in os.listdir(result_mode_path):
+                        if f.startswith('mlperf_'):
+                            files.append(f)
+                        if f == "README.md":
+                            shutil.copy(os.path.join(result_mode_path, f), os.path.join(submission_measurement_path, f))
+                            readme = True
+
+                    if mode == "accuracy":
+                        if os.path.exists(os.path.join(result_mode_path, "accuracy.txt")):
+                            files.append("accuracy.txt")
+
+                    for f in files:
+                        print(' * ' + f)
+                        p_target = os.path.join(submission_results_path, f)
+                        shutil.copy(os.path.join(result_mode_path, f), p_target)
+
+                    if not readme:
+                        with open(os.path.join(submission_measurement_path, "README.md"), mode='w'): pass # create an empty README
+
+    return {'return':0}
+
+def postprocess(i):
+
+    env = i['env']
+
+    return {'return':0}
diff --git a/script/generate-nvidia-engine/README-about.md b/script/generate-nvidia-engine/README-about.md
new file mode 100644
index 0000000000..b8cb79e7a6
--- /dev/null
+++ b/script/generate-nvidia-engine/README-about.md
@@ -0,0 +1 @@
+This CM script is in draft stage
diff --git a/script/generate-nvidia-engine/README.md b/script/generate-nvidia-engine/README.md
new file mode 100644
index 0000000000..1224a924fe
--- /dev/null
+++ b/script/generate-nvidia-engine/README.md
@@ -0,0 +1,242 @@
+Automatically generated README for this automation recipe: **generate-nvidia-engine**
+
+Category: **MLPerf benchmark support**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=generate-nvidia-engine,0eef9f05b272401f) ]*
+
+---
+
+This CM script is in draft stage
+
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-nvidia-engine)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see in above meta description): *generate,engine,mlperf,inference,nvidia*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "generate engine mlperf inference nvidia" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=generate,engine,mlperf,inference,nvidia`
+
+`cm run script --tags=generate,engine,mlperf,inference,nvidia[,variations] [--input_flags]`
+
+*or*
+
+`cmr "generate engine mlperf inference nvidia"`
+
+`cmr "generate engine mlperf inference nvidia [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'generate,engine,mlperf,inference,nvidia',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
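As a concrete illustration of the template above, the sketch below selects the `_cuda` and `_resnet50` variations by appending them to the tags and sets the `output_dir` flag documented under "Script flags mapped to environment"; the output path is a hypothetical placeholder, and the call assumes CM plus this repository are already installed.

```python
import cmind

# Select variations by appending them to the tags; 'output_dir' maps to
# CM_MLPERF_OUTPUT_DIR (see "Script flags mapped to environment" below).
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'generate,engine,mlperf,inference,nvidia,_cuda,_resnet50',
                  'output_dir': '/tmp/nvidia-engines',  # hypothetical path
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```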
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="generate,engine,mlperf,inference,nvidia"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=generate,engine,mlperf,inference,nvidia) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "generate engine mlperf inference nvidia[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_batch_size.#` + - Environment variables: + - *CM_MODEL_BATCH_SIZE*: `None` + - Workflow: + * `_copy_streams.#` + - Environment variables: + - *CM_GPU_COPY_STREAMS*: `None` + - Workflow: + * `_cuda` + - Environment variables: + - *CM_MLPERF_DEVICE*: `gpu` + - *CM_MLPERF_DEVICE_LIB_NAMESPEC*: `cudart` + - Workflow: + +
+ + + * Group "**device**" +
+ Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `cpu` + - Workflow: + +
+ + + * Group "**model**" +
+ Click here to expand this section. + + * **`_resnet50`** (default) + - Environment variables: + - *CM_MODEL*: `resnet50` + - Workflow: + * `_retinanet` + - Environment variables: + - *CM_MODEL*: `retinanet` + - Workflow: + +
+ + +#### Default variations + +`_cpu,_resnet50` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "output_dir":...})
+```
+
+#### Default environment
+
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_BATCH_COUNT: `1` +* CM_BATCH_SIZE: `1` +* CM_LOADGEN_SCENARIO: `Offline` +* CM_GPU_COPY_STREAMS: `1` +* CM_TENSORRT_WORKSPACE_SIZE: `4194304` + +
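As a sketch of the note above (assuming the Python API mirrors the `--env.KEY=VALUE` CLI flags), the defaults could be overridden per run via an `env` dictionary:

```python
import cmind

# Override two of the defaults listed above; equivalent to passing
# --env.CM_BATCH_SIZE=2 --env.CM_GPU_COPY_STREAMS=2 on the command line.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'generate,engine,mlperf,inference,nvidia',
                  'env': {'CM_BATCH_SIZE': '2',
                          'CM_GPU_COPY_STREAMS': '2'},
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```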
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-nvidia-engine/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,cuda,_cudnn + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,tensorrt + - CM script: [get-tensorrt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tensorrt) + * get,generic-python-lib,_numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_pycuda + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,loadgen + * CM names: `--adr.['loadgen']...` + - CM script: [get-mlperf-inference-loadgen](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-loadgen) + * get,mlcommons,inference,src + * CM names: `--adr.['inference-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + * get,nvidia,mlperf,inference,common-code + * CM names: `--adr.['nvidia-inference-common-code']...` + - CM script: [get-mlperf-inference-nvidia-common-code](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-nvidia-common-code) + * get,dataset,preprocessed,imagenet,_NCHW + * `if (CM_MODEL == resnet50)` + * CM names: `--adr.['imagenet-preprocessed']...` + - CM script: [get-preprocessed-dataset-imagenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-imagenet) + * get,ml-model,resnet50,_onnx + * `if (CM_MODEL == resnet50)` + * CM names: `--adr.['ml-model', 'resnet50-model']...` + - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50) + * get,dataset,preprocessed,openimages,_validation,_NCHW + * `if (CM_MODEL == retinanet)` + * CM names: `--adr.['openimages-preprocessed']...` + - CM script: [get-preprocessed-dataset-openimages](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-openimages) + * get,ml-model,retinanet,_onnx,_fp32 + * `if (CM_MODEL == retinanet)` + * CM names: `--adr.['ml-model', 'retinanet-model']...` + - CM script: [get-ml-model-retinanet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-nvidia-engine/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-nvidia-engine/_cm.yaml) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-nvidia-engine/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-nvidia-engine/_cm.yaml) + 1. 
***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-nvidia-engine/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/generate-nvidia-engine/_cm.yaml) + +___ +### Script output +`cmr "generate engine mlperf inference nvidia [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +* `CM_MLPERF_*` +#### New environment keys auto-detected from customize diff --git a/script/generate-nvidia-engine/_cm.yaml b/script/generate-nvidia-engine/_cm.yaml new file mode 100644 index 0000000000..7a6852447d --- /dev/null +++ b/script/generate-nvidia-engine/_cm.yaml @@ -0,0 +1,152 @@ +# Identification of this CM script +alias: generate-nvidia-engine +uid: 0eef9f05b272401f + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "MLPerf benchmark support" + + +# User-friendly tags to find this CM script +tags: + - generate + - engine + - mlperf + - inference + - nvidia + +# Default environment +default_env: + CM_BATCH_COUNT: '1' + CM_BATCH_SIZE: '1' + CM_LOADGEN_SCENARIO: 'Offline' + CM_GPU_COPY_STREAMS: '1' + CM_TENSORRT_WORKSPACE_SIZE: '4194304' + +# Map script inputs to environment variables +input_mapping: + output_dir: CM_MLPERF_OUTPUT_DIR + +new_env_keys: + - CM_MLPERF_* + - CM_DATASET_* + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect CUDA + - tags: get,cuda,_cudnn + + # Detect Tensorrt + - tags: get,tensorrt + + # Detect numpy + - tags: get,generic-python-lib,_numpy + + # Detect numpy + - tags: get,generic-python-lib,_pycuda + + + ######################################################################## + # Install MLPerf inference dependencies + + # Install MLPerf loadgen + - tags: get,loadgen + names: + - loadgen + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + # Download Nvidia Submission Code + - tags: get,nvidia,mlperf,inference,common-code + names: + - nvidia-inference-common-code + + + ######################################################################## + # Install ResNet50 model (ONNX) and ImageNet + + - enable_if_env: + CM_MODEL: + - resnet50 + names: + - imagenet-preprocessed + tags: get,dataset,preprocessed,imagenet,_NCHW + + - enable_if_env: + CM_MODEL: + - resnet50 + names: + - ml-model + - resnet50-model + tags: get,ml-model,resnet50,_onnx + + + ######################################################################## + # Install RetinaNet model (ONNX) and OpenImages + + - enable_if_env: + CM_MODEL: + - retinanet + names: + - openimages-preprocessed + tags: get,dataset,preprocessed,openimages,_validation,_NCHW + + - enable_if_env: + CM_MODEL: + - retinanet + names: + - ml-model + - retinanet-model + tags: get,ml-model,retinanet,_onnx,_fp32 + + + + +# Variations to customize dependencies +variations: + # Target devices + cpu: + group: device + default: true + env: + CM_MLPERF_DEVICE: cpu + cuda: + env: + CM_MLPERF_DEVICE: gpu + CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + + + # Reference MLPerf models + resnet50: + group: model + default: true + env: + CM_MODEL: resnet50 + + retinanet: + group: model + env: + CM_MODEL: retinanet + + batch_size.#: + env: + CM_MODEL_BATCH_SIZE: # + + copy_streams.#: + env: + CM_GPU_COPY_STREAMS: # diff --git 
a/script/generate-nvidia-engine/customize.py b/script/generate-nvidia-engine/customize.py
new file mode 100644
index 0000000000..9fcaff093c
--- /dev/null
+++ b/script/generate-nvidia-engine/customize.py
@@ -0,0 +1,34 @@
+from cmind import utils
+import os
+import shutil
+
+def preprocess(i):
+
+    os_info = i['os_info']
+    env = i['env']
+
+    if os_info['platform'] == 'windows':
+        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+
+    if 'CM_MODEL' not in env:
+        return {'return': 1, 'error': 'Please select a variation specifying the model to run'}
+    if 'CM_MLPERF_DEVICE' not in env:
+        return {'return': 1, 'error': 'Please select a variation specifying the device to run on'}
+
+    scenarios = env['CM_LOADGEN_SCENARIO'] # will later extend to other scenarios
+
+    # Assemble the command line for code/main.py (run.sh currently hardcodes
+    # an equivalent call)
+    cmd = " --action generate_engines " +\
+          " --benchmarks " + env['CM_MODEL'] + \
+          " --scenarios " + scenarios + \
+          " --gpu_batch_size=" + env['CM_MODEL_BATCH_SIZE'] +\
+          " --gpu_copy_streams=" + env['CM_GPU_COPY_STREAMS'] +\
+          " --workspace_size=" + env['CM_TENSORRT_WORKSPACE_SIZE']
+
+    return {'return':0}
+
+def postprocess(i):
+
+    env = i['env']
+    return {'return':0}
diff --git a/script/generate-nvidia-engine/run.sh b/script/generate-nvidia-engine/run.sh
new file mode 100644
index 0000000000..c5dd2d9a44
--- /dev/null
+++ b/script/generate-nvidia-engine/run.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+nvidia_code_path=${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH}
+cd ${nvidia_code_path}
+scenarios=${CM_TMP_LOADGEN_SCENARIOS}
+#batchsize=$
+python3 code/main.py --action generate_engines --benchmarks resnet50 --scenarios $scenarios --gpu_batch_size=256 --gpu_copy_streams=1 --workspace_size=4194304
diff --git a/script/get-android-sdk/README-extra.md b/script/get-android-sdk/README-extra.md
new file mode 100644
index 0000000000..c15c6df33e
--- /dev/null
+++ b/script/get-android-sdk/README-extra.md
@@ -0,0 +1,3 @@
+# About
+
+https://developer.android.com/studio#command-line-tools-only
diff --git a/script/get-android-sdk/README.md b/script/get-android-sdk/README.md
new file mode 100644
index 0000000000..e9345d8316
--- /dev/null
+++ b/script/get-android-sdk/README.md
@@ -0,0 +1,153 @@
+Automatically generated README for this automation recipe: **get-android-sdk**
+
+Category: **Detection or installation of tools and artifacts**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-android-sdk,8c5b4b83d49c441a) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-android-sdk)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,android,sdk,android-sdk*
+* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get android sdk android-sdk" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,android,sdk,android-sdk` + +`cm run script --tags=get,android,sdk,android-sdk [--input_flags]` + +*or* + +`cmr "get android sdk android-sdk"` + +`cmr "get android sdk android-sdk " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,android,sdk,android-sdk',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
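If the call succeeds, the detected locations should be exposed through the keys listed under "New environment keys" at the end of this README; a sketch assuming the result carries them in a `new_env` dictionary (as the `-j` JSON output suggests):

```python
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,android,sdk,android-sdk',
                  'out': 'con'})
if r['return'] > 0:
    raise RuntimeError(r['error'])

# Exported keys (see "New environment keys" below): ANDROID_HOME,
# ANDROID_NDK_HOME, CM_ANDROID_HOME and the updated +PATH.
new_env = r.get('new_env', {})
print(new_env.get('ANDROID_HOME', ''))
print(new_env.get('ANDROID_NDK_HOME', ''))
```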
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,android,sdk,android-sdk"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,android,sdk,android-sdk) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get android sdk android-sdk" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--android_cmake_version=value` → `CM_ANDROID_CMAKE_VERSION=value`
+* `--android_ndk_version=value` → `CM_ANDROID_NDK_VERSION=value`
+* `--android_version=value` → `CM_ANDROID_VERSION=value`
+* `--build_tools_version=value` → `CM_ANDROID_BUILD_TOOLS_VERSION=value`
+* `--cmdline_tools_version=value` → `CM_ANDROID_CMDLINE_TOOLS_VERSION=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "android_cmake_version":...})
+```
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_ANDROID_BUILD_TOOLS_VERSION: `29.0.3` +* CM_ANDROID_CMAKE_VERSION: `3.6.4111459` +* CM_ANDROID_CMDLINE_TOOLS_URL: `https://dl.google.com/android/repository/commandlinetools-${CM_ANDROID_CMDLINE_TOOLS_OS}-${CM_ANDROID_CMDLINE_TOOLS_VERSION}_latest.zip` +* CM_ANDROID_CMDLINE_TOOLS_VERSION: `9123335` +* CM_ANDROID_NDK_VERSION: `21.3.6528147` +* CM_ANDROID_VERSION: `30` + +
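The flags from the previous section can pin any of these versions explicitly; the sketch below simply re-pins two of the defaults, so the values are illustrative:

```python
import cmind

# 'android_ndk_version' and 'build_tools_version' map to
# CM_ANDROID_NDK_VERSION and CM_ANDROID_BUILD_TOOLS_VERSION (see above).
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,android,sdk,android-sdk',
                  'android_ndk_version': '21.3.6528147',
                  'build_tools_version': '29.0.3',
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```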
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-android-sdk/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,java + - CM script: [get-java](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-java) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-android-sdk/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-android-sdk/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-android-sdk/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-android-sdk/_cm.json) + +___ +### Script output +`cmr "get android sdk android-sdk " [--input_flags] -j` +#### New environment keys (filter) + +* `+PATH` +* `ANDROID_HOME` +* `ANDROID_NDK_HOME` +* `CM_ANDROID_HOME` +#### New environment keys auto-detected from customize + +* `CM_ANDROID_HOME` \ No newline at end of file diff --git a/script/get-android-sdk/_cm.json b/script/get-android-sdk/_cm.json new file mode 100644 index 0000000000..4009ff5d92 --- /dev/null +++ b/script/get-android-sdk/_cm.json @@ -0,0 +1,43 @@ +{ + "alias": "get-android-sdk", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "default_env": { + "CM_ANDROID_BUILD_TOOLS_VERSION": "29.0.3", + "CM_ANDROID_CMAKE_VERSION": "3.6.4111459", + "CM_ANDROID_CMDLINE_TOOLS_URL": "https://dl.google.com/android/repository/commandlinetools-${CM_ANDROID_CMDLINE_TOOLS_OS}-${CM_ANDROID_CMDLINE_TOOLS_VERSION}_latest.zip", + "CM_ANDROID_CMDLINE_TOOLS_VERSION": "9123335", + "CM_ANDROID_NDK_VERSION": "21.3.6528147", + "CM_ANDROID_VERSION": "30" + }, + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "get,java" + } + ], + "input_mapping": { + "android_cmake_version": "CM_ANDROID_CMAKE_VERSION", + "android_ndk_version": "CM_ANDROID_NDK_VERSION", + "android_version": "CM_ANDROID_VERSION", + "build_tools_version": "CM_ANDROID_BUILD_TOOLS_VERSION", + "cmdline_tools_version": "CM_ANDROID_CMDLINE_TOOLS_VERSION" + }, + "new_env_keys": [ + "CM_ANDROID_HOME", + "ANDROID_HOME", + "ANDROID_NDK_HOME", + "+PATH" + ], + "tags": [ + "get", + "android", + "sdk", + "android-sdk" + ], + "uid": "8c5b4b83d49c441a" +} diff --git a/script/get-android-sdk/customize.py b/script/get-android-sdk/customize.py new file mode 100644 index 0000000000..88248df9d9 --- /dev/null +++ b/script/get-android-sdk/customize.py @@ -0,0 +1,169 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + platform = os_info['platform'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + run_script_input = i['run_script_input'] + + # Check if ANDROID_HOME is already set + android_home = os.environ.get('ANDROID_HOME','').strip() + + # We are inside CM cache entry + cur_dir = os.getcwd() + + if android_home == '': + android_home = cur_dir + + env['CM_ANDROID_HOME']=android_home + env['ANDROID_HOME']=android_home + + paths = [] + + # Check SDK manager + ext = '' + host_os_for_android = 
'linux' + host_os_for_ndk = 'linux-x86_64' + if platform == "windows": + host_os_for_android = 'win' + host_os_for_ndk = 'windows-x86_64' + ext = '.bat' + elif platform == "darwin": + host_os_for_android = 'mac' + + sdk_manager_file = 'sdkmanager'+ext + + print ('') + + found = False + + for x in ['cmdline-tools', 'cmdline-tools'+os.sep+'tools', 'tools']: + sdk_manager_path = os.path.join(android_home, x, 'bin', sdk_manager_file) + if os.path.isfile(sdk_manager_path): + found = True + break + + if not found: + # Some magic for cmdline tools (need specific directory) + new_path = os.path.join(android_home, 'cmdline-tools') + if not os.path.isdir(new_path): + os.makedirs(new_path) + + os.chdir(new_path) + + cmdline_tools_version=env.get('CM_ANDROID_CMDLINE_TOOLS_VERSION','') + + env['CM_ANDROID_CMDLINE_TOOLS_VERSION'] = cmdline_tools_version + + package_url = env['CM_ANDROID_CMDLINE_TOOLS_URL'] + package_url = package_url.replace('${CM_ANDROID_CMDLINE_TOOLS_OS}', host_os_for_android) + package_url = package_url.replace('${CM_ANDROID_CMDLINE_TOOLS_VERSION}', cmdline_tools_version) + + env['CM_ANDROID_CMDLINE_TOOLS_URL'] = package_url + + print ('') + print ('Downloading from {} ...'.format(package_url)) + + cm = automation.cmind + + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', + 'url':package_url}) + if r['return']>0: return r + + filename = r['filename'] + + print ('Unzipping file {}'.format(filename)) + + r = cm.access({'action':'unzip_file', + 'automation':'utils,dc2743f8450541e3', + 'filename':filename, + 'strip_folders':0}) + if r['return']>0: return r + +# if os.path.isfile(filename): +# print ('Removing file {}'.format(filename)) +# os.remove(filename) + + os.rename('cmdline-tools', 'tools') + + os.chdir(cur_dir) + + sdk_manager_path = os.path.join(android_home, 'cmdline-tools', 'tools', 'bin', sdk_manager_file) + + sdk_manager_dir = os.path.dirname(sdk_manager_path) + + env['CM_ANDROID_SDK_MANAGER_BIN'] = sdk_manager_file + env['CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH'] = sdk_manager_path + + env['CM_GET_DEPENDENT_CACHED_PATH'] = cur_dir + + paths.append(sdk_manager_dir) + + # Prepare SDK + print ('Preparing Android SDK manager ...') + + r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'prepare-sdk-manager'}) + if r['return']>0: return r + + + build_tools_version=env['CM_ANDROID_BUILD_TOOLS_VERSION'] + + path_build_tools = os.path.join(android_home, 'build-tools', build_tools_version) + env['CM_ANDROID_BUILD_TOOLS_PATH']=path_build_tools + paths.append(path_build_tools) + + + cmake_version=env['CM_ANDROID_CMAKE_VERSION'] + + path_cmake = os.path.join(android_home, 'cmake', cmake_version, 'bin') + env['CM_ANDROID_CMAKE_PATH']=path_cmake + paths.append(path_cmake) + + + path_emulator = os.path.join(android_home, 'emulator') + env['CM_ANDROID_EMULATOR_PATH']=path_emulator + paths.append(path_emulator) + + path_platform_tools = os.path.join(android_home, 'platform-tools') + env['CM_ANDROID_PLATFORM_TOOLS_PATH']=path_platform_tools + paths.append(path_platform_tools) + + + android_version=env['CM_ANDROID_VERSION'] + + path_platforms = os.path.join(android_home, 'platforms', android_version) + env['CM_ANDROID_PLATFORMS_PATH']=path_platforms + + + path_tools = os.path.join(android_home, 'tools') + env['CM_ANDROID_TOOLS_PATH']=path_tools + paths.append(path_tools) + + android_ndk_version=env['CM_ANDROID_NDK_VERSION'] + + # Check Android NDK + path_ndk = os.path.join(android_home, 'ndk', android_ndk_version) + 
env['CM_ANDROID_NDK_PATH']=path_ndk + env['ANDROID_NDK_HOME']=path_ndk + + + + path_ndk_compiler = os.path.join(path_ndk, 'toolchains', 'llvm', 'prebuilt', host_os_for_ndk, 'bin') + env['CM_ANDROID_LLVM_PATH']=path_ndk_compiler + env['CM_ANDROID_LLVM_CLANG_BIN_WITH_PATH']=os.path.join(path_ndk_compiler, 'clang.exe') + paths.append(path_ndk_compiler) + + + + env['+PATH'] = paths + + return {'return':0} #, 'version': version} diff --git a/script/get-android-sdk/prepare-sdk-manager.bat b/script/get-android-sdk/prepare-sdk-manager.bat new file mode 100644 index 0000000000..5b1add122a --- /dev/null +++ b/script/get-android-sdk/prepare-sdk-manager.bat @@ -0,0 +1,27 @@ +echo %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% + +call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% --version > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +more tmp-ver.out + +call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% --licenses +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% ^ + "tools" ^ + "platform-tools" ^ + "extras;android;m2repository" ^ + "extras;google;m2repository" ^ + "extras;google;google_play_services" ^ + "build-tools;%CM_ANDROID_BUILD_TOOLS_VERSION%" +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "platforms;android-%CM_ANDROID_VERSION%" +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "cmake;%CM_ANDROID_CMAKE_VERSION%" +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "ndk;%CM_ANDROID_NDK_VERSION%" +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-android-sdk/prepare-sdk-manager.sh b/script/get-android-sdk/prepare-sdk-manager.sh new file mode 100644 index 0000000000..8613a43b14 --- /dev/null +++ b/script/get-android-sdk/prepare-sdk-manager.sh @@ -0,0 +1,26 @@ +echo ${JAVA_HOME} +echo ${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} + +${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} --version > tmp-ver.out +cat tmp-ver.out + +${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} --licenses +test $? -eq 0 || exit 1 + +${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} \ + "tools" \ + "platform-tools" \ + "extras;android;m2repository" \ + "extras;google;m2repository" \ + "extras;google;google_play_services" \ + "build-tools;${CM_ANDROID_BUILD_TOOLS_VERSION}" +test $? -eq 0 || exit 1 + +${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "platforms;android-${CM_ANDROID_VERSION}" +test $? -eq 0 || exit 1 + +${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "cmake;${CM_ANDROID_CMAKE_VERSION}" +test $? -eq 0 || exit 1 + +${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "ndk;${CM_ANDROID_NDK_VERSION}" +test $? 
-eq 0 || exit 1 diff --git a/script/get-aocl/README-extra.md b/script/get-aocl/README-extra.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/script/get-aocl/README.md b/script/get-aocl/README.md new file mode 100644 index 0000000000..48e352bb66 --- /dev/null +++ b/script/get-aocl/README.md @@ -0,0 +1,139 @@ +Automatically generated README for this automation recipe: **get-aocl** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-aocl,a65d3088f57d413d) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aocl)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,lib,aocl,amd-optimized,amd* +* Output cached? *true* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get lib aocl amd-optimized amd" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,lib,aocl,amd-optimized,amd` + +`cm run script --tags=get,lib,aocl,amd-optimized,amd ` + +*or* + +`cmr "get lib aocl amd-optimized amd"` + +`cmr "get lib aocl amd-optimized amd " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,lib,aocl,amd-optimized,amd',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
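Since this recipe defines explicit versions (see the Versions section later in this README), a specific one can be requested; a sketch assuming the standard `version` input of CM scripts:

```python
import cmind

# Request the 'master' version, which this recipe's meta maps to
# CM_GIT_CHECKOUT=master (the default is 4.0).
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,lib,aocl,amd-optimized,amd',
                  'version': 'master',
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```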
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,lib,aocl,amd-optimized,amd"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,lib,aocl,amd-optimized,amd) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get lib aocl amd-optimized amd" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+ +#### Versions +Default version: `4.0` + +* `4.0` +* `master` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aocl/_cm.json)*** + * get,generic,sys-util,_libmpfr-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic-python-lib,_scons + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,git,_repo.https://github.com/amd/aocl-libm-ose + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aocl/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aocl/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aocl/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aocl/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aocl/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aocl/_cm.json) + +___ +### Script output +`cmr "get lib aocl amd-optimized amd " -j` +#### New environment keys (filter) + +* `+LD_LIBRARY_PATH` +* `+LIBRARY_PATH` +* `CM_AOCL_BUILD_PATH` +* `CM_AOCL_LIB_PATH` +* `CM_AOCL_SRC_PATH` +#### New environment keys auto-detected from customize + +* `CM_AOCL_BUILD_PATH` +* `CM_AOCL_LIB_PATH` +* `CM_AOCL_SRC_PATH` \ No newline at end of file diff --git a/script/get-aocl/_cm.json b/script/get-aocl/_cm.json new file mode 100644 index 0000000000..8f04231309 --- /dev/null +++ b/script/get-aocl/_cm.json @@ -0,0 +1,51 @@ +{ + "alias": "get-aocl", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": "true", + "category": "Compiler automation", + "tags": [ + "get", + "lib", + "aocl", + "amd-optimized", + "amd" + ], + "new_env_keys": [ + "CM_AOCL_BUILD_PATH", + "CM_AOCL_SRC_PATH", + "CM_AOCL_LIB_PATH", + "+LD_LIBRARY_PATH", + "+LIBRARY_PATH" + ], + "default_version": "4.0", + "uid": "a65d3088f57d413d", + "deps": [ + { + "tags": "get,generic,sys-util,_libmpfr-dev" + }, + { + "tags": "get,generic-python-lib,_scons" + }, + { + "tags": "get,git,_repo.https://github.com/amd/aocl-libm-ose", + "force_env_keys": [ + "CM_GIT_CHECKOUT" + ] + } + ], + "versions": { + "master": { + "env": { + "CM_GIT_CHECKOUT": "master" + } + }, + "4.0": { + "env": { + "CM_GIT_CHECKOUT": "aocl-4.0" + } + } + }, + "variations": { + } +} diff --git a/script/get-aocl/customize.py b/script/get-aocl/customize.py new file mode 100644 index 0000000000..e67702b8ca --- /dev/null +++ b/script/get-aocl/customize.py @@ -0,0 +1,31 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env['CM_AOCL_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] + env['CM_AOCL_BUILD_PATH'] = os.path.join(env['CM_GIT_REPO_CHECKOUT_PATH'], "build") + aocl_lib_path = 
os.path.join(env['CM_GIT_REPO_CHECKOUT_PATH'], "build", "aocl-release", "src") + env['CM_AOCL_LIB_PATH'] = aocl_lib_path + env['+LIBRARY_PATH'] = [ aocl_lib_path ] if '+LIBRARY_PATH' not in env else env['+LIBRARY_PATH'] + [ aocl_lib_path ] + env['+LD_LIBRARY_PATH'] = [ aocl_lib_path ] if '+LD_LIBRARY_PATH' not in env else env['+LD_LIBRARY_PATH'] + [ aocl_lib_path ] + + return {'return':0} + + diff --git a/script/get-aocl/run.sh b/script/get-aocl/run.sh new file mode 100644 index 0000000000..1b00dd9fd0 --- /dev/null +++ b/script/get-aocl/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash +if [[ -z ${CM_GIT_REPO_CHECKOUT_PATH} ]]; then + echo "Git repository not found!" + exit 1 +fi +cd ${CM_GIT_REPO_CHECKOUT_PATH} +scons +test $? -eq 0 || exit $? + diff --git a/script/get-aria2/README-extra.md b/script/get-aria2/README-extra.md new file mode 100644 index 0000000000..40539d77f2 --- /dev/null +++ b/script/get-aria2/README-extra.md @@ -0,0 +1,9 @@ +# Some commands + +```bash +cmr "get aria2" --version=1.37.0 +cmr "get aria2" --install +cmr "get aria2" --path={path to the directory with aria2} +cmr "get aria2" --input={full path to aria2} +cmr "get aria2" --shell +``` diff --git a/script/get-aria2/README.md b/script/get-aria2/README.md new file mode 100644 index 0000000000..15f92352ec --- /dev/null +++ b/script/get-aria2/README.md @@ -0,0 +1,150 @@ +Automatically generated README for this automation recipe: **get-aria2** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-aria2,d83419a90a0c40d0) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aria2)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *get,aria2,get-aria2* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get aria2 get-aria2" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,aria2,get-aria2` + +`cm run script --tags=get,aria2,get-aria2 [--input_flags]` + +*or* + +`cmr "get aria2 get-aria2"` + +`cmr "get aria2 get-aria2 " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,aria2,get-aria2',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,aria2,get-aria2"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,aria2,get-aria2) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get aria2 get-aria2" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--install=value` → `CM_FORCE_INSTALL=value`
+* `--src=value` → `CM_ARIA2_BUILD_FROM_SRC=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "install":...})
+```
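Combining these flags with the version selection shown in the notes above, a from-source build might look as follows; this is a sketch, and `src='True'` assumes customize.py's case-insensitive comparison of CM_ARIA2_BUILD_FROM_SRC against 'true' (see the customize.py added later in this patch):

```python
import cmind

# Force a from-source build of a specific aria2 version; 'src' maps to
# CM_ARIA2_BUILD_FROM_SRC and 'install' would map to CM_FORCE_INSTALL.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,aria2,get-aria2',
                  'version': '1.37.0',
                  'src': 'True',
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```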
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aria2/_cm.yaml)*** + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aria2/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aria2/_cm.yaml) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aria2/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aria2/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aria2/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aria2/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aria2/_cm.yaml) + +___ +### Script output +`cmr "get aria2 get-aria2 " [--input_flags] -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_ARIA2_*` +#### New environment keys auto-detected from customize + +* `CM_ARIA2_BIN_WITH_PATH` +* `CM_ARIA2_DOWNLOAD_DIR` +* `CM_ARIA2_DOWNLOAD_FILE` +* `CM_ARIA2_DOWNLOAD_FILE2` +* `CM_ARIA2_DOWNLOAD_URL` +* `CM_ARIA2_INSTALLED_PATH` +* `CM_ARIA2_INSTALLED_TO_CACHE` \ No newline at end of file diff --git a/script/get-aria2/_cm.yaml b/script/get-aria2/_cm.yaml new file mode 100644 index 0000000000..6fdd8bb17f --- /dev/null +++ b/script/get-aria2/_cm.yaml @@ -0,0 +1,37 @@ +alias: get-aria2 +uid: d83419a90a0c40d0 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: Detection or installation of tools and artifacts + +input_mapping: + install: CM_FORCE_INSTALL + src: CM_ARIA2_BUILD_FROM_SRC + +deps: + - tags: detect,cpu + - tags: detect,os + +#called after preprocess from customize.py +#prehook_deps: +# - tags: print,native,hello-world + +env: + CM_REQUIRE_INSTALL: no + CM_ARIA2_DEFAULT_INSTALL_VERSION: "1.37.0" + +new_env_keys: + - CM_ARIA2_* + - +PATH + +print_env_at_the_end: + CM_ARIA2_INSTALLED_PATH: Path to the tool + +tags: +- get +- aria2 +- get-aria2 diff --git a/script/get-aria2/customize.py b/script/get-aria2/customize.py new file mode 100644 index 0000000000..3c65bbe4f6 --- /dev/null +++ b/script/get-aria2/customize.py @@ -0,0 +1,122 @@ +from cmind import utils +import os + +def preprocess(i): + + # Pre-set by CM + os_info = i['os_info'] + env = i['env'] + recursion_spaces = i['recursion_spaces'] + automation = i['automation'] + run_script_input = i['run_script_input'] + + # Check if a given tool is already installed + file_name_core = 'aria2c' + file_name = file_name_core+'.exe' if os_info['platform'] == 'windows' else file_name_core + + force_install = env.get('CM_FORCE_INSTALL', False) == True + + if not force_install: + r = i['automation'].find_artifact({'file_name': file_name, + 'env':env, + 'os_info':os_info, + 'default_path_env_key': 'PATH', + 'detect_version':True, + 'env_path_key':'CM_ARIA2_BIN_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + if r['return'] >0 : + if r['return'] == 16: 
+ # Not found, try install + force_install = True + else: + return r + + # Force install + if force_install: + # Attempt to run installer + version = env.get('CM_VERSION','') + if version == '': version = env['CM_ARIA2_DEFAULT_INSTALL_VERSION'] + + if os_info['platform'] == 'windows': + archive = 'aria2-{}-win-64bit-build1' + ext = '.zip' + ext2 = '' + else: + archive = 'aria2-{}' + ext = '.tar.bz2' + ext2 = '.tar' + + archive = archive.format(version) + archive_with_ext = archive+ext + + env['CM_ARIA2_DOWNLOAD_DIR'] = archive + + env['CM_ARIA2_DOWNLOAD_FILE'] = archive_with_ext + if ext2!='': + env['CM_ARIA2_DOWNLOAD_FILE2'] = archive+ext2 + + url = 'https://github.com/aria2/aria2/releases/download/release-{}/{}'.format(version, archive_with_ext) + env['CM_ARIA2_DOWNLOAD_URL'] = url + + print ('URL to download ARIA2: {}'.format(url)) + + r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install'}) + if r['return']>0: return r + + if os_info['platform'] == 'windows' or env.get('CM_ARIA2_BUILD_FROM_SRC', '').lower() == 'true': + install_path = os.path.join(os.getcwd(), archive) + + path_to_file = os.path.join(install_path, file_name) + if not os.path.isfile(path_to_file): + return {'return':1, 'error':'file not found: {}'.format(path_to_file)} + + env['CM_ARIA2_BIN_WITH_PATH'] = path_to_file + env['CM_ARIA2_INSTALLED_TO_CACHE'] = 'yes' + else: + path_to_bin = r['env_tmp'].get('CM_ARIA2_BIN_WITH_PATH','') + env['CM_ARIA2_BIN_WITH_PATH'] = path_to_bin + + r = i['automation'].find_artifact({'file_name': file_name, + 'env':env, + 'os_info':os_info, + 'default_path_env_key': 'PATH', + 'detect_version':True, + 'env_path_key':'CM_ARIA2_BIN_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + if r['return']>0: return r + + return {'return':0} + +def detect_version(i): + env = i['env'] + + r = i['automation'].parse_version({'match_text': r'aria2 version\s*([\d.]+)', + 'group_number': 1, + 'env_key':'CM_ARIA2_VERSION', + 'which_env':i['env']}) + if r['return'] >0: return r + + version = r['version'] + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return':0, 'version':version} + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] >0: return r + + version = r['version'] + found_file_path = env['CM_ARIA2_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + + env['CM_ARIA2_INSTALLED_PATH'] = found_path + + if env.get('CM_ARIA2_INSTALLED_TO_CACHE','')=='yes': + env['+PATH'] = [env['CM_ARIA2_INSTALLED_PATH']] + + return {'return':0, 'version': version} diff --git a/script/get-aria2/install.bat b/script/get-aria2/install.bat new file mode 100644 index 0000000000..6255f0cafd --- /dev/null +++ b/script/get-aria2/install.bat @@ -0,0 +1,9 @@ +echo. + +del /Q /S %CM_ARIA2_DOWNLOAD_FILE% + +wget --no-check-certificate %CM_ARIA2_DOWNLOAD_URL% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +unzip -o -q %CM_ARIA2_DOWNLOAD_FILE% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-aria2/install.sh b/script/get-aria2/install.sh new file mode 100644 index 0000000000..d9424732d0 --- /dev/null +++ b/script/get-aria2/install.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +echo "" + +if [[ "${CM_ARIA2_BUILD_FROM_SRC}" == "True" ]]; then + + echo "Building from sources ..." + echo "" + + rm -rf ${CM_ARIA2_DOWNLOAD_FILE} + rm -rf ${CM_ARIA2_DOWNLOAD_FILE2} + + wget --no-check-certificate ${CM_ARIA2_DOWNLOAD_URL} + test $? -eq 0 || exit $? 
+ + bzip2 -d ${CM_ARIA2_DOWNLOAD_FILE} + test $? -eq 0 || exit $? + + tar xvf ${CM_ARIA2_DOWNLOAD_FILE2} + test $? -eq 0 || exit $? + + cd ${CM_ARIA2_DOWNLOAD_DIR} + test $? -eq 0 || exit $? + + ./configure --prefix=$PWD/bin + test $? -eq 0 || exit $? + + make + test $? -eq 0 || exit $? + + make install + test $? -eq 0 || exit $? + +else + echo "Installing binary via sudo ..." + echo "" + + cmd="sudo ${CM_HOST_OS_PACKAGE_MANAGER} install aria2" + echo "$cmd" + + $cmd + test $? -eq 0 || exit $? + + path_to_bin=`which aria2c` + echo "CM_ARIA2_BIN_WITH_PATH=$path_to_bin" > tmp-run-env.out + +fi diff --git a/script/get-aria2/run.bat b/script/get-aria2/run.bat new file mode 100644 index 0000000000..625b7edc03 --- /dev/null +++ b/script/get-aria2/run.bat @@ -0,0 +1,4 @@ +rem Detect version + +%CM_ARIA2_BIN_WITH_PATH% --version > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-aria2/run.sh b/script/get-aria2/run.sh new file mode 100644 index 0000000000..85ba9421a6 --- /dev/null +++ b/script/get-aria2/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +# Detect version + +${CM_ARIA2_BIN_WITH_PATH} --version > tmp-ver.out +test $? -eq 0 || exit 1 diff --git a/script/get-aws-cli/README-extra.md b/script/get-aws-cli/README-extra.md new file mode 100644 index 0000000000..7c84758718 --- /dev/null +++ b/script/get-aws-cli/README-extra.md @@ -0,0 +1,9 @@ +# Get AWS CLI +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed aws-cli on the system and if not found calls the [install script for aws-cli](../script/install-aws-cli). + +## Exported Variables +* `CM_AWS_BIN_WITH_PATH` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/script/get-aws-cli/README.md b/script/get-aws-cli/README.md new file mode 100644 index 0000000000..e1f8094538 --- /dev/null +++ b/script/get-aws-cli/README.md @@ -0,0 +1,126 @@ +Automatically generated README for this automation recipe: **get-aws-cli** + +Category: **Cloud automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-aws-cli,dad67944229942a3) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aws-cli)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,aws-cli,aws,cli* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get aws-cli aws cli" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,aws-cli,aws,cli` + +`cm run script --tags=get,aws-cli,aws,cli ` + +*or* + +`cmr "get aws-cli aws cli"` + +`cmr "get aws-cli aws cli " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,aws-cli,aws,cli',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,aws-cli,aws,cli"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,aws-cli,aws,cli) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get aws-cli aws cli" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
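+
+For example, a minimal sketch of updating one of these keys from the command line is shown below; setting `CM_REQUIRE_INSTALL` manually forces the prehook installation dependency declared in `_cm.json` (normally `customize.py` sets this key only when aws-cli is not detected):
+
+```bash
+cm run script --tags=get,aws-cli,aws,cli --env.CM_REQUIRE_INSTALL=yes
+```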
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aws-cli/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aws-cli/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aws-cli/_cm.json)*** + * install,aws-cli + * `if (CM_REQUIRE_INSTALL == yes)` + - CM script: [install-aws-cli](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-aws-cli) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aws-cli/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aws-cli/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aws-cli/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-aws-cli/_cm.json) + +___ +### Script output +`cmr "get aws-cli aws cli " -j` +#### New environment keys (filter) + +* `CM_AWS_*` +#### New environment keys auto-detected from customize + +* `CM_AWS_CACHE_TAGS` +* `CM_AWS_INSTALLED_PATH` \ No newline at end of file diff --git a/script/get-aws-cli/_cm.json b/script/get-aws-cli/_cm.json new file mode 100644 index 0000000000..bd8faf7648 --- /dev/null +++ b/script/get-aws-cli/_cm.json @@ -0,0 +1,29 @@ +{ + "alias": "get-aws-cli", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "clean_files": [], + "category": "Cloud automation", + "new_env_keys": [ + "CM_AWS_*" + ], + "prehook_deps": [ + { + "enable_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "reuse_version": true, + "tags": "install,aws-cli" + } + ], + "tags": [ + "get", + "aws-cli", + "aws", + "cli" + ], + "uid": "dad67944229942a3" +} diff --git a/script/get-aws-cli/customize.py b/script/get-aws-cli/customize.py new file mode 100644 index 0000000000..af7fd1603b --- /dev/null +++ b/script/get-aws-cli/customize.py @@ -0,0 +1,59 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'aws.exe' if os_info['platform'] == 'windows' else 'aws' + env['FILE_NAME'] = file_name + if 'CM_AWS_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': 'PATH', + 'detect_version':True, + 'env_path_key':'CM_AWS_BIN_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + if r['return'] >0 : + if r['return'] == 16: + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return':0} + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'aws-cli/([\d.]+)\s', + 'group_number': 1, + 'env_key':'CM_AWS_VERSION', + 'which_env':i['env']}) + if r['return'] >0: return r + + version = r['version'] + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return':0, 'version':version} + +def postprocess(i): + env = i['env'] + + r = detect_version(i) + + if r['return'] >0: return r + + version = r['version'] + found_file_path = env['CM_AWS_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + 
env['CM_AWS_INSTALLED_PATH'] = found_path + + env['CM_AWS_CACHE_TAGS'] = 'version-'+version + + return {'return':0, 'version': version} diff --git a/script/get-aws-cli/run.sh b/script/get-aws-cli/run.sh new file mode 100644 index 0000000000..3d65d9ae4f --- /dev/null +++ b/script/get-aws-cli/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash +aws --version > tmp-ver.out +test $? -eq 0 || exit 1 diff --git a/script/get-bazel/README-extra.md b/script/get-bazel/README-extra.md new file mode 100644 index 0000000000..8e11a61bce --- /dev/null +++ b/script/get-bazel/README-extra.md @@ -0,0 +1,9 @@ +# Get Bazel +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed bazel on the system and if not found calls the [install script for bazel](../script/install-bazel). + +## Exported Variables +* `CM_BAZEL_BIN_WITH_PATH` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/script/get-bazel/README.md b/script/get-bazel/README.md new file mode 100644 index 0000000000..d9730240ed --- /dev/null +++ b/script/get-bazel/README.md @@ -0,0 +1,128 @@ +Automatically generated README for this automation recipe: **get-bazel** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-bazel,eaef0be38bac493c) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-bazel)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,bazel,get-bazel* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get bazel get-bazel" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,bazel,get-bazel` + +`cm run script --tags=get,bazel,get-bazel ` + +*or* + +`cmr "get bazel get-bazel"` + +`cmr "get bazel get-bazel " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,bazel,get-bazel',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,bazel,get-bazel"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,bazel,get-bazel) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get bazel get-bazel" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
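+
+As a hedged illustration (the binary path below is only a placeholder), pre-setting `CM_BAZEL_BIN_WITH_PATH` makes `customize.py` skip its automatic detection and use the given binary directly:
+
+```bash
+cm run script --tags=get,bazel,get-bazel --env.CM_BAZEL_BIN_WITH_PATH=/usr/local/bin/bazel
+```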
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-bazel/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-bazel/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-bazel/_cm.json)*** + * install,bazel + * `if (CM_REQUIRE_INSTALL == yes)` + - CM script: [install-bazel](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-bazel) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-bazel/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-bazel/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-bazel/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-bazel/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-bazel/_cm.json) + +___ +### Script output +`cmr "get bazel get-bazel " -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_BAZEL_*` +#### New environment keys auto-detected from customize + +* `CM_BAZEL_CACHE_TAGS` +* `CM_BAZEL_INSTALLED_PATH` \ No newline at end of file diff --git a/script/get-bazel/_cm.json b/script/get-bazel/_cm.json new file mode 100644 index 0000000000..aeb0d83777 --- /dev/null +++ b/script/get-bazel/_cm.json @@ -0,0 +1,28 @@ +{ + "alias": "get-bazel", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "new_env_keys": [ + "CM_BAZEL_*", + "+PATH" + ], + "prehook_deps": [ + { + "enable_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "reuse_version": true, + "tags": "install,bazel" + } + ], + "tags": [ + "get", + "bazel", + "get-bazel" + ], + "uid": "eaef0be38bac493c" +} diff --git a/script/get-bazel/customize.py b/script/get-bazel/customize.py new file mode 100644 index 0000000000..c4622a7f4a --- /dev/null +++ b/script/get-bazel/customize.py @@ -0,0 +1,60 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'bazel.exe' if os_info['platform'] == 'windows' else 'bazel' + env['FILE_NAME'] = file_name + if 'CM_BAZEL_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': 'PATH', + 'detect_version':True, + 'env_path_key':'CM_BAZEL_BIN_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + if r['return'] >0 : + if r['return'] == 16: + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return':0} + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'bazel\s*([\d.]+)', + 'group_number': 1, + 'env_key':'CM_BAZEL_VERSION', + 'which_env':i['env']}) + if r['return'] >0: return r + + version = r['version'] + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return':0, 'version':version} + +def postprocess(i): + env = i['env'] + + r = detect_version(i) + + if r['return'] >0: return r + + version = r['version'] + 
found_file_path = env['CM_BAZEL_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + env['CM_BAZEL_INSTALLED_PATH'] = found_path + env['+PATH'] = [ found_path ] + + env['CM_BAZEL_CACHE_TAGS'] = 'version-'+version + + return {'return':0, 'version': version} diff --git a/script/get-bazel/run.bat b/script/get-bazel/run.bat new file mode 100644 index 0000000000..1e8da4b271 --- /dev/null +++ b/script/get-bazel/run.bat @@ -0,0 +1,2 @@ +%CM_BAZEL_BIN_WITH_PATH% --version > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-bazel/run.sh b/script/get-bazel/run.sh new file mode 100644 index 0000000000..e145f46385 --- /dev/null +++ b/script/get-bazel/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +bazel_bin=${CM_BAZEL_BIN_WITH_PATH} +if [[ ${CM_VERSION} == "0.26.1" ]]; then + ${bazel_bin} version |grep "Build label" |sed 's/Build label:/bazel/' > tmp-ver.out +else + ${bazel_bin} --version > tmp-ver.out +fi +test $? -eq 0 || exit 1 diff --git a/script/get-bert-squad-vocab/README.md b/script/get-bert-squad-vocab/README.md new file mode 100644 index 0000000000..579bc28c42 --- /dev/null +++ b/script/get-bert-squad-vocab/README.md @@ -0,0 +1,121 @@ +Automatically generated README for this automation recipe: **get-bert-squad-vocab** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-bert-squad-vocab,2f99a545ce734157) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-bert-squad-vocab)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,bert,squad,bert-large,bert-squad,vocab* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get bert squad bert-large bert-squad vocab" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,bert,squad,bert-large,bert-squad,vocab` + +`cm run script --tags=get,bert,squad,bert-large,bert-squad,vocab ` + +*or* + +`cmr "get bert squad bert-large bert-squad vocab"` + +`cmr "get bert squad bert-large bert-squad vocab " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,bert,squad,bert-large,bert-squad,vocab',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,bert,squad,bert-large,bert-squad,vocab"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,bert,squad,bert-large,bert-squad,vocab) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get bert squad bert-large bert-squad vocab" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
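+
+For instance, the vocabulary download URL preset in `_cm.json` (`CM_BERT_VOCAB_FILE_URL`) can be overridden in the same way; the URL below is only an illustrative placeholder, not a tested mirror:
+
+```bash
+cm run script --tags=get,bert,squad,bert-large,bert-squad,vocab --env.CM_BERT_VOCAB_FILE_URL=https://example.com/vocab.txt
+```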
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-bert-squad-vocab/_cm.json) + 1. Run "preprocess" function from customize.py + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-bert-squad-vocab/_cm.json)*** + * download,file + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-bert-squad-vocab/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-bert-squad-vocab/_cm.json) + +___ +### Script output +`cmr "get bert squad bert-large bert-squad vocab " -j` +#### New environment keys (filter) + +* `CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH` +#### New environment keys auto-detected from customize diff --git a/script/get-bert-squad-vocab/_cm.json b/script/get-bert-squad-vocab/_cm.json new file mode 100644 index 0000000000..44f5372cc4 --- /dev/null +++ b/script/get-bert-squad-vocab/_cm.json @@ -0,0 +1,36 @@ +{ + "alias": "get-bert-squad-vocab", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML models", + "env": { + "CM_ML_MODEL_VOCAB_TXT": "vocab.txt", + "CM_BERT_VOCAB_FILE_URL": "https://zenodo.org/record/3733868/files/vocab.txt" + }, + "new_env_keys": [ + "CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH" + ], + "tags": [ + "get", + "bert", + "squad", + "bert-large", + "bert-squad", + "vocab" + ], + "prehook_deps": [ + { + "tags": "download,file", + "env": { + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH" + }, + "update_tags_from_env_with_prefix": { + "_url.": [ "CM_BERT_VOCAB_FILE_URL" ] + } + } + ], + "uid": "2f99a545ce734157", + "variations": { + } +} diff --git a/script/get-blis/README-extra.md b/script/get-blis/README-extra.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/script/get-blis/README.md b/script/get-blis/README.md new file mode 100644 index 0000000000..77cdb8047f --- /dev/null +++ b/script/get-blis/README.md @@ -0,0 +1,160 @@ +Automatically generated README for this automation recipe: **get-blis** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-blis,ea6e1cf75242456c) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-blis)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,lib,blis* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get lib blis" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,lib,blis`
+
+`cm run script --tags=get,lib,blis[,variations] `
+
+*or*
+
+`cmr "get lib blis"`
+
+`cmr "get lib blis [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,lib,blis',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,lib,blis"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,lib,blis) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get lib blis[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**source**" +
+ Click here to expand this section. + + * `_amd` + - Workflow: + * **`_flame`** (default) + - Workflow: + +
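+
+For example, a run that selects the non-default `_amd` source group (mirroring the `[variations]` placeholder in the commands above) would look like this:
+
+```bash
+cm run script --tags=get,lib,blis,_amd
+```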
+ + +#### Default variations + +`_flame` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
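+
+Assuming the standard CM `--version` flag, one of the versions listed below can also be pinned explicitly, e.g.:
+
+```bash
+cm run script --tags=get,lib,blis --version=0.9.0
+```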
+ +#### Versions +Default version: `master` + +* `0.9.0` +* `master` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-blis/_cm.json)*** + * get,git + * CM names: `--adr.['blis-source-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-blis/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-blis/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-blis/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-blis/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-blis/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-blis/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-blis/_cm.json) + +___ +### Script output +`cmr "get lib blis [,variations]" -j` +#### New environment keys (filter) + +* `+LD_LIBRARY_PATH` +* `CM_BLIS_INC` +* `CM_BLIS_INSTALL_PATH` +* `CM_BLIS_LIB` +* `CM_BLIS_SRC_PATH` +#### New environment keys auto-detected from customize + +* `CM_BLIS_INC` +* `CM_BLIS_INSTALL_PATH` +* `CM_BLIS_LIB` +* `CM_BLIS_SRC_PATH` \ No newline at end of file diff --git a/script/get-blis/_cm.json b/script/get-blis/_cm.json new file mode 100644 index 0000000000..642c6166ff --- /dev/null +++ b/script/get-blis/_cm.json @@ -0,0 +1,72 @@ +{ + "alias": "get-blis", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "default_version": "master", + "deps": [ + { + "tags": "get,git", + "names": [ + "blis-source-repo" + ], + "force_env_keys": [ + "CM_GIT_CHECKOUT" + ] + }, + { + "tags": "detect,cpu" + } + ], + "input_description": {}, + "input_mapping": {}, + "new_env_keys": [ + "CM_BLIS_SRC_PATH", + "+LD_LIBRARY_PATH", + "CM_BLIS_INSTALL_PATH", + "CM_BLIS_INC", + "CM_BLIS_LIB" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "get", + "lib", + "blis" + ], + "uid": "ea6e1cf75242456c", + "variations": { + "flame": { + "group": "source", + "default": true, + "add_deps_recursive": { + "blis-source-repo": { + "tags": "_repo.https://github.com/flame/blis.git" + } + } + }, + "amd": { + "group": "source", + "add_deps_recursive": { + "blis-source-repo": { + "tags": "_repo.https://github.com/amd/libflame.git" + } + } + } + }, + "versions": { + "master": { + "env": { + "CM_GIT_CHECKOUT": "master" + } + }, + "0.9.0": { + "env": { + "CM_GIT_CHECKOUT": "0.9.0" + } + } + } +} diff --git a/script/get-blis/customize.py b/script/get-blis/customize.py new file mode 100644 index 0000000000..063366ed21 --- /dev/null +++ b/script/get-blis/customize.py @@ -0,0 +1,36 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 
'yes') + + env['CM_BLIS_SRC_PATH'] = env['CM_GIT_CHECKOUT_PATH'] + + + return {'return':0} + +def postprocess(i): + + env = i['env'] + install_dir = os.path.join(env['CM_BLIS_SRC_PATH'], "install") + + env['CM_BLIS_INSTALL_PATH'] = install_dir + env['CM_BLIS_INC'] = os.path.join(install_dir, 'include', 'blis') + env['CM_BLIS_LIB'] = os.path.join(install_dir, 'lib', 'libblis.a') + + blis_lib_path = os.path.join(install_dir, 'lib') + + env['+LD_LIBRARY_PATH'] = [ blis_lib_path ] if '+LD_LIBRARY_PATH' not in env else env['+LD_LIBRARY_PATH'] + [ blis_lib_path ] + + return {'return':0} + + diff --git a/script/get-blis/run.bat b/script/get-blis/run.bat new file mode 100644 index 0000000000..648302ca71 --- /dev/null +++ b/script/get-blis/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/script/get-blis/run.sh b/script/get-blis/run.sh new file mode 100644 index 0000000000..4c6d91d785 --- /dev/null +++ b/script/get-blis/run.sh @@ -0,0 +1,12 @@ +#!/bin/bash +CUR=$PWD +mkdir -p install +test $? -eq 0 || exit $? +INSTALL_DIR=$PWD/install +cd ${CM_BLIS_SRC_PATH} +./configure --prefix=$INSTALL_DIR auto +test $? -eq 0 || exit $? +make -j${CM_HOST_CPU_TOTAL_PHYSICAL_CORES} +test $? -eq 0 || exit $? +make install +test $? -eq 0 || exit $? diff --git a/script/get-brew/README.md b/script/get-brew/README.md new file mode 100644 index 0000000000..1516f2536e --- /dev/null +++ b/script/get-brew/README.md @@ -0,0 +1,119 @@ +Automatically generated README for this automation recipe: **get-brew** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-brew,4a2c5eab1ccf484f) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-brew)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,brew* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get brew" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,brew` + +`cm run script --tags=get,brew ` + +*or* + +`cmr "get brew"` + +`cmr "get brew " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,brew',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,brew"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,brew) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get brew" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-brew/_cm.json) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-brew/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-brew/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-brew/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-brew/_cm.json) + +___ +### Script output +`cmr "get brew " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/get-brew/_cm.json b/script/get-brew/_cm.json new file mode 100644 index 0000000000..a9d9c8a459 --- /dev/null +++ b/script/get-brew/_cm.json @@ -0,0 +1,22 @@ +{ + "alias": "get-brew", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "category": "Detection or installation of tools and artifacts", + "deps": [], + "input_description": {}, + "input_mapping": {}, + "new_env_keys": [], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "get", + "brew" + ], + "uid": "4a2c5eab1ccf484f", + "variations": {}, + "versions": {} +} diff --git a/script/get-brew/dockerfiles/ubuntu_22.04.Dockerfile b/script/get-brew/dockerfiles/ubuntu_22.04.Dockerfile new file mode 100644 index 0000000000..15fa071d94 --- /dev/null +++ b/script/get-brew/dockerfiles/ubuntu_22.04.Dockerfile @@ -0,0 +1,36 @@ +FROM ubuntu:22.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +ENV PATH=${PATH}:$HOME/.local/bin +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands + +# Run CM workflow +RUN cm run script --tags=get,brew --fake_run +RUN cm run script --tags=get,brew diff --git a/script/get-brew/run.sh b/script/get-brew/run.sh new file mode 100644 index 0000000000..bdb3af4c4a --- /dev/null +++ b/script/get-brew/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +test $? -eq 0 || exit $? 
diff --git a/script/get-ck-repo-mlops/README.md b/script/get-ck-repo-mlops/README.md new file mode 100644 index 0000000000..37970fd72a --- /dev/null +++ b/script/get-ck-repo-mlops/README.md @@ -0,0 +1,122 @@ +Automatically generated README for this automation recipe: **get-ck-repo-mlops** + +Category: **Legacy CK support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ck-repo-mlops,d3a619b8186e4f74) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ck-repo-mlops)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,ck-repo,mlops,ck-repo-mlops* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get ck-repo mlops ck-repo-mlops" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,ck-repo,mlops,ck-repo-mlops` + +`cm run script --tags=get,ck-repo,mlops,ck-repo-mlops ` + +*or* + +`cmr "get ck-repo mlops ck-repo-mlops"` + +`cmr "get ck-repo mlops ck-repo-mlops " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ck-repo,mlops,ck-repo-mlops',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,ck-repo,mlops,ck-repo-mlops"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ck-repo,mlops,ck-repo-mlops) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get ck-repo mlops ck-repo-mlops" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ck-repo-mlops/_cm.json)*** + * get,ck + - CM script: [get-ck](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ck) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ck-repo-mlops/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ck-repo-mlops/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ck-repo-mlops/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ck-repo-mlops/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ck-repo-mlops/_cm.json) + +___ +### Script output +`cmr "get ck-repo mlops ck-repo-mlops " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/get-ck-repo-mlops/_cm.json b/script/get-ck-repo-mlops/_cm.json new file mode 100644 index 0000000000..adefecb94e --- /dev/null +++ b/script/get-ck-repo-mlops/_cm.json @@ -0,0 +1,18 @@ +{ + "alias": "get-ck-repo-mlops", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Legacy CK support", + "deps": [ + { + "tags": "get,ck" + } + ], + "tags": [ + "get", + "ck-repo", + "mlops", + "ck-repo-mlops" + ], + "uid": "d3a619b8186e4f74" +} diff --git a/script/get-ck-repo-mlops/run.bat b/script/get-ck-repo-mlops/run.bat new file mode 100644 index 0000000000..3e3239b8bb --- /dev/null +++ b/script/get-ck-repo-mlops/run.bat @@ -0,0 +1 @@ +ck pull repo:mlcommons@ck-mlops diff --git a/script/get-ck-repo-mlops/run.sh b/script/get-ck-repo-mlops/run.sh new file mode 100644 index 0000000000..ec1267b5c3 --- /dev/null +++ b/script/get-ck-repo-mlops/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +ck pull repo:mlcommons@ck-mlops + diff --git a/script/get-ck/README.md b/script/get-ck/README.md new file mode 100644 index 0000000000..5a858a8c90 --- /dev/null +++ b/script/get-ck/README.md @@ -0,0 +1,120 @@ +Automatically generated README for this automation recipe: **get-ck** + +Category: **Legacy CK support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ck,5575126797174cac) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ck)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,ck,ck-framework* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get ck ck-framework" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,ck,ck-framework` + +`cm run script --tags=get,ck,ck-framework ` + +*or* + +`cmr "get ck ck-framework"` + +`cmr "get ck ck-framework " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ck,ck-framework',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,ck,ck-framework"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ck,ck-framework) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get ck ck-framework" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
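+
+As one hedged example, `run.sh` appends `CM_CK_FRAMEWORK_INSTALL_CLI` to its `pip install ck` command, so extra pip flags can be injected via this key (the `--user` flag below is just an illustration):
+
+```bash
+cm run script --tags=get,ck,ck-framework --env.CM_CK_FRAMEWORK_INSTALL_CLI=--user
+```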
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ck/_cm.json) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ck/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ck/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ck/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ck/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ck/_cm.json) + +___ +### Script output +`cmr "get ck ck-framework " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/get-ck/_cm.json b/script/get-ck/_cm.json new file mode 100644 index 0000000000..822cde5ee2 --- /dev/null +++ b/script/get-ck/_cm.json @@ -0,0 +1,13 @@ +{ + "alias": "get-ck", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Legacy CK support", + "cache": true, + "tags": [ + "get", + "ck", + "ck-framework" + ], + "uid": "5575126797174cac" +} diff --git a/script/get-ck/run.bat b/script/get-ck/run.bat new file mode 100644 index 0000000000..75d92799e8 --- /dev/null +++ b/script/get-ck/run.bat @@ -0,0 +1 @@ +pip install ck diff --git a/script/get-ck/run.sh b/script/get-ck/run.sh new file mode 100644 index 0000000000..eae526fd37 --- /dev/null +++ b/script/get-ck/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +python3 -m pip install ck ${CM_CK_FRAMEWORK_INSTALL_CLI} diff --git a/script/get-cl/README-extra.md b/script/get-cl/README-extra.md new file mode 100644 index 0000000000..796ec71131 --- /dev/null +++ b/script/get-cl/README-extra.md @@ -0,0 +1,7 @@ +# Get Microsoft C compiler + +Example to detect a Microsoft C compiler from the Visual Studio: + +```bash +cm run script "get cl" --path="C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\bin\Hostx64\x64" +``` diff --git a/script/get-cl/README.md b/script/get-cl/README.md new file mode 100644 index 0000000000..3c78fbfc69 --- /dev/null +++ b/script/get-cl/README.md @@ -0,0 +1,140 @@ +Automatically generated README for this automation recipe: **get-cl** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-cl,7dbb770faff947c0) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cl)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,cl,compiler,c-compiler,cpp-compiler,get-cl* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get cl compiler c-compiler cpp-compiler get-cl" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,cl,compiler,c-compiler,cpp-compiler,get-cl` + +`cm run script --tags=get,cl,compiler,c-compiler,cpp-compiler,get-cl ` + +*or* + +`cmr "get cl compiler c-compiler cpp-compiler get-cl"` + +`cmr "get cl compiler c-compiler cpp-compiler get-cl " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,cl,compiler,c-compiler,cpp-compiler,get-cl',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,cl,compiler,c-compiler,cpp-compiler,get-cl"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,cl,compiler,c-compiler,cpp-compiler,get-cl) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get cl compiler c-compiler cpp-compiler get-cl" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cl/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cl/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cl/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cl/run.bat) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cl/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cl/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cl/_cm.json) + +___ +### Script output +`cmr "get cl compiler c-compiler cpp-compiler get-cl " -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_CL_*` +* `CM_COMPILER_*` +* `CM_CXX_COMPILER_*` +* `CM_C_COMPILER_*` +* `CM_LINKER_*` +#### New environment keys auto-detected from customize + +* `CM_CL_BIN` +* `CM_CL_BIN_WITH_PATH` +* `CM_CL_CACHE_TAGS` +* `CM_COMPILER_CACHE_TAGS` +* `CM_COMPILER_FAMILY` +* `CM_COMPILER_VERSION` +* `CM_CXX_COMPILER_BIN` +* `CM_CXX_COMPILER_FLAG_OUTPUT` +* `CM_CXX_COMPILER_FLAG_VERSION` +* `CM_CXX_COMPILER_WITH_PATH` +* `CM_C_COMPILER_BIN` +* `CM_C_COMPILER_FLAG_OUTPUT` +* `CM_C_COMPILER_FLAG_VERSION` +* `CM_C_COMPILER_WITH_PATH` \ No newline at end of file diff --git a/script/get-cl/_cm.json b/script/get-cl/_cm.json new file mode 100644 index 0000000000..cb6c575368 --- /dev/null +++ b/script/get-cl/_cm.json @@ -0,0 +1,30 @@ +{ + "alias": "get-cl", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Compiler automation", + "cache": true, + "clean_files": [], + "name": "Detect or install Microsoft C compiler", + "new_env_keys": [ + "CM_CL_*", + "CM_C_COMPILER_*", + "CM_CXX_COMPILER_*", + "CM_COMPILER_*", + "CM_LINKER_*", + "+PATH" + ], + "new_state_keys": [ + "script_prefix" + ], + "sort": 1000, + "tags": [ + "get", + "cl", + "compiler", + "c-compiler", + "cpp-compiler", + "get-cl" + ], + "uid": "7dbb770faff947c0" +} diff --git a/script/get-cl/customize.py b/script/get-cl/customize.py new file mode 100644 index 0000000000..1d205d8fdf --- /dev/null +++ b/script/get-cl/customize.py @@ -0,0 +1,136 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] != 'windows': + return {'return':0} + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + automation = i['automation'] + + file_name = 'cl.exe' + + # Will check env['CM_TMP_PATH'] if comes from installation script + ii = {'file_name': file_name, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': 'PATH', + 'detect_version':True, + 'env_path_key':'CM_CL_BIN_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces} + + rr = automation.find_artifact(ii) + if rr['return'] >0 : + # If not found in PATH, try a longer search + if rr['return'] != 16: + return rr + + if env.get('CM_INPUT','').strip()=='' and env.get('CM_TMP_PATH','').strip()=='': + + print (i['recursion_spaces'] + ' Starting deep search for {} - it may take some time ...'.format(file_name)) + + paths = ['C:\\Program Files\\Microsoft Visual Studio', + 'C:\\Program Files 
(x86)\\Microsoft Visual Studio', + 'C:\\Program Files (x86)\\Microsoft Visual Studio 14'] + + restrict_paths = ['Hostx64\\x64'] + + r = automation.find_file_deep({'paths':paths, + 'file_name':file_name, + 'restrict_paths':restrict_paths}) + if r['return']>0: return r + + found_paths = r['found_paths'] + + if len(found_paths) == 0: + return rr + + tmp_paths = ';'.join(found_paths) + + env['CM_TMP_PATH'] = tmp_paths + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + ii['env']=env + + rr = automation.find_artifact(ii) + if rr['return'] >0 : return rr + + else: + return rr + + found_path = rr['found_path'] + + # Check vcvarall.bat + state = i['state'] + script_prefix = state.get('script_prefix',[]) + + # Attempt to find vcvars64.bat + bat_file_name = 'VC\\Auxiliary\\Build\\vcvars64.bat' + r = automation.find_file_back({'path':found_path, 'file_name':bat_file_name}) + if r['return']>0: return r + + found_path_bat = r['found_path'] + + if found_path_bat!='': + path_to_vcvars = os.path.join(found_path_bat, bat_file_name) + + s = os_info['run_bat'].replace('${bat_file}', '"' + path_to_vcvars + '"') + + script_prefix.append(s) + + state['script_prefix'] = script_prefix + + env['CM_CL_BIN']=file_name + env['CM_CL_BIN_WITH_PATH']=os.path.join(found_path, file_name) + + # General compiler for general program compilation + env['CM_C_COMPILER_BIN']=file_name + env['CM_C_COMPILER_WITH_PATH']=os.path.join(found_path, file_name) + env['CM_C_COMPILER_FLAG_OUTPUT']='/Fe:' + env['CM_C_COMPILER_FLAG_VERSION']='' + + env['CM_CXX_COMPILER_BIN']=env['CM_C_COMPILER_BIN'] + env['CM_CXX_COMPILER_WITH_PATH']=env['CM_C_COMPILER_WITH_PATH'] + env['CM_CXX_COMPILER_FLAG_OUTPUT']='/Fe:' + env['CM_CXX_COMPILER_FLAG_VERSION']='' + + return {'return':0} + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'Version\s*([\d.]+)', + 'group_number': 1, + 'env_key':'CM_CL_VERSION', + 'which_env':i['env']}) + if r['return'] >0: return r + + version = r['version'] + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return':0, 'version':version} + + + +def postprocess(i): + + env = i['env'] + + r = detect_version(i) + + if r['return'] >0: return r + + version = r['version'] + + env['CM_CL_CACHE_TAGS'] = 'version-'+version + env['CM_COMPILER_CACHE_TAGS'] = 'version-'+version+',family-msvc' + env['CM_COMPILER_FAMILY'] = 'MSVC' + env['CM_COMPILER_VERSION'] = env['CM_CL_VERSION'] + + return {'return':0, 'version':version} diff --git a/script/get-cl/run.bat b/script/get-cl/run.bat new file mode 100644 index 0000000000..2a5fc7c9bf --- /dev/null +++ b/script/get-cl/run.bat @@ -0,0 +1,3 @@ +"%CM_CL_BIN_WITH_PATH%" > tmp-ver.out 2>&1 +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + diff --git a/script/get-cmake/README.md b/script/get-cmake/README.md new file mode 100644 index 0000000000..5c056e9055 --- /dev/null +++ b/script/get-cmake/README.md @@ -0,0 +1,131 @@ +Automatically generated README for this automation recipe: **get-cmake** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-cmake,52bf974d791b4fc8) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: 
*[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmake)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,cmake,get-cmake* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get cmake get-cmake" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,cmake,get-cmake` + +`cm run script --tags=get,cmake,get-cmake ` + +*or* + +`cmr "get cmake get-cmake"` + +`cmr "get cmake get-cmake " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,cmake,get-cmake',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,cmake,get-cmake"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,cmake,get-cmake) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get cmake get-cmake" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
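+
+For example, a minimal sketch that forces the prebuilt installer dependency declared in `_cm.json` instead of relying on detection; note that `_cm.json` also presets this key to `no`, so whether the command-line value takes effect depends on CM's env precedence:
+
+```bash
+cm run script --tags=get,cmake,get-cmake --env.CM_REQUIRE_INSTALL=yes
+```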
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmake/_cm.json)*** + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmake/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmake/_cm.json)*** + * install,cmake,prebuilt + * `if (CM_REQUIRE_INSTALL == yes)` + - CM script: [install-cmake-prebuilt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-cmake-prebuilt) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmake/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmake/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmake/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmake/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmake/_cm.json) + +___ +### Script output +`cmr "get cmake get-cmake " -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_CMAKE_*` +* `CM_MAKE_CORES` +#### New environment keys auto-detected from customize + +* `CM_CMAKE_CACHE_TAGS` +* `CM_MAKE_CORES` \ No newline at end of file diff --git a/script/get-cmake/_cm.json b/script/get-cmake/_cm.json new file mode 100644 index 0000000000..ef3bf700bb --- /dev/null +++ b/script/get-cmake/_cm.json @@ -0,0 +1,40 @@ +{ + "alias": "get-cmake", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "env": { + "CM_REQUIRE_INSTALL": "no" + }, + "deps": [ + { + "tags": "detect,cpu" + } + ], + "new_env_keys": [ + "CM_CMAKE_*", + "CM_MAKE_CORES", + "+PATH" + ], + "prehook_deps": [ + { + "enable_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "reuse_version": true, + "tags": "install,cmake,prebuilt" + } + ], + "tags": [ + "get", + "cmake", + "get-cmake" + ], + "print_env_at_the_end" : { + "CM_CMAKE_BIN_WITH_PATH": "Path to the tool" + }, + "uid": "52bf974d791b4fc8" +} diff --git a/script/get-cmake/customize.py b/script/get-cmake/customize.py new file mode 100644 index 0000000000..ebfd0c319a --- /dev/null +++ b/script/get-cmake/customize.py @@ -0,0 +1,60 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'cmake.exe' if os_info['platform'] == 'windows' else 'cmake' + + if 'CM_CMAKE_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env':env, + 'os_info':os_info, + 'default_path_env_key': 'PATH', + 'detect_version':True, + 'env_path_key':'CM_CMAKE_BIN_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + if r['return'] >0 : + if r['return'] == 16: + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return':0} + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'cmake version\s*([\d.]+)', + 'group_number': 1, + 'env_key':'CM_CMAKE_VERSION', + 
'which_env':i['env']})
+    if r['return'] >0: return r
+
+    version = r['version']
+    print (i['recursion_spaces'] + '    Detected version: {}'.format(version))
+
+    return {'return':0, 'version':version}
+
+def postprocess(i):
+
+    env = i['env']
+    r = detect_version(i)
+    if r['return'] >0: return r
+
+    version = r['version']
+    found_file_path = env['CM_CMAKE_BIN_WITH_PATH']
+
+    found_path = os.path.dirname(found_file_path)
+
+    env['CM_CMAKE_CACHE_TAGS'] = 'version-'+version
+
+    if 'CM_HOST_CPU_TOTAL_CORES' in env:
+        env['CM_MAKE_CORES'] = env['CM_HOST_CPU_TOTAL_CORES']
+
+    return {'return':0, 'version': version}
diff --git a/script/get-cmake/run.bat b/script/get-cmake/run.bat
new file mode 100644
index 0000000000..0802ae8282
--- /dev/null
+++ b/script/get-cmake/run.bat
@@ -0,0 +1,2 @@
+%CM_CMAKE_BIN_WITH_PATH% --version > tmp-ver.out
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-cmake/run.sh b/script/get-cmake/run.sh
new file mode 100644
index 0000000000..6d2aeff974
--- /dev/null
+++ b/script/get-cmake/run.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+cmake_bin=${CM_CMAKE_BIN_WITH_PATH}
+
+${cmake_bin} --version > tmp-ver.out
+test $? -eq 0 || exit 1
diff --git a/script/get-cmsis_5/README-extra.md b/script/get-cmsis_5/README-extra.md
new file mode 100644
index 0000000000..1f052e7ea7
--- /dev/null
+++ b/script/get-cmsis_5/README-extra.md
@@ -0,0 +1,5 @@
+# GET-CMSIS_5
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) clones the git repository of [CMSIS Version 5](https://github.com/ARM-software/CMSIS_5) and caches it in CM for reuse across other CM scripts.
+
+## Exported Variables
+1. [CMSIS_PATH](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-cmsis_5/customize.py#L23): Location in the CM cache where the CMSIS_5 git repository is cloned.
diff --git a/script/get-cmsis_5/README.md b/script/get-cmsis_5/README.md
new file mode 100644
index 0000000000..52c3f1a4b8
--- /dev/null
+++ b/script/get-cmsis_5/README.md
@@ -0,0 +1,151 @@
+Automatically generated README for this automation recipe: **get-cmsis_5**
+
+Category: **Detection or installation of tools and artifacts**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-cmsis_5,2258c212b11443f5) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmsis_5)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,cmsis,cmsis_5,arm-software*
+* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get cmsis cmsis_5 arm-software" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,cmsis,cmsis_5,arm-software`
+
+`cm run script --tags=get,cmsis,cmsis_5,arm-software[,variations] `
+
+*or*
+
+`cmr "get cmsis cmsis_5 arm-software"`
+
+`cmr "get cmsis cmsis_5 arm-software [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,cmsis,cmsis_5,arm-software',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,cmsis,cmsis_5,arm-software"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,cmsis,cmsis_5,arm-software) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get cmsis cmsis_5 arm-software[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_recurse-submodules` + - Environment variables: + - *CM_GIT_RECURSE_SUBMODULES*: `--recurse-submodules` + - Workflow: + * `_short-history` + - Environment variables: + - *CM_GIT_DEPTH*: `--depth 10` + - Workflow: + +
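+The variations above can be combined in the tag string. A minimal sketch (illustrative only, not part of the generated template) requesting a shallow clone with submodules:
+
+```python
+import cmind
+
+# '_short-history' sets CM_GIT_DEPTH="--depth 10" and
+# '_recurse-submodules' sets CM_GIT_RECURSE_SUBMODULES="--recurse-submodules",
+# as listed in the variations section above.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,cmsis,cmsis_5,arm-software,_short-history,_recurse-submodules',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```
+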
+ +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+* CM_GIT_DEPTH: ``
+* CM_GIT_PATCH: `no`
+* CM_GIT_URL: `https://github.com/ARM-software/CMSIS_5.git`
+
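+For instance, a minimal sketch (the fork URL below is a hypothetical placeholder) that redirects the clone source by overriding `CM_GIT_URL`:
+
+```python
+import cmind
+
+# Equivalent to --env.CM_GIT_URL=... on the command line;
+# substitute your own fork for the example URL.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,cmsis,cmsis_5,arm-software',
+                  'env': {'CM_GIT_URL': 'https://github.com/example/CMSIS_5.git'},
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```
+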
+ +#### Versions +Default version: `custom` + +* `custom` +* `develop` +* `master` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmsis_5/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmsis_5/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmsis_5/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmsis_5/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmsis_5/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmsis_5/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cmsis_5/_cm.json) + +___ +### Script output +`cmr "get cmsis cmsis_5 arm-software [,variations]" -j` +#### New environment keys (filter) + +* `CMSIS*` +#### New environment keys auto-detected from customize diff --git a/script/get-cmsis_5/_cm.json b/script/get-cmsis_5/_cm.json new file mode 100644 index 0000000000..7312e11469 --- /dev/null +++ b/script/get-cmsis_5/_cm.json @@ -0,0 +1,58 @@ +{ + "alias": "get-cmsis_5", + "automation_alias": "script", + "category": "Detection or installation of tools and artifacts", + "automation_uid": "5b4e0237da074764", + "cache": true, + "default_version": "custom", + "deps": [ + { + "tags": "detect,os" + } + ], + "default_env": { + "CM_GIT_DEPTH": "", + "CM_GIT_PATCH": "no", + "CM_GIT_URL": "https://github.com/ARM-software/CMSIS_5.git" + }, + "new_env_keys": [ + "CMSIS*" + ], + "tags": [ + "get", + "cmsis", + "cmsis_5", + "arm-software" + ], + "uid": "2258c212b11443f5", + "variations": { + "recurse-submodules": { + "env": { + "CM_GIT_RECURSE_SUBMODULES": "--recurse-submodules" + } + }, + "short-history": { + "env": { + "CM_GIT_DEPTH": "--depth 10" + } + } + }, + "versions": { + "custom": { + "env": { + "CM_GIT_CHECKOUT": "e5dc19182f6084de32d8dc5a22c84e01210f4995", + "CM_GIT_SHA": "yes" + } + }, + "develop": { + "env": { + "CM_GIT_CHECKOUT": "develop" + } + }, + "master": { + "env": { + "CM_GIT_CHECKOUT": "master" + } + } + } +} diff --git a/script/get-cmsis_5/customize.py b/script/get-cmsis_5/customize.py new file mode 100644 index 0000000000..af9b13c464 --- /dev/null +++ b/script/get-cmsis_5/customize.py @@ -0,0 +1,25 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + env = i['env'] + if 'CM_GIT_DEPTH' not in env: + env['CM_GIT_DEPTH'] = '' + if 'CM_GIT_RECURSE_SUBMODULES' not in env: + env['CM_GIT_RECURSE_SUBMODULES'] = '' + + return {'return':0} + +def postprocess(i): + + env = i['env'] + state = i['state'] + env['CMSIS_PATH'] = os.path.join(os.getcwd(), 'cmsis') + + return {'return':0} diff --git a/script/get-cmsis_5/run.sh b/script/get-cmsis_5/run.sh new file mode 100644 index 0000000000..9093c093bc --- /dev/null +++ b/script/get-cmsis_5/run.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +CUR_DIR=$PWD 
+SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} + +echo "******************************************************" + +if [ ! -d "cmsis" ]; then + if [ -z ${CM_GIT_SHA} ]; then + echo "Cloning CMSIS_5 from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..." + git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} cmsis + if [ "${?}" != "0" ]; then exit 1; fi + else + echo "Cloning CMSIS_5 from ${CM_GIT_URL} with default branch and checkout ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..." + git clone ${CM_GIT_RECURSE_SUBMODULES} ${CM_GIT_URL} ${CM_GIT_DEPTH} cmsis + if [ "${?}" != "0" ]; then exit 1; fi + cd cmsis + git checkout "${CM_GIT_CHECKOUT}" + if [ "${?}" != "0" ]; then exit 1; fi + fi +fi diff --git a/script/get-compiler-flags/README.md b/script/get-compiler-flags/README.md new file mode 100644 index 0000000000..fe11a854f8 --- /dev/null +++ b/script/get-compiler-flags/README.md @@ -0,0 +1,131 @@ +Automatically generated README for this automation recipe: **get-compiler-flags** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-compiler-flags,31be8b74a69742f8) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-compiler-flags)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,compiler-flags* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get compiler-flags" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,compiler-flags` + +`cm run script --tags=get,compiler-flags ` + +*or* + +`cmr "get compiler-flags"` + +`cmr "get compiler-flags " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,compiler-flags',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,compiler-flags"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,compiler-flags) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get compiler-flags" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+
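+Although no default keys are listed here, the `customize.py` shown further below selects `-O3`, `-O0` or `-O2` depending on `CM_FAST_COMPILATION` or `CM_DEBUG_COMPILATION`. A minimal sketch (illustrative only) requesting the fast flags:
+
+```python
+import cmind
+
+# CM_FAST_COMPILATION=yes makes customize.py pick the
+# CM_COMPILER_FLAGS_FAST default ("-O3") for CFLAGS/CXXFLAGS/FFLAGS.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,compiler-flags',
+                  'env': {'CM_FAST_COMPILATION': 'yes'},
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```
+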
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-compiler-flags/_cm.json)*** + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,compiler + * `if (CM_C_COMPILER_BIN != on)` + * CM names: `--adr.['compiler']...` + - CM script: [get-cl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cl) + - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc) + - CM script: [get-llvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-llvm) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-compiler-flags/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-compiler-flags/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-compiler-flags/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-compiler-flags/_cm.json) + +___ +### Script output +`cmr "get compiler-flags " -j` +#### New environment keys (filter) + +* `+ CFLAGS` +* `+ CXXFLAGS` +* `+ FFLAGS` +* `+ LDFLAGS` +* `+CM_HOST_OS_DEFAULT_INCLUDE_PATH` +#### New environment keys auto-detected from customize diff --git a/script/get-compiler-flags/_cm.json b/script/get-compiler-flags/_cm.json new file mode 100644 index 0000000000..cc7d1ea00e --- /dev/null +++ b/script/get-compiler-flags/_cm.json @@ -0,0 +1,34 @@ +{ + "alias": "get-compiler-flags", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Compiler automation", + "deps": [ + { + "tags": "detect,cpu" + }, + { + "names": [ + "compiler" + ], + "skip_if_env": { + "CM_C_COMPILER_BIN": [ + "on" + ] + }, + "tags": "get,compiler" + } + ], + "new_env_keys": [ + "+ CFLAGS", + "+ CXXFLAGS", + "+ FFLAGS", + "+ LDFLAGS", + "+CM_HOST_OS_DEFAULT_INCLUDE_PATH" + ], + "tags": [ + "get", + "compiler-flags" + ], + "uid": "31be8b74a69742f8" +} diff --git a/script/get-compiler-flags/customize.py b/script/get-compiler-flags/customize.py new file mode 100644 index 0000000000..23ccbe6472 --- /dev/null +++ b/script/get-compiler-flags/customize.py @@ -0,0 +1,63 @@ +from cmind import utils +import os +import subprocess + +def preprocess(i): + os_info = i['os_info'] + + env = i['env'] + env['+ CFLAGS'] = [] + env['+ CXXFLAGS'] = [] + env['+ FFLAGS'] = [] + env['+ LDFLAGS'] = [] + + # TBD: add unified flags for Windows + if os_info['platform'] == 'windows': + return {'return':0} + + if env.get("CM_FAST_COMPILATION") in [ "yes", "on", "1" ]: + DEFAULT_COMPILER_FLAGS = env.get("CM_COMPILER_FLAGS_FAST", "-O3") + DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_FAST", "-O3") # -flto") - this flag is not always available + elif env.get("CM_DEBUG_COMPILATION") in ["yes", "on", "1" ]: + DEFAULT_COMPILER_FLAGS = env.get("CM_COMPILER_FLAGS_DEBUG", "-O0") + DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_DEBUG", "-O0") + else: + DEFAULT_COMPILER_FLAGS = env.get("CM_COMPILER_FLAGS_DEFAULT", "-O2") + DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_DEFAULT", "-O2") + + env['+ CFLAGS'] += DEFAULT_COMPILER_FLAGS.split(" ") + env['+ CXXFLAGS'] += DEFAULT_COMPILER_FLAGS.split(" ") + env['+ 
FFLAGS'] += DEFAULT_COMPILER_FLAGS.split(" ") + env['+ LDFLAGS'] += DEFAULT_LINKER_FLAGS.split(" ") + + env['+ CFLAGS'] = list(set(env['+ CFLAGS'])) + env['+ CXXFLAGS'] = list(set(env['+ CXXFLAGS'])) + env['+ FFLAGS'] = list(set(env['+ FFLAGS'])) + env['+ LDFLAGS'] = list(set(env['+ LDFLAGS'])) + + sys_cmd = "cpp -v /dev/null -o /dev/null 2>&1" + result = subprocess.check_output(sys_cmd, shell=True).decode("utf-8") + start = False + inc_dir = [] + for out in result.split("\n"): + if "> search starts here" not in out and not start: + continue + if not start: + start = True + continue + if "End of search list" in out: + break + if 'gcc' not in out: + inc_dir.append(out.strip()) + env['+CM_HOST_OS_DEFAULT_INCLUDE_PATH'] = inc_dir + +# if env['CM_C_COMPILER_BIN'] == 'icc': +# if env['CM_CPUINFO_Vendor_ID'] == 'GenuineIntel': +# if int(env['CM_CPUINFO_CPU_family']) >= 0: +# env['+ CFLAGS'] += ["-ipo"] +# if env['CM_C_COMPILER_BIN'] == 'gcc': +# if env['CM_HOST_CPU_VENDOR_ID'] == 'AMD': +# if int(env['CM_HOST_CPU_FAMILY']) >= 0: +# env['+ CFLAGS'] += ["-march=znver2", "-flto"] + + return {'return':0} diff --git a/script/get-compiler-rust/README.md b/script/get-compiler-rust/README.md new file mode 100644 index 0000000000..96eea7341f --- /dev/null +++ b/script/get-compiler-rust/README.md @@ -0,0 +1,122 @@ +Automatically generated README for this automation recipe: **get-compiler-rust** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-compiler-rust,97ffbd9e537b4b59) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-compiler-rust)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *get,rust-compiler* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get rust-compiler" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,rust-compiler` + +`cm run script --tags=get,rust-compiler ` + +*or* + +`cmr "get rust-compiler"` + +`cmr "get rust-compiler " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,rust-compiler',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,rust-compiler"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,rust-compiler) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get rust-compiler" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+
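+A minimal sketch that checks where cargo was added to the path after the run. This assumes the run result exposes exported keys under `new_env`, which is an assumption about the CM API rather than something documented in this README:
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,rust-compiler',
+                  'out': 'con'})
+if r['return'] == 0:
+    # customize.py (shown below) appends ~/.cargo/bin to +PATH.
+    print(r.get('new_env', {}).get('+PATH'))
+else:
+    print(r['error'])
+```
+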
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-compiler-rust/_cm.yaml)*** + * get,python3 + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-compiler-rust/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-compiler-rust/_cm.yaml) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-compiler-rust/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-compiler-rust/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-compiler-rust/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-compiler-rust/_cm.yaml) + +___ +### Script output +`cmr "get rust-compiler " -j` +#### New environment keys (filter) + +* `+PATH` +#### New environment keys auto-detected from customize diff --git a/script/get-compiler-rust/_cm.yaml b/script/get-compiler-rust/_cm.yaml new file mode 100644 index 0000000000..ca1a372d9c --- /dev/null +++ b/script/get-compiler-rust/_cm.yaml @@ -0,0 +1,19 @@ +uid: 97ffbd9e537b4b59 +alias: get-compiler-rust + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: Compiler automation + +deps: + - tags: get,python3 + +new_env_keys: +- "+PATH" + +tags: +- get +- rust-compiler diff --git a/script/get-compiler-rust/customize.py b/script/get-compiler-rust/customize.py new file mode 100644 index 0000000000..cd42edf7bf --- /dev/null +++ b/script/get-compiler-rust/customize.py @@ -0,0 +1,25 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + rust_path = os.path.join(os.path.expanduser('~'),".cargo", "bin") + env['+PATH'] = [ rust_path ] + + return {'return':0} diff --git a/script/get-compiler-rust/run.sh b/script/get-compiler-rust/run.sh new file mode 100644 index 0000000000..4651e2fd04 --- /dev/null +++ b/script/get-compiler-rust/run.sh @@ -0,0 +1,7 @@ +CM_PYTHON_BIN=${CM_PYTHON_BIN_WITH_PATH:-python3} + +${CM_PYTHON_BIN} -m pip install --upgrade pip ${CM_PYTHON_PIP_COMMON_EXTRA} +${CM_PYTHON_BIN} -m pip install setuptools testresources wheel h5py --user --upgrade --ignore-installed ${CM_PYTHON_PIP_COMMON_EXTRA} + +curl https://sh.rustup.rs -sSf -o tmp.sh +sh tmp.sh -y diff --git a/script/get-conda/README.md b/script/get-conda/README.md new file mode 100644 index 0000000000..a1927ef8de --- /dev/null +++ b/script/get-conda/README.md @@ -0,0 +1,166 @@ +Automatically generated README for this automation recipe: **get-conda** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-conda,6600115f41324c7b) ]* + +--- +#### Summary + +* CM 
GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-conda)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,conda,get-conda*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get conda get-conda" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,conda,get-conda`
+
+`cm run script --tags=get,conda,get-conda[,variations] `
+
+*or*
+
+`cmr "get conda get-conda"`
+
+`cmr "get conda get-conda [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,conda,get-conda',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,conda,get-conda"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,conda,get-conda) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get conda get-conda[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_name.#` + - Environment variables: + - *CM_CONDA_PREFIX_NAME*: `#` + - Workflow: + +
+ + + * Group "**conda-python**" +
+ Click here to expand this section. + + * `_python-3.#` + - Environment variables: + - *CM_CONDA_PYTHON_VERSION*: `3.#` + - Workflow: + * `_python-3.8` + - Environment variables: + - *CM_CONDA_PYTHON_VERSION*: `3.8` + - Workflow: + +
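+The `#` in the wildcard variations above is replaced by the value supplied in the tag. A minimal sketch (the environment name `myenv` is just an example) creating a named conda prefix with Python 3.8:
+
+```python
+import cmind
+
+# '_name.myenv' sets CM_CONDA_PREFIX_NAME=myenv and
+# '_python-3.8' sets CM_CONDA_PYTHON_VERSION=3.8.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,conda,get-conda,_name.myenv,_python-3.8',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```
+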
+ +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-conda/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-conda/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-conda/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-conda/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-conda/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-conda/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-conda/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-conda/_cm.json) + +___ +### Script output +`cmr "get conda get-conda [,variations]" -j` +#### New environment keys (filter) + +* `+LD_LIBRARY_PATH` +* `+PATH` +* `CM_CONDA_BIN_PATH` +* `CM_CONDA_BIN_WITH_PATH` +* `CM_CONDA_LIB_PATH` +* `CM_CONDA_PREFIX` +* `CONDA_PREFIX` +#### New environment keys auto-detected from customize + +* `CM_CONDA_BIN_PATH` +* `CM_CONDA_BIN_WITH_PATH` +* `CM_CONDA_LIB_PATH` +* `CM_CONDA_PREFIX` \ No newline at end of file diff --git a/script/get-conda/_cm.json b/script/get-conda/_cm.json new file mode 100644 index 0000000000..3253a3ff56 --- /dev/null +++ b/script/get-conda/_cm.json @@ -0,0 +1,52 @@ +{ + "alias": "get-conda", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "DevOps automation", + "clean_files": [], + "deps": [ + { + "tags": "detect,os" + } + ], + "new_env_keys": [ + "+PATH", + "+LD_LIBRARY_PATH", + "CM_CONDA_PREFIX", + "CONDA_PREFIX", + "CM_CONDA_BIN_PATH", + "CM_CONDA_BIN_WITH_PATH", + "CM_CONDA_LIB_PATH" + ], + "tags": [ + "get", + "conda", + "get-conda" + ], + "uid": "6600115f41324c7b", + "variations": { + "name.#": { + "env": { + "CM_CONDA_PREFIX_NAME": "#" + }, + "adr": { + "conda-package": { + "tags": "_name.#" + } + } + }, + "python-3.8": { + "group": "conda-python", + "env": { + "CM_CONDA_PYTHON_VERSION": "3.8" + } + }, + "python-3.#": { + "group": "conda-python", + "env": { + "CM_CONDA_PYTHON_VERSION": "3.#" + } + } + } +} diff --git a/script/get-conda/customize.py b/script/get-conda/customize.py new file mode 100644 index 0000000000..d8ef13e343 --- /dev/null +++ b/script/get-conda/customize.py @@ -0,0 +1,95 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + automation = i['automation'] + run_script_input = i['run_script_input'] + + recursion_spaces = i['recursion_spaces'] + + conda_prefix_name = env.get('CM_CONDA_PREFIX_NAME', '') + r = None + file_name = 'conda.exe' if os_info['platform'] == 'windows' else 'conda' + if conda_prefix_name == '': + tmp_path = env.get('CM_CONDA_INSTALL_PATH', env.get('CM_TMP_PATH', '')) + if tmp_path: + x = ';' if os_info['platform'] == 'windows' else ':' + tmp_path+=x + conda_path = os.path.join(os.path.expanduser("~"), "miniconda3", "bin") + if os.path.exists(conda_path): + tmp_path += os.path.join(os.path.expanduser("~"), "miniconda3", "bin") + 
env['CM_TMP_PATH'] = tmp_path + + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': 'PATH', + 'detect_version':True, + 'env_path_key':'CM_CONDA_BIN_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + else: + env['CM_CONDA_INSTALL_PATH'] = os.path.join(os.getcwd(), "miniconda3") + bin_dir = 'Scripts' if os_info['platform'] == 'windows' else 'bin' + env['CM_CONDA_BIN_WITH_PATH'] = os.path.join(env['CM_CONDA_INSTALL_PATH'], bin_dir, file_name) + + if conda_prefix_name != '' or r['return'] >0 : + if conda_prefix_name != '' or r['return'] == 16: + if conda_prefix_name == '': + if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': + return r + + print (recursion_spaces+' # {}'.format(r['error'])) + + # Attempt to run installer + r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install'}) + if r['return']>0: return r + + # Grigori: temporal fix - should be generalized/improved above + if os_info['platform'] == 'windows' and env.get('CM_CONDA_BIN_WITH_PATH','')=='': + env['CM_CONDA_INSTALL_PATH'] = os.path.join(os.getcwd(), "miniconda3") + env['CM_CONDA_BIN_WITH_PATH'] = os.path.join(env['CM_CONDA_INSTALL_PATH'], 'Scripts', file_name) + + + else: + found_path = r['found_path'] + env['+PATH'] = [ found_path ] + + return {'return':0} + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'conda\s*([\d.]+)', + 'group_number': 1, + 'env_key':'CM_CONDA_VERSION', + 'which_env':i['env']}) + if r['return'] >0: return r + return {'return':0, 'version':r['version']} + +def postprocess(i): + env = i['env'] + + r = detect_version(i) + if r['return'] >0: return r + + conda_bin_path = os.path.dirname(env['CM_CONDA_BIN_WITH_PATH']) + env['CM_CONDA_BIN_PATH'] = conda_bin_path + + conda_prefix = os.path.dirname(conda_bin_path) + env['CM_CONDA_PREFIX'] = conda_prefix + env['CONDA_PREFIX'] = conda_prefix + + conda_lib_path = os.path.join(conda_prefix, "lib") + + if os.path.exists(conda_lib_path): + env['CM_CONDA_LIB_PATH'] = conda_lib_path + env['+LD_LIBRARY_PATH'] = [ conda_lib_path ] + + version = r['version'] + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return':0, 'version':version} diff --git a/script/get-conda/install.bat b/script/get-conda/install.bat new file mode 100644 index 0000000000..2528840d9c --- /dev/null +++ b/script/get-conda/install.bat @@ -0,0 +1,9 @@ +if exist Miniconda3-latest-Windows-x86_64.exe ( + del /Q /S Miniconda3-latest-Windows-x86_64.exe +) + +wget --no-check-certificate https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +start /wait "" Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /RegisterPython=0 /S /D=%CD%\miniconda3 +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-conda/install.sh b/script/get-conda/install.sh new file mode 100644 index 0000000000..6d18882858 --- /dev/null +++ b/script/get-conda/install.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh +test $? -eq 0 || exit $? +chmod +x ~/miniconda.sh + +if [ ! -z ${CM_CONDA_PREFIX_NAME} ]; then + CM_CONDA_INSTALL_PATH=$PWD/miniconda3 + rm -rf ${CM_CONDA_INSTALL_PATH} +fi + + +if [ ! -z ${CM_CONDA_INSTALL_PATH} ]; then + ~/miniconda.sh -b -p ${CM_CONDA_INSTALL_PATH} +else + ~/miniconda.sh -b +fi +test $? 
-eq 0 || exit $? diff --git a/script/get-conda/run.bat b/script/get-conda/run.bat new file mode 100644 index 0000000000..99b9d97d27 --- /dev/null +++ b/script/get-conda/run.bat @@ -0,0 +1 @@ +%CM_CONDA_BIN_WITH_PATH% --version > tmp-ver.out diff --git a/script/get-conda/run.sh b/script/get-conda/run.sh new file mode 100644 index 0000000000..5d61f106f8 --- /dev/null +++ b/script/get-conda/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +${CM_CONDA_BIN_WITH_PATH} --version > tmp-ver.out diff --git a/script/get-croissant/README.md b/script/get-croissant/README.md new file mode 100644 index 0000000000..a32d3da5b2 --- /dev/null +++ b/script/get-croissant/README.md @@ -0,0 +1,128 @@ +Automatically generated README for this automation recipe: **get-croissant** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-croissant,8fd653eac8da4c14) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *get,mlcommons,croissant* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get mlcommons croissant" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,mlcommons,croissant` + +`cm run script --tags=get,mlcommons,croissant ` + +*or* + +`cmr "get mlcommons croissant"` + +`cmr "get mlcommons croissant " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,mlcommons,croissant',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,mlcommons,croissant"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,mlcommons,croissant) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get mlcommons croissant" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+
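+After this script installs `mlcroissant`, the validation step performed by its `run.sh` (shown below) can be reproduced manually. A minimal sketch; the metadata path is the example dataset from the croissant repository, given relative to the repository checkout:
+
+```python
+import subprocess
+
+# Mirrors the check in run.sh: validate an example Croissant metadata file.
+subprocess.run(['mlcroissant', 'validate',
+                '--file', 'datasets/titanic/metadata.json'],
+               check=True)
+```
+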
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,python3 + * CM names: `--adr.['python3', 'python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * git,repo,_repo.https://github.com/mlcommons/croissant + * CM names: `--adr.['git-mlcommons-croissant']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/_cm.yaml) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/_cm.yaml) + +___ +### Script output +`cmr "get mlcommons croissant " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/get-croissant/_cm.yaml b/script/get-croissant/_cm.yaml new file mode 100644 index 0000000000..a024189d24 --- /dev/null +++ b/script/get-croissant/_cm.yaml @@ -0,0 +1,30 @@ +alias: get-croissant +uid: 8fd653eac8da4c14 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: AI/ML datasets + +deps: + - tags: detect,os + + - names: + - python3 + - python + tags: get,python3 + version_min: '3.10' + + - env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLCOMMONS_CROISSANT_PATH + extra_cache_tags: mlcommons,croissant + names: + - git-mlcommons-croissant + tags: git,repo,_repo.https://github.com/mlcommons/croissant + +tags: + - get + - mlcommons + - croissant diff --git a/script/get-croissant/customize.py b/script/get-croissant/customize.py new file mode 100644 index 0000000000..1ced8a4846 --- /dev/null +++ b/script/get-croissant/customize.py @@ -0,0 +1,16 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + return {'return':0} + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + + + return {'return':0} diff --git a/script/get-croissant/run.bat b/script/get-croissant/run.bat new file mode 100644 index 0000000000..3177de9f60 --- /dev/null +++ b/script/get-croissant/run.bat @@ -0,0 +1,20 @@ +@echo off + +echo ======================================================= + +cd %CM_MLCOMMONS_CROISSANT_PATH%\python\mlcroissant +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo. +echo Running %CM_PYTHON_BIN_WITH_PATH% -m pip install -e .[git] + +%CM_PYTHON_BIN_WITH_PATH% -m pip install -e .[git] +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo. +echo Validating Croissant ... 
+ +mlcroissant validate --file ../../datasets/titanic/metadata.json +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo ======================================================= diff --git a/script/get-croissant/run.sh b/script/get-croissant/run.sh new file mode 100644 index 0000000000..dd2c67bb27 --- /dev/null +++ b/script/get-croissant/run.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +echo "=======================================================" + +cd ${CM_MLCOMMONS_CROISSANT_PATH}/python/mlcroissant +if [ "${?}" != "0" ]; then exit 1; fi + +echo "" +echo "Running ${CM_PYTHON_BIN_WITH_PATH} -m pip install -e .[git]" + +${CM_PYTHON_BIN_WITH_PATH} -m pip install -e .[git] +if [ "${?}" != "0" ]; then exit 1; fi + +echo "" +echo "Validating Croissant ..." + +mlcroissant validate --file ../../datasets/titanic/metadata.json +if [ "${?}" != "0" ]; then exit 1; fi + +echo "=======================================================" + diff --git a/script/get-cuda-devices/README.md b/script/get-cuda-devices/README.md new file mode 100644 index 0000000000..e9fd635c60 --- /dev/null +++ b/script/get-cuda-devices/README.md @@ -0,0 +1,124 @@ +Automatically generated README for this automation recipe: **get-cuda-devices** + +Category: **CUDA automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-cuda-devices,7a3ede4d3558427a) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda-devices)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,cuda-devices* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get cuda-devices" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,cuda-devices` + +`cm run script --tags=get,cuda-devices ` + +*or* + +`cmr "get cuda-devices"` + +`cmr "get cuda-devices " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,cuda-devices',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,cuda-devices"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,cuda-devices) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get cuda-devices" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+
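+A minimal sketch reading the device properties gathered by this script's `customize.py` (shown below). It assumes the run result exposes collected state under `new_state`, which is an assumption about the CM API rather than something documented here:
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,cuda-devices',
+                  'out': 'con'})
+if r['return'] == 0:
+    # customize.py stores parsed "key: value" pairs in state['cm_cuda_device_prop'].
+    props = r.get('new_state', {}).get('cm_cuda_device_prop', {})
+    for key, val in props.items():
+        print(f'{key}: {val}')
+else:
+    print(r['error'])
+```
+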
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda-devices/_cm.json)*** + * get,cuda,_toolkit + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda-devices/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda-devices/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda-devices/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda-devices/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda-devices/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda-devices/_cm.json) + +___ +### Script output +`cmr "get cuda-devices " -j` +#### New environment keys (filter) + +* `CM_CUDA_DEVICE_*` +#### New environment keys auto-detected from customize diff --git a/script/get-cuda-devices/_cm.json b/script/get-cuda-devices/_cm.json new file mode 100644 index 0000000000..9b41c8cfe4 --- /dev/null +++ b/script/get-cuda-devices/_cm.json @@ -0,0 +1,36 @@ +{ + "alias": "get-cuda-devices", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "can_force_cache": true, + "cache": false, + "category": "CUDA automation", + "clean_files": [ + "tmp-run.out" + ], + "new_env_keys": [ + "CM_CUDA_DEVICE_*" + ], + "new_state_keys": [ + "cm_cuda_device_prop" + ], + "deps": [ + { + "names": [ + "cuda" + ], + "tags": "get,cuda,_toolkit" + } + ], + "print_files_if_script_error": [ + "tmp-run.out" + ], + "tags": [ + "get", + "cuda-devices" + ], + "docker": { + "run": false + }, + "uid": "7a3ede4d3558427a" +} diff --git a/script/get-cuda-devices/customize.py b/script/get-cuda-devices/customize.py new file mode 100644 index 0000000000..8236b8029e --- /dev/null +++ b/script/get-cuda-devices/customize.py @@ -0,0 +1,37 @@ +from cmind import utils +import os +import subprocess + +def postprocess(i): + + env = i['env'] + state = i['state'] + + os_info = i['os_info'] + + r = utils.load_txt(file_name='tmp-run.out', + check_if_exists = True, + split = True) + if r['return']>0: return r + + lst = r['list'] + + # properties + p = {} + + for line in lst: + print (line) + + j = line.find(':') + if j>=0: + key = line[:j].strip() + val = line[j+1:].strip() + + p[key] = val + + key_env = 'CM_CUDA_DEVICE_PROP_'+key.upper().replace(' ','_') + env[key_env] = val + + state['cm_cuda_device_prop'] = p + + return {'return':0} diff --git a/script/get-cuda-devices/print_cuda_devices.cu b/script/get-cuda-devices/print_cuda_devices.cu new file mode 100644 index 0000000000..d68a109bad --- /dev/null +++ b/script/get-cuda-devices/print_cuda_devices.cu @@ -0,0 +1,74 @@ +#ifndef WINDOWS + #include +#endif + +#include +#include + +int main(int argc, char *argv[]) +{ + int ndev=0; + int id=0; + cudaError_t error; + cudaDeviceProp features; + + int rtver=0; + int dver=0; + + /* Get number of devices */ + error = cudaGetDeviceCount(&ndev); + if (error != cudaSuccess) { + printf("Error: problem obtaining number of CUDA devices: %d\n", error); + 
return 1; + } + + /* Iterating over devices */ + for (id=0; id tmp-run.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-cuda-devices/run.sh b/script/get-cuda-devices/run.sh new file mode 100644 index 0000000000..9c774758fc --- /dev/null +++ b/script/get-cuda-devices/run.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# Compile + +rm a.out + +echo "" +echo "Checking compiler version ..." +echo "" + +${CM_NVCC_BIN_WITH_PATH} -V + +echo "" +echo "Compiling program ..." +echo "" + +cd ${CM_TMP_CURRENT_SCRIPT_PATH} + +${CM_NVCC_BIN_WITH_PATH} -allow-unsupported-compiler print_cuda_devices.cu +test $? -eq 0 || exit 1 + +# Return to the original path obtained in CM + +echo "" +echo "Running program ..." +echo "" + +cd - +#${CM_TMP_CURRENT_PATH} + +${CM_TMP_CURRENT_SCRIPT_PATH}/a.out > tmp-run.out +test $? -eq 0 || exit 1 diff --git a/script/get-cuda/README-about.md b/script/get-cuda/README-about.md new file mode 100644 index 0000000000..af8e24899b --- /dev/null +++ b/script/get-cuda/README-about.md @@ -0,0 +1,6 @@ +# System dependencies + +* Download [CUDA toolkit](https://developer.nvidia.com/cuda-toolkit). +* Download [cuDNN](https://developer.nvidia.com/rdp/cudnn-download). +* Download [TensorRT](https://developer.nvidia.com/nvidia-tensorrt-8x-download). + diff --git a/script/get-cuda/README-extra.md b/script/get-cuda/README-extra.md new file mode 100644 index 0000000000..c075711ff4 --- /dev/null +++ b/script/get-cuda/README-extra.md @@ -0,0 +1,44 @@ +# Get CUDA + +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed CUDA on the system +and if not found calls the [install script for CUDA](../script/install-cuda-prebuilt). + +## Exported Variables +* `CM_CUDA_INSTALLED_PATH` +* `CM_CUDA_VERSION` +* `CM_NVCC_BIN_WITH_PATH` +* `CUDA_HOME` +* `CUDA_PATH` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. Windows + +# Examples + +## Detect CUDA on Windows + +You may want to install all system dependencies as described [here](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html). + +If Visual Studio and CUDA updated your PATH variable, you should just run the following: +```bash +cm run script "get cuda" +``` + +However, if the PATH variable was not updated, you need to provide path to the cl.exe and nvcc.exe to help CM detect them: + +```bash +cm run script "get cl" --path="C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\bin\Hostx64\x64" +cm run script "get cuda _compiler" --path="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\bin" +``` + +# System dependencies + +* Download [CUDA toolkit](https://developer.nvidia.com/cuda-toolkit). +* Download [cuDNN](https://developer.nvidia.com/rdp/cudnn-download). +* (Download [TensorRT](https://developer.nvidia.com/nvidia-tensorrt-8x-download)). + +## Windows + +* ? 
Download [Microsoft Visual C++ Redistributable](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist)
+* Check [Nvidia installation guide](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html)
diff --git a/script/get-cuda/README.md b/script/get-cuda/README.md
new file mode 100644
index 0000000000..849e7cb5cc
--- /dev/null
+++ b/script/get-cuda/README.md
@@ -0,0 +1,229 @@
+Automatically generated README for this automation recipe: **get-cuda**
+
+Category: **CUDA automation**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-cuda,46d133d9ef92422d) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+
+# System dependencies
+
+* Download [CUDA toolkit](https://developer.nvidia.com/cuda-toolkit).
+* Download [cuDNN](https://developer.nvidia.com/rdp/cudnn-download).
+* Download [TensorRT](https://developer.nvidia.com/nvidia-tensorrt-8x-download).
+
+
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,cuda,cuda-compiler,cuda-lib,toolkit,lib,nvcc,get-nvcc,get-cuda*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,cuda,cuda-compiler,cuda-lib,toolkit,lib,nvcc,get-nvcc,get-cuda`
+
+`cm run script --tags=get,cuda,cuda-compiler,cuda-lib,toolkit,lib,nvcc,get-nvcc,get-cuda[,variations] [--input_flags]`
+
+*or*
+
+`cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda"`
+
+`cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,cuda,cuda-compiler,cuda-lib,toolkit,lib,nvcc,get-nvcc,get-cuda',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,cuda,cuda-compiler,cuda-lib,toolkit,lib,nvcc,get-nvcc,get-cuda"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,cuda,cuda-compiler,cuda-lib,toolkit,lib,nvcc,get-nvcc,get-cuda) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_cudnn` + - Environment variables: + - *CM_CUDA_NEEDS_CUDNN*: `yes` + - Workflow: + 1. ***Read "post_deps" on other CM scripts*** + * get,nvidia,cudnn + * CM names: `--adr.['cudnn']...` + - CM script: [get-cudnn](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cudnn) + * `_package-manager` + - Environment variables: + - *CM_CUDA_PACKAGE_MANAGER_INSTALL*: `yes` + - Workflow: + +
+ + + * Group "**installation-mode**" +
+ Click here to expand this section. + + * `_lib-only` + - Environment variables: + - *CM_CUDA_FULL_TOOLKIT_INSTALL*: `no` + - *CM_TMP_FILE_TO_CHECK_UNIX*: `libcudart.so` + - *CM_TMP_FILE_TO_CHECK_WINDOWS*: `libcudart.dll` + - Workflow: + * **`_toolkit`** (default) + - Environment variables: + - *CM_CUDA_FULL_TOOLKIT_INSTALL*: `yes` + - *CM_TMP_FILE_TO_CHECK_UNIX*: `nvcc` + - *CM_TMP_FILE_TO_CHECK_WINDOWS*: `nvcc.exe` + - Workflow: + +
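+A minimal sketch (illustrative only) selecting the non-default `_lib-only` mode, which detects `libcudart` instead of `nvcc`:
+
+```python
+import cmind
+
+# '_lib-only' sets CM_CUDA_FULL_TOOLKIT_INSTALL=no, so only the
+# CUDA runtime library is searched for (see the variation list above).
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,cuda,_lib-only',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```
+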
+ + +#### Default variations + +`_toolkit` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--cudnn_tar_file=value` → `CM_CUDNN_TAR_FILE_PATH=value`
+* `--cudnn_tar_path=value` → `CM_CUDNN_TAR_FILE_PATH=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "cudnn_tar_file":...})
+```
+
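+A more complete sketch of the same call (the tar path below is a placeholder; the `_cudnn` variation is defined in the variations section above):
+
+```python
+import cmind
+
+# 'cudnn_tar_file' is mapped to CM_CUDNN_TAR_FILE_PATH via input_mapping.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,cuda,_cudnn',
+                  'cudnn_tar_file': '/path/to/cudnn.tar.xz',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```
+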
+ +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+* CM_CUDA_PATH_LIB_CUDNN_EXISTS: `no`
+* CM_REQUIRE_INSTALL: `no`
+
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,cl + * `if (CM_CUDA_FULL_TOOLKIT_INSTALL == yes AND CM_HOST_OS_TYPE == windows)` + * CM names: `--adr.['compiler']...` + - CM script: [get-cl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cl) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda/_cm.json)*** + * install,cuda,prebuilt + * `if (CM_REQUIRE_INSTALL == yes)` + * CM names: `--adr.['install-cuda-prebuilt']...` + - CM script: [install-cuda-prebuilt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-cuda-prebuilt) + * get,generic-sys-util,_nvidia-cuda-toolkit + * `if (CM_CUDA_PACKAGE_MANAGER_INSTALL == yes)` + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cuda/_cm.json) + +___ +### Script output +`cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `+ LDFLAGS` +* `+CPLUS_INCLUDE_PATH` +* `+C_INCLUDE_PATH` +* `+DYLD_FALLBACK_LIBRARY_PATH` +* `+LD_LIBRARY_PATH` +* `+PATH` +* `CM_CUDA_*` +* `CM_NVCC_*` +* `CUDA_HOME` +* `CUDA_PATH` +#### New environment keys auto-detected from customize + +* `CM_CUDA_CACHE_TAGS` +* `CM_CUDA_FULL_TOOLKIT_INSTALL` +* `CM_CUDA_INSTALLED_PATH` +* `CM_CUDA_PATH_BIN` +* `CM_CUDA_PATH_INCLUDE` +* `CM_CUDA_PATH_LIB` +* `CM_CUDA_VERSION` +* `CM_CUDA_VERSION_STRING` +* `CM_NVCC_BIN` \ No newline at end of file diff --git a/script/get-cuda/_cm.json b/script/get-cuda/_cm.json new file mode 100644 index 0000000000..fa5dac580e --- /dev/null +++ b/script/get-cuda/_cm.json @@ -0,0 +1,130 @@ +{ + "alias": "get-cuda", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "CUDA automation", + "clean_files": [], + "default_env": { + "CM_CUDA_PATH_LIB_CUDNN_EXISTS": "no", + "CM_REQUIRE_INSTALL": "no" + }, + "docker": { + }, + "input_mapping": { + "cudnn_tar_path": "CM_CUDNN_TAR_FILE_PATH", + "cudnn_tar_file": "CM_CUDNN_TAR_FILE_PATH" + }, + "new_env_keys": [ + "CUDA_HOME", + "CUDA_PATH", + "CM_CUDA_*", + "CM_NVCC_*", + "+PATH", + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH", + "+ LDFLAGS" + ], + "deps": [ + { + "tags": "detect,os" + }, + { + "enable_if_env": { + "CM_CUDA_FULL_TOOLKIT_INSTALL": [ + "yes" + ], + "CM_HOST_OS_TYPE": [ + "windows" + ] + }, + "names": [ + "compiler" + ], + "tags": "get,cl" + } + ], + "prehook_deps": [ + { 
+ "enable_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "names": [ + "install-cuda-prebuilt" + ], + "reuse_version": true, + "tags": "install,cuda,prebuilt" + }, + { + "enable_if_env": { + "CM_CUDA_PACKAGE_MANAGER_INSTALL": [ + "yes" + ] + }, + "tags": "get,generic-sys-util,_nvidia-cuda-toolkit" + } + ], + "print_files_if_script_error": [ + "tmp-ver.out" + ], + "tags": [ + "get", + "cuda", + "cuda-compiler", + "cuda-lib", + "toolkit", + "lib", + "nvcc", + "get-nvcc", + "get-cuda" + ], + "uid": "46d133d9ef92422d", + "variations": { + "cudnn": { + "env": { + "CM_CUDA_NEEDS_CUDNN": "yes" + }, + "post_deps": [ + { + "names": [ + "cudnn" + ], + "tags": "get,nvidia,cudnn" + } + ] + }, + "lib-only": { + "env": { + "CM_CUDA_FULL_TOOLKIT_INSTALL": "no", + "CM_TMP_FILE_TO_CHECK_UNIX": "libcudart.so", + "CM_TMP_FILE_TO_CHECK_WINDOWS": "libcudart.dll" + }, + "group": "installation-mode" + }, + "package-manager": { + "env": { + "CM_CUDA_PACKAGE_MANAGER_INSTALL": "yes" + } + }, + "toolkit": { + "default": true, + "env": { + "CM_CUDA_FULL_TOOLKIT_INSTALL": "yes", + "CM_TMP_FILE_TO_CHECK_UNIX": "nvcc", + "CM_TMP_FILE_TO_CHECK_WINDOWS": "nvcc.exe" + }, + "group": "installation-mode" + } + }, + "print_env_at_the_end" : { + "CM_CUDA_PATH_LIB_CUDNN_EXISTS": "", + "CM_CUDA_VERSION": "", + "CM_CUDA_VERSION_STRING": "", + "CM_NVCC_BIN_WITH_PATH": "", + "CUDA_HOME": "" + } +} diff --git a/script/get-cuda/customize.py b/script/get-cuda/customize.py new file mode 100644 index 0000000000..95984515bc --- /dev/null +++ b/script/get-cuda/customize.py @@ -0,0 +1,215 @@ +from cmind import utils +import os +import json + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + if os_info['platform'] == 'windows': + file_name = env['CM_TMP_FILE_TO_CHECK_WINDOWS'] + + if env.get('CM_INPUT','').strip()=='' and env.get('CM_TMP_PATH','').strip()=='': + # Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA" + paths = [] + for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]: + if os.path.isdir(path): + dirs = os.listdir(path) + for dr in dirs: + path2 = os.path.join(path, dr, 'bin') + if os.path.isdir(path2): + paths.append(path2) + + if len(paths)>0: + tmp_paths = ';'.join(paths) + tmp_paths += ';'+os.environ.get('PATH','') + + env['CM_TMP_PATH'] = tmp_paths + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + else: + file_name = env['CM_TMP_FILE_TO_CHECK_UNIX'] + + # paths to cuda are not always in PATH - add a few typical locations to search for + # (unless forced by a user) + + if env.get('CM_INPUT','').strip()=='' and env.get('CM_TMP_PATH','').strip()=='': + system_path = os.environ.get('PATH') + if system_path: + system_path = system_path + ":" + env['CM_TMP_PATH'] = system_path + '/usr/local/cuda/bin:/usr/cuda/bin:/usr/local/cuda-11/bin:/usr/cuda-11/bin:/usr/local/cuda-12/bin:/usr/cuda-12/bin:/usr/local/packages/cuda' + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + if env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": + env_key = 'CM_NVCC_BIN_WITH_PATH' + path_env_key = 'PATH' + else: + env_key = 'CM_CUDA_RT_WITH_PATH' + path_env_key = 'LD_LIBRARY_PATH' + env['CM_TMP_ENV_KEY'] = env_key + + if env_key not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': path_env_key, + 'detect_version':True, + 'env_path_key':env_key, + 'run_script_input':i['run_script_input'], + 
'recursion_spaces':recursion_spaces}) + if r['return'] >0 : + if os_info['platform'] == 'windows': + return r + + if r['return'] == 16 and env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return':0} + + +def detect_version(i): + env = i['env'] + if env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": + return detect_version_nvcc(i) + else: + return detect_version_cuda_lib(i) + +def detect_version_nvcc(i): + r = i['automation'].parse_version({'match_text': r'release\s*([\d.]+)', + 'group_number': 1, + 'env_key':'CM_CUDA_VERSION', + 'which_env':i['env']}) + if r['return'] >0: return r + + version = r['version'] + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return':0, 'version':version} + +def detect_version_cuda_lib(i): + + env = i['env'] + print(env) + cuda_rt_file_path = env['CM_CUDA_RT_WITH_PATH'] + cuda_lib_path=os.path.dirname(cuda_rt_file_path) + cuda_path = os.path.abspath(os.path.join(cuda_lib_path, os.pardir)) + + cuda_version = "version-missing" + + version_json = os.path.join(cuda_path, "version.json") + if os.path.exists(version_json): + with open(version_json) as f: + version_info = json.load(f) + cuda_version_info = version_info.get('cuda_cudart') + if cuda_version_info: + cuda_version = cuda_version_info.get('version') + + + env['CM_CUDA_VERSION'] = cuda_version + version = cuda_version + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return':0, 'version':version} + + + +def postprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + r = detect_version(i) + if r['return'] >0: return r + version = r['version'] + + env['CM_CUDA_CACHE_TAGS'] = 'version-'+version + + found_file_path = env[env['CM_TMP_ENV_KEY']] + + if env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": + + cuda_path_bin = os.path.dirname(found_file_path) + env['CM_CUDA_PATH_BIN'] = cuda_path_bin + + cuda_path = os.path.dirname(cuda_path_bin) + env['CM_CUDA_INSTALLED_PATH'] = cuda_path + env['CM_NVCC_BIN'] = os.path.basename(found_file_path) + + else: + parent_path = os.path.dirname(found_file_path) #We traverse backwards until we find a path with include dir + env['CM_CUDA_PATH_LIB'] = parent_path + parent_path = os.path.dirname(parent_path) + while os.path.isdir(parent_path): + if os.path.exists(os.path.join(parent_path, "include")): + print("Path is "+parent_path) + found_path = parent_path + cuda_path = found_path + env['CM_CUDA_INSTALLED_PATH'] = cuda_path + break + else: + parent_path = os.path.dirname(parent_path) + + if 'CM_CUDA_INSTALLED_PATH' not in env: + return {'return': 1, 'error': "No CUDA installation path with an include directory is found"} + + env['CUDA_HOME']=cuda_path + env['CUDA_PATH']=cuda_path + + cuda_system_path_install = False + system_path = os.environ.get('PATH') + if os.path.join(cuda_path, "bin") in system_path.split(":"): + cuda_system_path_install = True + + # Check extra paths + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + env[key] = [] + + ## Include + cuda_path_include = os.path.join(cuda_path, 'include') + if os.path.isdir(cuda_path_include): + if os_info['platform'] != 'windows' and not cuda_system_path_install: + env['+C_INCLUDE_PATH'].append(cuda_path_include) + env['+CPLUS_INCLUDE_PATH'].append(cuda_path_include) + + env['CM_CUDA_PATH_INCLUDE'] = cuda_path_include + + ## Lib + if os_info['platform'] == 'windows': + extra_dir='x64' + else: + 
extra_dir='' + + for d in ['lib64', 'lib']: + cuda_path_lib = os.path.join(cuda_path, d) + + if extra_dir != '': + cuda_path_lib = os.path.join(cuda_path_lib, extra_dir) + + if os.path.isdir(cuda_path_lib): + if not cuda_system_path_install: + env['+LD_LIBRARY_PATH'].append(cuda_path_lib) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(cuda_path_lib) + + env['CM_CUDA_PATH_LIB'] = cuda_path_lib + break + + if '+ LDFLAGS' not in env: + env['+ LDFLAGS'] = [] + if 'CM_CUDA_PATH_LIB' in env and not cuda_system_path_install: + x = env['CM_CUDA_PATH_LIB'] + if ' ' in x: x='"'+x+'"' + env['+ LDFLAGS'].append("-L"+x) + + env['CM_CUDA_VERSION_STRING'] = "cu"+env['CM_CUDA_VERSION'].replace(".", "") + + return {'return':0, 'version': version} diff --git a/script/get-cuda/run.bat b/script/get-cuda/run.bat new file mode 100644 index 0000000000..89af970ac8 --- /dev/null +++ b/script/get-cuda/run.bat @@ -0,0 +1,3 @@ +"%CM_NVCC_BIN_WITH_PATH%" -V > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + diff --git a/script/get-cuda/run.sh b/script/get-cuda/run.sh new file mode 100644 index 0000000000..aac0fee360 --- /dev/null +++ b/script/get-cuda/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash +if [[ ${CM_CUDA_FULL_TOOLKIT_INSTALL} == "no" ]]; then + exit 0 +fi +nvcc_bin=${CM_NVCC_BIN_WITH_PATH:-nvcc} + +${nvcc_bin} -V > tmp-ver.out +test $? -eq 0 || exit 1 + +if [[ ${nvcc_bin} == "nvcc" ]]; then + nvcc_path=`which nvcc` + echo "CM_NVCC_BIN_WITH_PATH=${nvcc_path}" >> tmp-run-env.out + test $? -eq 0 || exit 1 +fi diff --git a/script/get-cudnn/README-extra.md b/script/get-cudnn/README-extra.md new file mode 100644 index 0000000000..3746808138 --- /dev/null +++ b/script/get-cudnn/README-extra.md @@ -0,0 +1,3 @@ +# TBD + +We need to add detection of cuDNN version on Windows, Linux and MacOS diff --git a/script/get-cudnn/README.md b/script/get-cudnn/README.md new file mode 100644 index 0000000000..75ec5c3634 --- /dev/null +++ b/script/get-cudnn/README.md @@ -0,0 +1,168 @@ +Automatically generated README for this automation recipe: **get-cudnn** + +Category: **CUDA automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-cudnn,d73ee19baee14df8) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cudnn)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,cudnn,nvidia* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get cudnn nvidia" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,cudnn,nvidia`
+
+`cm run script --tags=get,cudnn,nvidia [--input_flags]`
+
+*or*
+
+`cmr "get cudnn nvidia"`
+
+`cmr "get cudnn nvidia" [--input_flags]`
+
+
+
+#### Input Flags
+
+* --**input**=Full path to the installed cuDNN library
+* --**tar_file**=Full path to the cuDNN tar file downloaded from the NVIDIA website (https://developer.nvidia.com/cudnn)
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "input":...})
+```
+#### Run this script from Python
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,cudnn,nvidia',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
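+For example, a minimal sketch (with a placeholder tar file path) that passes the `tar_file` input flag documented above through the same API:
+
+```python
+import cmind
+
+# "tar_file" maps to CM_CUDNN_TAR_FILE_PATH (see the input flags above);
+# the path below is a placeholder for your own download.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,cudnn,nvidia',
+                  'out': 'con',
+                  'tar_file': '/path/to/cudnn-tar-file.tar.xz'})
+
+if r['return'] > 0:
+    print(r['error'])
+```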
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,cudnn,nvidia"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,cudnn,nvidia) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get cudnn nvidia" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+<summary>Click here to expand this section.</summary>
+
+* `--input=value`  →  `CM_INPUT=value`
+* `--tar_file=value`  →  `CM_CUDNN_TAR_FILE_PATH=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "input":...})
+```
+
+</details>
+ +#### Default environment + +
+<summary>Click here to expand this section.</summary>
+
+These keys can be updated via `--env.KEY=VALUE`, an `env` dictionary in `@input.json`, or script flags.
+
+* CM_SUDO: `sudo`
+* CM_INPUT: ``
+
+</details>
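+As an illustration, these defaults can be overridden from the Python API via the `env` dictionary; a minimal sketch (assuming copying without `sudo` is acceptable on your system) that clears `CM_SUDO`:
+
+```python
+import cmind
+
+# Override the CM_SUDO default ("sudo") with an empty string so the
+# fallback copy step in run.sh runs without elevated privileges.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,cudnn,nvidia',
+                  'out': 'con',
+                  'env': {'CM_SUDO': ''}})
+
+if r['return'] > 0:
+    print(r['error'])
+```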
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cudnn/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,cuda + * `if (CM_CUDA_PATH_LIB != on OR CM_CUDA_PATH_INCLUDE != on)` + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cudnn/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cudnn/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cudnn/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cudnn/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cudnn/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-cudnn/_cm.json) + +___ +### Script output +`cmr "get cudnn nvidia " [--input_flags] -j` +#### New environment keys (filter) + +* `+CPLUS_INCLUDE_PATH` +* `+C_INCLUDE_PATH` +* `+DYLD_FALLBACK_LIBRARY_PATH` +* `+LD_LIBRARY_PATH` +* `+PATH` +* `CM_CUDA_PATH_INCLUDE_CUDNN` +* `CM_CUDA_PATH_LIB_CUDNN` +* `CM_CUDA_PATH_LIB_CUDNN_EXISTS` +* `CM_CUDNN_*` +#### New environment keys auto-detected from customize + +* `CM_CUDA_PATH_INCLUDE_CUDNN` +* `CM_CUDA_PATH_LIB_CUDNN` +* `CM_CUDA_PATH_LIB_CUDNN_EXISTS` +* `CM_CUDNN_VERSION` \ No newline at end of file diff --git a/script/get-cudnn/_cm.json b/script/get-cudnn/_cm.json new file mode 100644 index 0000000000..15fe03988f --- /dev/null +++ b/script/get-cudnn/_cm.json @@ -0,0 +1,52 @@ +{ + "alias": "get-cudnn", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "CUDA automation", + "clean_files": [], + "default_env": { + "CM_SUDO": "sudo", + "CM_INPUT": "" + }, + "input_mapping": { + "input": "CM_INPUT", + "tar_file": "CM_CUDNN_TAR_FILE_PATH" + }, + "input_description": { + "input": {"desc":"Full path to the installed cuDNN library"}, + "tar_file": {"desc":"Full path to the cuDNN Tar file downloaded from Nvidia website (https://developer.nvidia.com/cudnn)"} + }, + "deps": [ + { + "tags": "detect,os" + }, + { + "names": [ "cuda" ], + "tags": "get,cuda", + "skip_if_env": { + "CM_CUDA_PATH_LIB": [ "on" ], + "CM_CUDA_PATH_INCLUDE": [ "on" ] + } + } + ], + "new_env_keys": [ + "CM_CUDNN_*", + "CM_CUDA_PATH_LIB_CUDNN", + "CM_CUDA_PATH_INCLUDE_CUDNN", + "CM_CUDA_PATH_LIB_CUDNN_EXISTS", + "+PATH", + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ], + "tags": [ + "get", + "cudnn", + "nvidia" + ], + "uid": "d73ee19baee14df8", + "docker": { + } +} diff --git a/script/get-cudnn/customize.py b/script/get-cudnn/customize.py new file mode 100644 index 0000000000..12c45e5a04 --- /dev/null +++ b/script/get-cudnn/customize.py @@ -0,0 +1,142 @@ +from cmind import utils +import os +import tarfile +import shutil + +def preprocess(i): + + recursion_spaces = i['recursion_spaces'] + + os_info = i['os_info'] + + env = i['env'] + + env['CM_TMP_RUN_COPY_SCRIPT'] = "no" + + + # If TAR file is not explicitly 
specified, search
+    if env.get('CM_CUDNN_TAR_FILE_PATH','')=='':
+
+        cuda_path_lib = env.get('CM_CUDA_PATH_LIB')
+
+        if os_info['platform'] == 'windows':
+            extra_pre=''
+            extra_ext='lib'
+        else:
+            extra_pre='lib'
+            extra_ext='so'
+
+        libfilename = extra_pre + 'cudnn.' + extra_ext
+        env['CM_CUDNN_VERSION'] = 'vdetected'
+
+        if os.path.exists(os.path.join(cuda_path_lib, libfilename)):
+            env['CM_CUDA_PATH_LIB_CUDNN'] = env['CM_CUDA_PATH_LIB']
+            return {'return': 0}
+
+        if env.get('CM_TMP_PATH', '').strip() != '':
+            path = env.get('CM_TMP_PATH')
+            if os.path.exists(os.path.join(path, libfilename)):
+                env['CM_CUDA_PATH_LIB_CUDNN'] = path
+                return {'return': 0}
+
+        if env.get('CM_INPUT','').strip()=='':
+            if os_info['platform'] == 'windows':
+                if env.get('CM_TMP_PATH','').strip()=='':
+                    # Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA"
+                    paths = []
+                    for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]:
+                        if os.path.isdir(path):
+                            dirs = os.listdir(path)
+                            for dr in dirs:
+                                path2 = os.path.join(path, dr, 'lib')
+                                if os.path.isdir(path2):
+                                    paths.append(path2)
+
+                    if len(paths)>0:
+                        tmp_paths = ';'.join(paths)
+                        tmp_paths += ';'+os.environ.get('PATH','')
+
+                        env['CM_TMP_PATH'] = tmp_paths
+                        env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+
+            else:
+                # Paths to CUDA are not always in PATH - add a few typical locations to search
+                # (unless forced by a user)
+
+                cm_tmp_path = env.get('CM_TMP_PATH','').strip()
+                if cm_tmp_path!='':
+                    cm_tmp_path+=':'
+                cm_tmp_path+='/usr/local/cuda/lib64:/usr/cuda/lib64:/usr/local/cuda/lib:/usr/cuda/lib:/usr/local/cuda-11/lib64:/usr/cuda-11/lib:/usr/local/cuda-12/lib:/usr/cuda-12/lib:/usr/local/packages/cuda/lib'
+                env['CM_TMP_PATH'] = cm_tmp_path
+                env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+
+                for lib_path in env.get('+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []):
+                    if os.path.exists(lib_path):
+                        env['CM_TMP_PATH'] += ':'+lib_path
+
+        r = i['automation'].find_artifact({'file_name': libfilename,
+                                           'env': env,
+                                           'os_info':os_info,
+                                           'default_path_env_key': 'LD_LIBRARY_PATH',
+                                           'detect_version':False,
+                                           'env_path_key':'CM_CUDA_PATH_LIB_CUDNN',
+                                           'run_script_input':i['run_script_input'],
+                                           'recursion_spaces':recursion_spaces})
+        if r['return'] > 0:
+            if os_info['platform'] == 'windows':
+                return r
+
+            if r['return'] == 16:
+                env['CM_TMP_REQUIRE_INSTALL'] = "yes"
+            else:
+                return r
+        else:
+            return {'return':0}
+
+    if env.get('CM_CUDNN_TAR_FILE_PATH','')=='':
+        return {'return': 1, 'error': 'Please invoke cm run script "get cudnn" --tar_file={full path to the cuDNN tar file}'}
+
+    print ('Untarring file - can take some time ...')
+
+    my_tar = tarfile.open(os.path.expanduser(env['CM_CUDNN_TAR_FILE_PATH']))
+    folder_name = my_tar.getnames()[0]
+    if not os.path.exists(os.path.join(os.getcwd(), folder_name)):
+        my_tar.extractall()
+    my_tar.close()
+
+    import re
+    # Escape the dots and allow multi-digit components when parsing the
+    # version from a folder name such as cudnn-linux-x86_64-8.9.2.26_cuda11-archive
+    version_match = re.match(r'cudnn-.*?-(\d+\.\d+\.\d+\.\d+)', folder_name)
+    if not version_match:
+        return {'return': 1, 'error': 'Extracted cuDNN folder does not look valid - version information is missing'}
+    version = version_match.group(1)
+    env['CM_CUDNN_VERSION'] = version
+
+    inc_path = os.path.join(os.getcwd(), folder_name, "include")
+    lib_path = os.path.join(os.getcwd(), folder_name, "lib")
+    cuda_inc_path = env['CM_CUDA_PATH_INCLUDE']
+    cuda_lib_path = env['CM_CUDA_PATH_LIB']
+    env['CM_CUDA_PATH_LIB_CUDNN'] = env['CM_CUDA_PATH_LIB']
+    env['CM_CUDA_PATH_INCLUDE_CUDNN'] = env['CM_CUDA_PATH_INCLUDE']
+
+    try:
+        print("Copying
cudnn include files to {}(CUDA_INCLUDE_PATH)".format(cuda_inc_path)) + shutil.copytree(inc_path, cuda_inc_path, dirs_exist_ok = True) + print("Copying cudnn lib files to {}CUDA_LIB_PATH".format(cuda_lib_path)) + shutil.copytree(lib_path, cuda_lib_path, dirs_exist_ok = True) + except: + #Need to copy to system path via run.sh + env['CM_TMP_RUN_COPY_SCRIPT'] = "yes" + env['CM_TMP_INC_PATH'] = inc_path + env['CM_TMP_LIB_PATH'] = lib_path + + return {'return':0} + +def postprocess(i): + + os_info = i['os_info'] + + env = i['env'] + version = env['CM_CUDNN_VERSION'] + env['CM_CUDA_PATH_LIB_CUDNN_EXISTS']='yes' + + return {'return':0, 'version': version} diff --git a/script/get-cudnn/run.sh b/script/get-cudnn/run.sh new file mode 100644 index 0000000000..e2cb00fb0f --- /dev/null +++ b/script/get-cudnn/run.sh @@ -0,0 +1,12 @@ +#!/bin/bash +if [ ${CM_TMP_RUN_COPY_SCRIPT} == "yes" ]; then + cmd="${CM_SUDO} cp ${CM_TMP_INC_PATH}/*.h ${CM_CUDA_PATH_INCLUDE}/" + echo $cmd + eval $cmd + test $? -eq 0 || exit 1 + + cmd="${CM_SUDO} cp -P ${CM_TMP_LIB_PATH}/libcudnn* ${CM_CUDA_PATH_LIB}/" + echo $cmd + eval $cmd + test $? -eq 0 || exit 1 +fi diff --git a/script/get-dataset-cifar10/README.md b/script/get-dataset-cifar10/README.md new file mode 100644 index 0000000000..607aa105ac --- /dev/null +++ b/script/get-dataset-cifar10/README.md @@ -0,0 +1,166 @@ +Automatically generated README for this automation recipe: **get-dataset-cifar10** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-cifar10,2f0c0bb3663b4ed7) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cifar10)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,dataset,cifar10,image-classification,validation,training* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get dataset cifar10 image-classification validation training" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,dataset,cifar10,image-classification,validation,training` + +`cm run script --tags=get,dataset,cifar10,image-classification,validation,training[,variations] ` + +*or* + +`cmr "get dataset cifar10 image-classification validation training"` + +`cmr "get dataset cifar10 image-classification validation training [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,cifar10,image-classification,validation,training',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,cifar10,image-classification,validation,training"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,cifar10,image-classification,validation,training) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset cifar10 image-classification validation training[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_tiny` + - Environment variables: + - *CM_DATASET_CONVERT_TO_TINYMLPERF*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,tinymlperf,src + - CM script: [get-mlperf-tiny-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-tiny-src) + * get,src,eembc,energy-runner + - CM script: [get-mlperf-tiny-eembc-energy-runner-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-tiny-eembc-energy-runner-src) + +
+ + + * Group "**data_format**" +
+ Click here to expand this section. + + * **`_python`** (default) + - Environment variables: + - *CM_DATASET*: `CIFAR10` + - *CM_DATASET_FILENAME*: `cifar-10-python.tar.gz` + - *CM_DATASET_FILENAME1*: `cifar-10-python.tar` + - *CM_DATASET_CIFAR10*: `https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz` + - Workflow: + +
+ + +#### Default variations + +`_python` +#### Default environment + +
+<summary>Click here to expand this section.</summary>
+
+These keys can be updated via `--env.KEY=VALUE`, an `env` dictionary in `@input.json`, or script flags.
+
+
+</details>
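+For example, a minimal sketch that selects the `_tiny` variation documented above (assuming the TinyMLPerf and EEMBC energy-runner dependencies can be resolved on the host):
+
+```python
+import cmind
+
+# The "_tiny" suffix selects the variation that sets
+# CM_DATASET_CONVERT_TO_TINYMLPERF=yes and pulls in the extra dependencies.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,cifar10,image-classification,validation,training,_tiny',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```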
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cifar10/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cifar10/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cifar10/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cifar10/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cifar10/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cifar10/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cifar10/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cifar10/_cm.json) + +___ +### Script output +`cmr "get dataset cifar10 image-classification validation training [,variations]" -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +#### New environment keys auto-detected from customize diff --git a/script/get-dataset-cifar10/_cm.json b/script/get-dataset-cifar10/_cm.json new file mode 100644 index 0000000000..6297ff6b3f --- /dev/null +++ b/script/get-dataset-cifar10/_cm.json @@ -0,0 +1,56 @@ +{ + "alias": "get-dataset-cifar10", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "new_env_keys": [ + "CM_DATASET_*" + ], + "deps": [ + { + "tags": "detect,os" + } + ], + "tags": [ + "get", + "dataset", + "cifar10", + "image-classification", + "validation", + "training" + ], + "variations": { + "python": { + "group": "data_format", + "default": true, + "env": { + "CM_DATASET": "CIFAR10", + "CM_DATASET_FILENAME": "cifar-10-python.tar.gz", + "CM_DATASET_FILENAME1": "cifar-10-python.tar", + "CM_DATASET_CIFAR10": "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz" + } + }, + "tiny": { + "deps": [ + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "tags": "get,tinymlperf,src" + }, + { + "tags": "get,src,eembc,energy-runner" + } + ], + "env": { + "CM_DATASET_CONVERT_TO_TINYMLPERF": "yes" + } + } + }, + "uid": "2f0c0bb3663b4ed7" +} diff --git a/script/get-dataset-cifar10/customize.py b/script/get-dataset-cifar10/customize.py new file mode 100644 index 0000000000..b624357402 --- /dev/null +++ b/script/get-dataset-cifar10/customize.py @@ -0,0 +1,16 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + env = i['env'] + + return {'return': 0} + +def postprocess(i): + env = i['env'] + + variation_tags = i.get('variation_tags',[]) + + return {'return': 0} diff --git a/script/get-dataset-cifar10/requirements.txt b/script/get-dataset-cifar10/requirements.txt new file mode 100644 index 0000000000..bf9d458b18 --- /dev/null +++ b/script/get-dataset-cifar10/requirements.txt @@ -0,0 +1,47 @@ +absl-py==0.11.0 +astunparse==1.6.3 +cachetools==4.2.1 +certifi==2020.12.5 +chardet==4.0.0 +cycler==0.10.0 +flatbuffers==1.12 +gast==0.4.0 +google-auth==1.27.0 +google-auth-oauthlib==0.4.2 +google-pasta==0.2.0 
+grpcio==1.34.0 +h5py==3.1.0 +idna==2.10 +imageio==2.9.0 +joblib==1.1.1 +Keras-Preprocessing==1.1.2 +kiwisolver==1.3.1 +Markdown==3.3.3 +matplotlib==3.3.4 +numpy==1.19.5 +oauthlib==3.1.0 +opencv-python==4.5.1.48 +opt-einsum==3.3.0 +Pillow==8.1.0 +protobuf==3.14.0 +pyasn1==0.4.8 +pyasn1-modules==0.2.8 +pyparsing==2.4.7 +python-dateutil==2.8.1 +PyYAML==5.4.1 +requests==2.25.1 +requests-oauthlib==1.3.0 +rsa==4.7.1 +scikit-learn==0.24.1 +scipy==1.6.0 +six==1.15.0 +tensorboard==2.5.0 +tensorboard-plugin-wit==1.8.0 +tensorflow==2.5.0 +tensorflow-estimator==2.5.0 +termcolor==1.1.0 +threadpoolctl==2.1.0 +typing-extensions==3.7.4.3 +urllib3==1.26.3 +Werkzeug==1.0.1 +wrapt==1.12.1 diff --git a/script/get-dataset-cifar10/run.bat b/script/get-dataset-cifar10/run.bat new file mode 100644 index 0000000000..8f54fb86e2 --- /dev/null +++ b/script/get-dataset-cifar10/run.bat @@ -0,0 +1,48 @@ +wget -nc %CM_DATASET_CIFAR10% --no-check-certificate +IF %ERRORLEVEL% NEQ 0 EXIT 1 + +del /Q /S %CM_DATASET_FILENAME1% + +gzip -d %CM_DATASET_FILENAME% +IF %ERRORLEVEL% NEQ 0 EXIT 1 + +tar -xvf %CM_DATASET_FILENAME1% +IF %ERRORLEVEL% NEQ 0 EXIT 1 + +del /Q /S %CM_DATASET_FILENAME1% + +echo CM_DATASET_PATH=%CD%\cifar-10-batches-py > tmp-run-env.out +echo CM_DATASET_CIFAR10_PATH=%CD%\cifar-10-batches-py >> tmp-run-env.out + +if "%CM_DATASET_CONVERT_TO_TINYMLPERF%" == "yes" ( + echo. + echo Copying TinyMLPerf convertor ... + echo. + + copy /B /Y %CM_MLPERF_TINY_TRAINING_IC%\* . + + echo. + echo Installing Python requirements ... + echo. + + %CM_PYTHON_BIN% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + IF %ERRORLEVEL% NEQ 0 EXIT 1 + + echo. + echo Converting ... + echo. + + %CM_PYTHON_BIN% perf_samples_loader.py + IF %ERRORLEVEL% NEQ 0 EXIT 1 + + copy /B /Y y_labels.csv perf_samples + + echo CM_DATASET_CIFAR10_TINYMLPERF_PATH=%CD%\perf_samples >> tmp-run-env.out + + echo. + echo Copying to EEMBC runner user space ... + echo. + + copy /B /Y perf_samples\* %CM_EEMBC_ENERGY_RUNNER_DATASETS%\ic01 +) + diff --git a/script/get-dataset-cifar10/run.sh b/script/get-dataset-cifar10/run.sh new file mode 100644 index 0000000000..a113a2e4da --- /dev/null +++ b/script/get-dataset-cifar10/run.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +wget -nc ${CM_DATASET_CIFAR10} --no-check-certificate +test $? -eq 0 || exit 1 + +rm -rf ${CM_DATASET_FILENAME1} + +gzip -d ${CM_DATASET_FILENAME} +test $? -eq 0 || exit 1 + +tar -xvf ${CM_DATASET_FILENAME1} +test $? -eq 0 || exit 1 + +rm -rf ${CM_DATASET_FILENAME} + +echo "CM_DATASET_PATH=$PWD/cifar-10-batches-py" > tmp-run-env.out +echo "CM_DATASET_CIFAR10_PATH=$PWD/cifar-10-batches-py" >> tmp-run-env.out + +if [ "${CM_DATASET_CONVERT_TO_TINYMLPERF}" == "yes" ]; then + echo "" + echo "Copying TinyMLPerf convertor ..." + echo "" + + cp -rf ${CM_MLPERF_TINY_TRAINING_IC}/* . + + echo "" + echo "Installing Python requirements ..." + echo "" + + ${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + if [ "${?}" != "0" ]; then exit 1; fi + + echo "" + echo "Converting in $PWD ..." + echo "" + + ${CM_PYTHON_BIN} perf_samples_loader.py + if [ "${?}" != "0" ]; then exit 1; fi + + cp -rf y_labels.csv perf_samples + + echo "CM_DATASET_CIFAR10_TINYMLPERF_PATH=$PWD/perf_samples" >> tmp-run-env.out + + echo "" + echo "Copying to EEMBC runner user space ..." 
+ echo "" + + cp -rf perf_samples/* ${CM_EEMBC_ENERGY_RUNNER_DATASETS}/ic01 +fi + diff --git a/script/get-dataset-cnndm/README.md b/script/get-dataset-cnndm/README.md new file mode 100644 index 0000000000..5e37effee9 --- /dev/null +++ b/script/get-dataset-cnndm/README.md @@ -0,0 +1,176 @@ +Automatically generated README for this automation recipe: **get-dataset-cnndm** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-cnndm,aed298c156e24257) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cnndm)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,dataset,gpt-j,cnndm,cnn-dailymail,original* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get dataset gpt-j cnndm cnn-dailymail original" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,dataset,gpt-j,cnndm,cnn-dailymail,original` + +`cm run script --tags=get,dataset,gpt-j,cnndm,cnn-dailymail,original[,variations] ` + +*or* + +`cmr "get dataset gpt-j cnndm cnn-dailymail original"` + +`cmr "get dataset gpt-j cnndm cnn-dailymail original [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,gpt-j,cnndm,cnn-dailymail,original',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,gpt-j,cnndm,cnn-dailymail,original"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,gpt-j,cnndm,cnn-dailymail,original) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset gpt-j cnndm cnn-dailymail original[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_intel` + - Workflow: + * `_intel,validation` + - Environment variables: + - *CM_CNNDM_INTEL_VARIATION*: `yes` + - Workflow: + +
+ + + * Group "**dataset-type**" +
+ Click here to expand this section. + + * `_calibration` + - Environment variables: + - *CM_DATASET_CALIBRATION*: `yes` + - Workflow: + * **`_validation`** (default) + - Environment variables: + - *CM_DATASET_CALIBRATION*: `no` + - Workflow: + +
+ + +#### Default variations + +`_validation` +#### Default environment + +
+<summary>Click here to expand this section.</summary>
+
+These keys can be updated via `--env.KEY=VALUE`, an `env` dictionary in `@input.json`, or script flags.
+
+* CM_DATASET_CALIBRATION: `no`
+
+</details>
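+For example, a minimal sketch that fetches the calibration split and reads back the produced path (assuming the `new_env_keys` declared for the `_calibration` variation in `_cm.json` below are returned by the run):
+
+```python
+import cmind
+
+# "_calibration" sets CM_DATASET_CALIBRATION=yes; on success the path is
+# exposed via CM_CALIBRATION_DATASET_PATH in the returned new environment.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,gpt-j,cnndm,cnn-dailymail,original,_calibration',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+else:
+    print(r.get('new_env', {}).get('CM_CALIBRATION_DATASET_PATH', ''))
+```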
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cnndm/_cm.json)***
+     * get,sys-utils-cm
+       - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm)
+     * get,python3
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * mlperf,inference,source
+       * `if (CM_CNNDM_INTEL_VARIATION != yes)`
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,generic-python-lib,_package.simplejson
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_datasets
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_package.tokenizers
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_numpy
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cnndm/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cnndm/_cm.json)
+  1. ***Run native script if exists***
+     * [run-intel.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cnndm/run-intel.sh)
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cnndm/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cnndm/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cnndm/customize.py)***
+  1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-cnndm/_cm.json) + +___ +### Script output +`cmr "get dataset gpt-j cnndm cnn-dailymail original [,variations]" -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/get-dataset-cnndm/_cm.json b/script/get-dataset-cnndm/_cm.json new file mode 100644 index 0000000000..36af72cdc5 --- /dev/null +++ b/script/get-dataset-cnndm/_cm.json @@ -0,0 +1,87 @@ +{ + "alias": "get-dataset-cnndm", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "default_env": { + "CM_DATASET_CALIBRATION": "no" + }, + "deps": [ + { + "tags": "get,sys-utils-cm" + }, + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3", + "version_max": "3.9.999" + }, + { + "names": [ + "inference-src" + ], + "tags": "mlperf,inference,source", + "skip_if_env": { + "CM_CNNDM_INTEL_VARIATION": [ "yes" ] + } + }, + { + "tags": "get,generic-python-lib,_package.simplejson" + }, + { + "tags": "get,generic-python-lib,_datasets" + }, + { + "tags": "get,generic-python-lib,_package.tokenizers" + }, + { + "tags": "get,generic-python-lib,_numpy" + } + ], + "env": { + "CM_DATASET": "CNNDM" + }, + "tags": [ + "get", + "dataset", + "gpt-j", + "cnndm", + "cnn-dailymail", + "original" + ], + "uid": "aed298c156e24257", + "variations": { + "calibration": { + "new_env_keys": [ + "CM_CALIBRATION_DATASET_PATH", + "CM_CALIBRATION_DATASET_CNNDM_PATH" + ], + "env": { + "CM_DATASET_CALIBRATION": "yes" + }, + "group": "dataset-type" + }, + "validation": { + "new_env_keys": [ + "CM_DATASET_PATH", + "CM_DATASET_EVAL_PATH", + "CM_DATASET_CNNDM_EVAL_PATH" + ], + "default": true, + "env": { + "CM_DATASET_CALIBRATION": "no" + }, + "group": "dataset-type" + }, + "intel": { + }, + "intel,validation": { + "env": { + "CM_CNNDM_INTEL_VARIATION": "yes" + } + } + } +} diff --git a/script/get-dataset-cnndm/customize.py b/script/get-dataset-cnndm/customize.py new file mode 100644 index 0000000000..27363d8000 --- /dev/null +++ b/script/get-dataset-cnndm/customize.py @@ -0,0 +1,29 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + env = i['env'] + + if env.get('CM_CNNDM_INTEL_VARIATION', '') == 'yes': + i['run_script_input']['script_name'] = "run-intel" + else: + print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + + return {'return': 0} + +def postprocess(i): + env = i['env'] + + if env.get('CM_DATASET_CALIBRATION','') == "no": + env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install') + env['CM_DATASET_EVAL_PATH'] = os.path.join(os.getcwd(), 'install', 'cnn_eval.json') + env['CM_DATASET_CNNDM_EVAL_PATH'] = os.path.join(os.getcwd(), 'install', 'cnn_eval.json') + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_PATH'] + else: + env['CM_CALIBRATION_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'cnn_dailymail_calibration.json') + env['CM_CALIBRATION_DATASET_CNNDM_PATH'] = os.path.join(os.getcwd(), 'install', 'cnn_dailymail_calibration.json') + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_CALIBRATION_DATASET_PATH'] + + return {'return': 0} diff --git a/script/get-dataset-cnndm/run-intel.sh b/script/get-dataset-cnndm/run-intel.sh new file mode 100644 index 0000000000..067f158a56 --- /dev/null +++ b/script/get-dataset-cnndm/run-intel.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR=${PWD} +rm -rf install +mkdir -p install + +export 
DATASET_CNNDM_PATH=${CUR}/install + +wget -nc https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/gptj-99/pytorch-cpu/download-dataset.py +test $? -eq 0 || exit 1 + +cmd="${CM_PYTHON_BIN_WITH_PATH} download-dataset.py --split validation --output-dir ${DATASET_CNNDM_PATH}" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit 1 diff --git a/script/get-dataset-cnndm/run.sh b/script/get-dataset-cnndm/run.sh new file mode 100644 index 0000000000..f9aa3864b0 --- /dev/null +++ b/script/get-dataset-cnndm/run.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +CUR=${PWD} +mkdir -p install +export DATASET_CNNDM_PATH=${CUR}/install + +cd ${CM_MLPERF_INFERENCE_SOURCE} +cd language/gpt-j + +if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then + cmd="${CM_PYTHON_BIN_WITH_PATH} download_cnndm.py" + echo $cmd + eval $cmd + test $? -eq 0 || exit 1 +else + cmd="${CM_PYTHON_BIN_WITH_PATH} prepare-calibration.py --calibration-list-file calibration-list.txt --output-dir ${DATASET_CNNDM_PATH}" + echo $cmd + eval $cmd + test $? -eq 0 || exit 1 +fi +test $? -eq 0 || exit 1 diff --git a/script/get-dataset-coco/README-extra.md b/script/get-dataset-coco/README-extra.md new file mode 100644 index 0000000000..9f19d2e8d7 --- /dev/null +++ b/script/get-dataset-coco/README-extra.md @@ -0,0 +1,95 @@ +# CM interface to download or detect COCO data sets + +This CM automation recipe helps to download or detect [COCO datasets](https://cocodataset.org) +and register them in the CM cache with various environment variables +to be reused in CM workflows and other projects. + +Supported versions: +* 2017 val/train +* 2014 val/train + +## Use-cases + +* https://github.com/mlcommons/abtf-ssd-pytorch + +## Download COCO dataset and register in CM cache + +```bash +cmr "get coco dataset" +cmr "get coco dataset _val _2017" +cmr "get coco dataset _train _2017" +``` + +You can find this data set in the CM cache using the following command: + +```bash +cm show cache "get coco dataset" +``` + +#### Output environment variables + +You can check produced environment variables produced by this CM script by adding the `-j` flag: + +```bash +cmr "get coco dataset _val _2017" -j +``` + +```json + "new_env": { + "CM_DATASET_COCO_URL_ANNOTATIONS": "http://images.cocodataset.org/annotations", + "CM_DATASET_COCO_URL_DATA": "http://images.cocodataset.org/zips", + "CM_DATASET_COCO_VERSION": "2017", + "CM_DATASET_COCO_TYPE": "val", + "CM_DATASET_COCO_SIZE": "complete", + "CM_DATASET_COCO_ANNOTATIONS_DOWNLOAD_PATH": "d:\\Work2\\COCO-2017-val\\annotations_trainval2017.zip", + "CM_DATASET_COCO_ANNOTATIONS_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07\\annotations", + "CM_DATASET_COCO_DATA_DOWNLOAD_PATH": "d:\\Work2\\COCO-2017-val\\val2017.zip", + "CM_DATASET_COCO_DATA_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07\\val2017", + "CM_DATASET_COCO_MD5SUM_ANN": "f4bbac642086de4f52a3fdda2de5fa2c", + "CM_DATASET_COCO_MD5SUM_DATA": "442b8da7639aecaf257c1dceb8ba8c80", + "CM_DATASET_COCO_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07", + "CM_DATASET_COCO_TYPE_AND_VERSION": "val2017", + "CM_DATASET_COCO_URL_ANNOTATIONS_FULL": "http://images.cocodataset.org/annotations/annotations_trainval2017.zip", + "CM_DATASET_COCO_URL_DATA_FULL": "http://images.cocodataset.org/zips/val2017.zip", + "CM_DATASET_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07", + "CM_DATASET_PATH_ROOT": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07" + }, +``` + +#### Input flags and equivalent environment variables + +* 
`--from` - where to find dataset archive files instead of downloading them +* `--to` - where to extract dataset files +* `--path` - where to pick up extracted dataset files +* `--store` - where to keep downloaded files + +#### Variations + +* Dataset type: `_val` | `_train` +* Dataset year: `2017` | `2014` + + +## Detect already installed COCO dataset + +```bash +cmr "get coco dataset" --path={PATH to the installed dataset}" +``` + +CM script will attempt to automatically detect the type (val/train) and version (2014/2017) +of the dataset files. + +## Install dataset from already downloaded archives + +```bash +cmr "get coco dataset _val _2017" --from=d:\Work2\COCO-2017-val -j +``` + +where `--from` points to the COCO dataset zip files already downloaded from the server. +It is useful when all files are already downloaded and saved for common use. + + +## Download and store dataset files locally + +```bash +cmr "get coco dataset _val _2017" --to=d:\Downloads\COCO-2017-val --store=d:\Downloads +``` diff --git a/script/get-dataset-coco/README.md b/script/get-dataset-coco/README.md new file mode 100644 index 0000000000..37dd544a7b --- /dev/null +++ b/script/get-dataset-coco/README.md @@ -0,0 +1,215 @@ +Automatically generated README for this automation recipe: **get-dataset-coco** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-coco,c198e1f60ac6445c) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,dataset,object-detection,coco* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get dataset object-detection coco" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,dataset,object-detection,coco` + +`cm run script --tags=get,dataset,object-detection,coco[,variations] [--input_flags]` + +*or* + +`cmr "get dataset object-detection coco"` + +`cmr "get dataset object-detection coco [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,object-detection,coco',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
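+For example, a minimal sketch that requests the small COCO 2017 validation subset (a 50-image version prepared for quick tests; see the variation groups documented below):
+
+```python
+import cmind
+
+# "_val,_2017,_small" selects the 50-image subset of COCO 2017 validation
+# via the size/type/version variation groups.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,object-detection,coco,_val,_2017,_small',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```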
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,object-detection,coco"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,object-detection,coco) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset object-detection coco[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * Group "**size**" +
+ Click here to expand this section. + + * **`_complete`** (default) + - Environment variables: + - *CM_DATASET_COCO_SIZE*: `complete` + - Workflow: + * `_small` + - Environment variables: + - *CM_DATASET_COCO_SIZE*: `small` + - Workflow: + +
+ + + * Group "**type**" +
+ Click here to expand this section. + + * `_train` + - Environment variables: + - *CM_DATASET_COCO_TYPE*: `train` + - Workflow: + * **`_val`** (default) + - Environment variables: + - *CM_DATASET_COCO_TYPE*: `val` + - Workflow: + +
+ + + * Group "**version**" +
+ Click here to expand this section. + + * **`_2017`** (default) + - Environment variables: + - *CM_DATASET_COCO_VERSION*: `2017` + - Workflow: + +
+ + +#### Default variations + +`_2017,_complete,_val` + +#### Script flags mapped to environment +
+<summary>Click here to expand this section.</summary>
+
+* `--from=value`  →  `CM_FROM=value`
+* `--home=value`  →  `CM_HOME_DIR=value`
+* `--store=value`  →  `CM_STORE=value`
+* `--to=value`  →  `CM_TO=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "from":...})
+```
+
+</details>
+ +#### Default environment + +
+<summary>Click here to expand this section.</summary>
+
+These keys can be updated via `--env.KEY=VALUE`, an `env` dictionary in `@input.json`, or script flags.
+
+
+</details>
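+The `--from`/`--to` flags documented above can be passed the same way; a minimal sketch with placeholder paths that installs the dataset from already downloaded archives:
+
+```python
+import cmind
+
+# "from" maps to CM_FROM (directory holding val2017.zip and the annotations
+# archive); "to" maps to CM_TO (extraction target). Both paths are placeholders.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,object-detection,coco,_val,_2017',
+                  'out': 'con',
+                  'from': '/path/to/downloaded/archives',
+                  'to': '/path/to/extract/coco'})
+
+if r['return'] > 0:
+    print(r['error'])
+```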
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco/_cm.json)*** + * download-and-extract,file,_wget,_extract + * `if (CM_DATASET_COCO_DETECTED != yes)` + * CM names: `--adr.['get-dataset-coco-data', '746e5dad5e784ad6']...` + - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract) + * download-and-extract,file,_wget,_extract + * `if (CM_DATASET_COCO_DETECTED != yes)` + * CM names: `--adr.['get-dataset-coco-annotations', 'edb6cd092ff64171']...` + - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco/_cm.json) + +___ +### Script output +`cmr "get dataset object-detection coco [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_COCO*` +* `CM_DATASET_PATH` +* `CM_DATASET_PATH_ROOT` +#### New environment keys auto-detected from customize + +* `CM_DATASET_COCO_ANNOTATIONS_PATH` +* `CM_DATASET_COCO_DATA_PATH` +* `CM_DATASET_COCO_DETECTED` +* `CM_DATASET_COCO_MD5SUM_ANN` +* `CM_DATASET_COCO_MD5SUM_DATA` +* `CM_DATASET_COCO_PATH` +* `CM_DATASET_COCO_TYPE` +* `CM_DATASET_COCO_TYPE_AND_VERSION` +* `CM_DATASET_COCO_URL_ANNOTATIONS_FULL` +* `CM_DATASET_COCO_URL_DATA_FULL` +* `CM_DATASET_COCO_VERSION` +* `CM_DATASET_PATH` +* `CM_DATASET_PATH_ROOT` \ No newline at end of file diff --git a/script/get-dataset-coco/_cm.json b/script/get-dataset-coco/_cm.json new file mode 100644 index 0000000000..a61234ad0c --- /dev/null +++ b/script/get-dataset-coco/_cm.json @@ -0,0 +1,137 @@ +{ + "alias": "get-dataset-coco", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "deps": [ + { + "tags": "detect,os" + } + ], + "env": { + "CM_DATASET": "COCO", + "CM_DATASET_COCO_URL_ANNOTATIONS": "http://images.cocodataset.org/annotations", + "CM_DATASET_COCO_URL_DATA": "http://images.cocodataset.org/zips" + }, + "input_mapping": { + "from": "CM_FROM", + "store": "CM_STORE", + "to": "CM_TO", + "home": "CM_HOME_DIR" + }, + "new_env_keys": [ + "CM_DATASET_COCO*", + "CM_DATASET_PATH", + "CM_DATASET_PATH_ROOT" + ], + "prehook_deps": [ + { + "env": { + "CM_DOWNLOAD_CHECKSUM": "<<>>", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_DATASET_COCO_DATA_DOWNLOAD_PATH", + "CM_EXTRACT_FINAL_ENV_NAME": "CM_DATASET_COCO_DATA_PATH" + }, + "force_cache": true, + "names": [ + "get-dataset-coco-data", + "746e5dad5e784ad6" + ], + "skip_if_fake_run": true, + "skip_if_env": { + "CM_DATASET_COCO_DETECTED": [ + "yes" + ] + }, + "tags": 
"download-and-extract,file,_wget,_extract", + "update_tags_from_env_with_prefix": { + "_url.": [ + "CM_DATASET_COCO_URL_DATA_FULL" + ] + }, + "verify": false + }, + { + "env": { + "CM_DOWNLOAD_CHECKSUM": "<<>>", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_DATASET_COCO_ANNOTATIONS_DOWNLOAD_PATH", + "CM_DOWNLOAD_PATH": "<<>>", + "CM_EXTRACT_FINAL_ENV_NAME": "CM_DATASET_COCO_ANNOTATIONS_PATH" + }, + "force_cache": true, + "names": [ + "get-dataset-coco-annotations", + "edb6cd092ff64171" + ], + "skip_if_fake_run": true, + "skip_if_env": { + "CM_DATASET_COCO_DETECTED": [ + "yes" + ] + }, + "tags": "download-and-extract,file,_wget,_extract", + "update_tags_from_env_with_prefix": { + "_url.": [ + "CM_DATASET_COCO_URL_ANNOTATIONS_FULL" + ] + }, + "verify": false + } + ], + "tags": [ + "get", + "dataset", + "object-detection", + "coco" + ], + "uid": "c198e1f60ac6445c", + "variations": { + "2017": { + "default": true, + "env": { + "CM_DATASET_COCO_VERSION": "2017" + }, + "group": "version" + }, + "complete": { + "default": true, + "env": { + "CM_DATASET_COCO_SIZE": "complete" + }, + "group": "size" + }, + "small": { + "env": { + "CM_DATASET_COCO_SIZE": "small" + }, + "group": "size" + }, + "train": { + "env": { + "CM_DATASET_COCO_TYPE": "train" + }, + "group": "type" + }, + "val": { + "default": true, + "env": { + "CM_DATASET_COCO_TYPE": "val" + }, + "group": "type" + } + }, + "docker": { + "skip_run_cmd":"no", + "mount_current_dir":"yes", + "input_paths": [ + "store", + "from", + "to" + ], + "skip_input_for_fake_run": [ + "store", + "from", + "to" + ] + } +} diff --git a/script/get-dataset-coco/customize.py b/script/get-dataset-coco/customize.py new file mode 100644 index 0000000000..78ced4d7bd --- /dev/null +++ b/script/get-dataset-coco/customize.py @@ -0,0 +1,203 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + # CM script internal variables + variation_tags = i.get('variation_tags',[]) + automation = i['automation'] + env = i['env'] + meta = i['meta'] + quiet = (env.get('CM_QUIET', False) == 'yes') + + # Check if path is there to detect existing data set + detected = False + path = env.get('CM_TMP_PATH','') + if path!='': + if not os.path.isdir(path): + return {'return':1, 'error':'path to dataset "{}" doesn\'t exist'.format(path)} + + # Check which dataset + p = os.path.join(path, 'annotations') + if os.path.isdir(p): + for d in [('val2017','val','2017'), + ('train2017','train','2017')]: + p = os.path.join(path, d[0]) + + if os.path.isdir(p): + tp = d[1] + ver = d[2] + detected = True + break + + if not detected: + return {'return':1, 'error':'COCO dataset is not detected in "{}"'.format(path)} + + print ('') + print ('Detected COCO dataset {} {}'.format(tp,ver)) + + env['CM_DATASET_COCO_DETECTED'] = 'yes' + env['CM_DATASET_COCO_PATH'] = path + else: + ver = env['CM_DATASET_COCO_VERSION'] + tp = env['CM_DATASET_COCO_TYPE'] + + # Prepare URL + size=env.get('CM_DATASET_COCO_SIZE','') + if size=='small' and tp=='val' and ver=='2017': + # We prepared a small version with 50 images for val 2017 + + filename_data = 'val2017_small.zip' + filename_annotation = 'annotations_val2017_small.zip' + + url_data_full = 'https://www.dropbox.com/scl/fi/whokyb7b7hyjqqotruyqb/{}?rlkey=hhgt4xtir91ej0nro6h69l22s&dl=0'.format(filename_data) + url_ann_full = 'https://www.dropbox.com/scl/fi/bu41y62v9zqhee8w7q6z3/{}?rlkey=seqtgozldkc0ztu76kbd47p5w&dl=0'.format(filename_annotation) + + else: + url_data = env['CM_DATASET_COCO_URL_DATA'] + url_ann = env['CM_DATASET_COCO_URL_ANNOTATIONS'] + + 
filename_data = tp + ver + '.zip' + filename_annotation = 'annotations_trainval' + ver + '.zip' + + url_data_full = url_data + '/' + filename_data + url_ann_full = url_ann + '/' + filename_annotation + + # Add extra tags with type and version to "download-and-extract" deps to be able to reuse them + # Add "from" and "to" to "download-and-extract" deps + download_extra_cache_tags='dataset,coco,data,'+tp+','+ver + + dae_input_data = { + 'extra_cache_tags':download_extra_cache_tags + } + dae_input_annotation = { + 'extra_cache_tags':download_extra_cache_tags + } + + path_from = env.get('CM_FROM', '') + if path_from!='': + path_from_data = os.path.join(path_from, filename_data) + if not os.path.isfile(path_from_data): + return {'return':1, 'error':'File {} not found'.format(path_from_data)} + dae_input_data['local_path'] = path_from_data + + path_from_annotation = os.path.join(path_from, filename_annotation) + if not os.path.isfile(path_from_annotation): + return {'return':1, 'error':'File {} not found'.format(path_from_annotation)} + dae_input_annotation['local_path'] = path_from_annotation + + path_to = env.get('CM_TO', '') + if path_to!='': + dae_input_data['extract_path'] = path_to + dae_input_annotation['extract_path'] = path_to + + path_store = env.get('CM_STORE', '') + if path_store!='': + dae_input_data['download_path'] = path_store + dae_input_data['tags'] = '_keep' + dae_input_annotation['download_path'] = path_store + dae_input_annotation['tags'] = '_keep' + + + r = automation.update_deps({'deps':meta['prehook_deps'], + 'update_deps':{ + '746e5dad5e784ad6': dae_input_data, + 'edb6cd092ff64171': dae_input_annotation + } + }) + if r['return']>0: return r + + # Prepare environment variables + env['CM_DATASET_COCO_VERSION'] = ver + env['CM_DATASET_COCO_TYPE'] = tp + env['CM_DATASET_COCO_TYPE_AND_VERSION'] = tp+ver + env['CM_DATASET_COCO_URL_DATA_FULL'] = url_data_full + env['CM_DATASET_COCO_URL_ANNOTATIONS_FULL'] = url_ann_full + + # Check MD5SUM + md5sum_data = '' + md5sum_ann = '' + + if ver == '2017': + if tp == 'val': + if size == 'small': + md5sum_data = '16fab985a33afa66beeb987f68c2023c' + md5sum_ann = '78c0cfd9fc32c825d4ae693fd0d91407' + else: + md5sum_data = '442b8da7639aecaf257c1dceb8ba8c80' + md5sum_ann = 'f4bbac642086de4f52a3fdda2de5fa2c' + + if md5sum_data != '': + env['CM_DATASET_COCO_MD5SUM_DATA'] = md5sum_data + if md5sum_ann != '': + env['CM_DATASET_COCO_MD5SUM_ANN'] = md5sum_ann + + if not detected: + print ('') + print ('URL for data: {}'.format(url_data_full)) + print ('URL for annotations: {}'.format(url_ann_full)) + + # Add version and type to tags + extra_cache_tags = [] + for tag in [ver, tp]: + if tag not in variation_tags: + extra_cache_tags.append(tag) + + return {'return':0, 'add_extra_cache_tags':extra_cache_tags} + +def postprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + tp_ver = env['CM_DATASET_COCO_TYPE_AND_VERSION'] + + path_to = env.get('CM_TO','') + + # Check if detected or downloaded + if env.get('CM_DATASET_COCO_DETECTED', '').lower() == 'yes' or path_to!='': + path_all = env['CM_DATASET_COCO_PATH'] if path_to=='' else path_to + + env['CM_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver) + env['CM_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join(path_all, 'annotations') + else: + path_all = os.getcwd() + + # Moving 2 directories to 1 place + + path_data = env['CM_DATASET_COCO_DATA_PATH'] + path_ann = env['CM_DATASET_COCO_ANNOTATIONS_PATH'] + + print ('') + print (path_all) + print ('') + + path_data_full = os.path.join(path_data, 
tp_ver) + path_ann_full = os.path.join(path_ann, 'annotations') + + if os_info['platform'] == 'windows': + # Moving to this directory since can't make symbolic links + command1 = ' move /y ' + path_data_full + ' ' + tp_ver + command2 = ' move /y ' + path_ann_full + ' annotations' + + env['CM_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver) + env['CM_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join(path_all, 'annotations') + else: + # Make soft links from data and annotations into 1 directory + # (standard way for COCO) + + command1 = ' ln -s ' + path_data_full + ' ' + tp_ver + command2 = ' ln -s ' + path_ann_full + ' annotations' + + for command in [command1, command2]: + print (command) + os.system(command) + + + env['CM_DATASET_COCO_PATH'] = path_all + env['CM_DATASET_PATH'] = path_all + env['CM_DATASET_PATH_ROOT'] = path_all + + return {'return': 0} diff --git a/script/get-dataset-coco2014/README.md b/script/get-dataset-coco2014/README.md new file mode 100644 index 0000000000..0e84cca95c --- /dev/null +++ b/script/get-dataset-coco2014/README.md @@ -0,0 +1,205 @@ +Automatically generated README for this automation recipe: **get-dataset-coco2014** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-coco2014,3f7ad9d42f4040f8) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *get,dataset,coco2014,object-detection,original* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get dataset coco2014 object-detection original" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,dataset,coco2014,object-detection,original` + +`cm run script --tags=get,dataset,coco2014,object-detection,original[,variations] ` + +*or* + +`cmr "get dataset coco2014 object-detection original"` + +`cmr "get dataset coco2014 object-detection original [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,coco2014,object-detection,original',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
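+
+For example, the variation tags listed under [Variations](#variations) below can be appended directly to the `tags` string. A minimal sketch, assuming the `_500` and `_validation` variations from that list:
+
+```python
+
+import cmind
+
+# Request the 500-image validation subset of COCO 2014
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,coco2014,object-detection,original,_500,_validation',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```
+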
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,coco2014,object-detection,original"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,coco2014,object-detection,original) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset coco2014 object-detection original[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**annotations**" +
+ Click here to expand this section. + + * `_custom-annotations` + - Environment variables: + - *CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS*: `yes` + - Workflow: + * **`_default-annotations`** (default) + - Environment variables: + - *CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS*: `no` + - Workflow: + +
+ + + * Group "**dataset-type**" +
+ Click here to expand this section. + + * `_calibration` + - Environment variables: + - *CM_DATASET_CALIBRATION*: `yes` + - Workflow: + * **`_validation`** (default) + - Environment variables: + - *CM_DATASET_CALIBRATION*: `no` + - Workflow: + +
+ + + * Group "**size**" +
+ Click here to expand this section. + + * **`_50`** (default) + - Environment variables: + - *CM_DATASET_SIZE*: `50` + - Workflow: + * `_500` + - Environment variables: + - *CM_DATASET_SIZE*: `500` + - Workflow: + * `_full` + - Environment variables: + - *CM_DATASET_SIZE*: `` + - Workflow: + * `_size.#` + - Environment variables: + - *CM_DATASET_SIZE*: `#` + - Workflow: + +
+ + +#### Default variations + +`_50,_default-annotations,_validation` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_DATASET_CALIBRATION: `no` + +
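+
+For example, the default `CM_DATASET_CALIBRATION` key above can be overridden directly. A minimal sketch, assuming the Python API accepts an `env` dictionary in the same way as `--env.KEY=VALUE` on the command line; the `_calibration` variation above achieves the same effect:
+
+```python
+
+import cmind
+
+# Override the default CM_DATASET_CALIBRATION=no
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,coco2014,object-detection,original',
+                  'env':{'CM_DATASET_CALIBRATION':'yes'},
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```
+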
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/_cm.yaml)*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_package.tqdm + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * mlperf,inference,source + * CM names: `--adr.['inference-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/_cm.yaml) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/run.sh) + 1. ***Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/_cm.yaml)*** + * get,coco2014,annotations + * `if (CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS == yes)` + - *Warning: no scripts found* + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/_cm.yaml) + +___ +### Script output +`cmr "get dataset coco2014 object-detection original [,variations]" -j` +#### New environment keys (filter) + +* `CM_CALIBRATION_DATASET_PATH` +* `CM_DATASET_ANNOTATIONS_DIR_PATH` +* `CM_DATASET_ANNOTATIONS_FILE_PATH` +* `CM_DATASET_PATH` +* `CM_DATASET_PATH_ROOT` +#### New environment keys auto-detected from customize + +* `CM_CALIBRATION_DATASET_PATH` +* `CM_DATASET_PATH` +* `CM_DATASET_PATH_ROOT` \ No newline at end of file diff --git a/script/get-dataset-coco2014/_cm.yaml b/script/get-dataset-coco2014/_cm.yaml new file mode 100644 index 0000000000..b6017a2e8c --- /dev/null +++ b/script/get-dataset-coco2014/_cm.yaml @@ -0,0 +1,89 @@ +alias: get-dataset-coco2014 +uid: 3f7ad9d42f4040f8 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: AI/ML datasets +category_sort: 8500 + +tags: +- get +- dataset +- coco2014 +- object-detection +- original + +default_env: + CM_DATASET_CALIBRATION: 'no' + +deps: + +- names: + - python + - python3 + tags: get,python3 + +- tags: get,generic-python-lib,_package.tqdm + +- force_env_keys: + - CM_GIT_* + names: + - inference-src + tags: mlperf,inference,source + version: master + +env: + CM_DATASET: COCO2014 + +new_env_keys: +- CM_DATASET_PATH +- CM_DATASET_PATH_ROOT +- CM_DATASET_ANNOTATIONS_DIR_PATH +- CM_DATASET_ANNOTATIONS_FILE_PATH +- CM_CALIBRATION_DATASET_PATH + +posthook_deps: +- enable_if_env: + CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: + - 'yes' + tags: get,coco2014,annotations + +variations: + '50': + default: true + env: + CM_DATASET_SIZE: '50' + group: size + '500': + env: + CM_DATASET_SIZE: '500' + group: size + calibration: + env: + CM_DATASET_CALIBRATION: 'yes' + group: dataset-type + 
custom-annotations: + env: + CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: 'yes' + group: annotations + default-annotations: + default: true + env: + CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: 'no' + group: annotations + full: + env: + CM_DATASET_SIZE: '' + group: size + size.#: + env: + CM_DATASET_SIZE: '#' + group: size + validation: + default: true + env: + CM_DATASET_CALIBRATION: 'no' + group: dataset-type diff --git a/script/get-dataset-coco2014/customize.py b/script/get-dataset-coco2014/customize.py new file mode 100644 index 0000000000..c48f71616d --- /dev/null +++ b/script/get-dataset-coco2014/customize.py @@ -0,0 +1,27 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + env = i['env'] + + print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + + run_dir = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools") + + env['CM_RUN_DIR'] = run_dir + + return {'return': 0} + +def postprocess(i): + env = i['env'] + if env.get('CM_DATASET_CALIBRATION','') == "no": + env['CM_DATASET_PATH_ROOT'] = os.path.join(os.getcwd(), 'install') + env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'validation', 'data') + env['CM_DATASET_CAPTIONS_DIR_PATH'] = os.path.join(os.getcwd(), 'install', 'captions') + env['CM_DATASET_LATENTS_DIR_PATH'] = os.path.join(os.getcwd(), 'install', 'latents') + else: + env['CM_CALIBRATION_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'calibration', 'data') + + return {'return': 0} diff --git a/script/get-dataset-coco2014/run.bat b/script/get-dataset-coco2014/run.bat new file mode 100644 index 0000000000..9ac62e6ad8 --- /dev/null +++ b/script/get-dataset-coco2014/run.bat @@ -0,0 +1,21 @@ +@echo off + +set CUR_DIR=%cd% +set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% + +if not exist install mkdir install + +set INSTALL_DIR=%CUR_DIR%\install + +cd %CM_RUN_DIR% + +if not "%CM_DATASET_SIZE%" == "" ( + set MAX_IMAGES=--max-images %CM_DATASET_SIZE% --seed 42 +) else ( + set MAX_IMAGES= +) + +rem TBD - next file doesn't exist in the latest inference - need to check/fix ... + +%CM_PYTHON_BIN% download-coco-2014.py %MAX_IMAGES% --dataset-dir=%INSTALL_DIR% --output-labels=openimages-mlperf.json +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-dataset-coco2014/run.sh b/script/get-dataset-coco2014/run.sh new file mode 100644 index 0000000000..71843358fc --- /dev/null +++ b/script/get-dataset-coco2014/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +CUR=${PWD} +mkdir -p install +INSTALL_DIR=${CUR}/install + +cd ${CM_RUN_DIR} + +if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then + if [ ! -z ${CM_DATASET_SIZE} ]; then + max_images=" -m ${CM_DATASET_SIZE}" + else + max_images="" + fi + cmd="./download-coco-2014.sh -d ${INSTALL_DIR} ${max_images}" + echo $cmd + eval $cmd + test $? -eq 0 || exit 1 +else + cmd="./download-coco-2014-calibration.sh -d ${INSTALL_DIR}" + echo $cmd + eval $cmd + test $? -eq 0 || exit 1 +fi +cd ${INSTALL_DIR} + +test $? -eq 0 || exit 1 diff --git a/script/get-dataset-criteo/README-extra.md b/script/get-dataset-criteo/README-extra.md new file mode 100644 index 0000000000..345a59cfea --- /dev/null +++ b/script/get-dataset-criteo/README-extra.md @@ -0,0 +1,9 @@ +# Get Criteo Dataset +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the 24 days of Criteo dataset for MLPerf inference using DLRM. + +## Exported Variables +* `CM_DATASET_PATH` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. 
RHEL 9 diff --git a/script/get-dataset-criteo/README.md b/script/get-dataset-criteo/README.md new file mode 100644 index 0000000000..8b60049013 --- /dev/null +++ b/script/get-dataset-criteo/README.md @@ -0,0 +1,156 @@ +Automatically generated README for this automation recipe: **get-dataset-criteo** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-criteo,194a47d908714897) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-criteo)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,dataset,criteo,original* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get dataset criteo original" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,dataset,criteo,original` + +`cm run script --tags=get,dataset,criteo,original[,variations] [--input_flags]` + +*or* + +`cmr "get dataset criteo original"` + +`cmr "get dataset criteo original [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,criteo,original',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
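+
+The result dictionary can also be used to consume the script output programmatically. A minimal sketch, assuming the new environment keys listed under [Script output](#script-output) are returned under `new_env` (an assumption about the Python API, not shown in the generated examples):
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,criteo,original',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+else:
+    # Path to the downloaded dataset, exported via the CM_DATASET* filter
+    print (r.get('new_env', {}).get('CM_DATASET_PATH', ''))
+```
+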
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,criteo,original"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,criteo,original) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset criteo original[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_backup` + - Environment variables: + - *CM_BACKUP_ZIPS*: `yes` + - Workflow: + * `_fake` + - Environment variables: + - *CM_CRITEO_FAKE*: `yes` + - Workflow: + +
+ + +#### Script flags mapped to environment +
+
+Click here to expand this section.
+
+* `--criteo_path=value` → `CM_CRITEO_PATH=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "criteo_path":...})
+```
+
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_BACKUP_ZIPS: `no` + +
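+
+For example, `CM_BACKUP_ZIPS` defaults to `no` above and is switched to `yes` by the `_backup` variation, which keeps a copy of the downloaded `day_*.gz` archives. A minimal sketch of selecting that variation from Python:
+
+```python
+
+import cmind
+
+# Keep backup copies of the downloaded Criteo archives
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,criteo,original,_backup',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```
+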
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-criteo/_cm.json) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-criteo/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-criteo/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-criteo/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-criteo/_cm.json) + +___ +### Script output +`cmr "get dataset criteo original [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET*` +#### New environment keys auto-detected from customize diff --git a/script/get-dataset-criteo/_cm.json b/script/get-dataset-criteo/_cm.json new file mode 100644 index 0000000000..61b3d298e9 --- /dev/null +++ b/script/get-dataset-criteo/_cm.json @@ -0,0 +1,38 @@ +{ + "alias": "get-dataset-criteo", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "AI/ML datasets", + "cache": true, + "env": { + "CM_DATASET": "terabyte" + }, + "new_env_keys": [ + "CM_DATASET*" + ], + "tags": [ + "get", + "dataset", + "criteo", + "original" + ], + "input_mapping": { + "criteo_path": "CM_CRITEO_PATH" + }, + "default_env": { + "CM_BACKUP_ZIPS": "no" + }, + "uid": "194a47d908714897", + "variations": { + "backup": { + "env": { + "CM_BACKUP_ZIPS": "yes" + } + }, + "fake": { + "env": { + "CM_CRITEO_FAKE": "yes" + } + } + } +} diff --git a/script/get-dataset-criteo/run.sh b/script/get-dataset-criteo/run.sh new file mode 100644 index 0000000000..32a1c777f3 --- /dev/null +++ b/script/get-dataset-criteo/run.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +if [ ! -z ${CM_CRITEO_PATH+x} ]; then + echo "CM_DATASET_PATH=${CM_CRITEO_PATH}" > tmp-run-env.out + test $? -eq 0 || exit 1 + exit 0 +fi + +CUR=$PWD +if [[ ${CM_CRITEO_FAKE} == "yes" ]]; then + cd ${CM_MLPERF_INFERENCE_DLRM_PATH}/pytorch/tools + bash ./make_fake_criteo.sh terabyte + mv ./fake_criteo/* $CUR/ + cd $CUR +else + curl -O -C - https://storage.googleapis.com/criteo-cail-datasets/day_{`seq -s "," 0 23`}.gz + test $? 
-eq 0 || exit 1 + + if [ ${CM_BACKUP_ZIPS:-no} == "yes" ]; then + mkdir backup + cp -r *.gz backup/ + fi + yes n | gunzip -k day_{0..23}.gz +fi + +echo "CM_DATASET_PATH=$PWD" > tmp-run-env.out diff --git a/script/get-dataset-imagenet-aux/README.md b/script/get-dataset-imagenet-aux/README.md new file mode 100644 index 0000000000..584f971127 --- /dev/null +++ b/script/get-dataset-imagenet-aux/README.md @@ -0,0 +1,157 @@ +Automatically generated README for this automation recipe: **get-dataset-imagenet-aux** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-imagenet-aux,bb2c6dd8c8c64217) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-aux)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,aux,dataset-aux,image-classification,imagenet-aux* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get aux dataset-aux image-classification imagenet-aux" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,aux,dataset-aux,image-classification,imagenet-aux` + +`cm run script --tags=get,aux,dataset-aux,image-classification,imagenet-aux[,variations] ` + +*or* + +`cmr "get aux dataset-aux image-classification imagenet-aux"` + +`cmr "get aux dataset-aux image-classification imagenet-aux [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,aux,dataset-aux,image-classification,imagenet-aux',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,aux,dataset-aux,image-classification,imagenet-aux"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,aux,dataset-aux,image-classification,imagenet-aux) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get aux dataset-aux image-classification imagenet-aux[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_2012` + - Environment variables: + - *CM_DATASET_AUX_VER*: `2012` + - Workflow: + +
+ + + * Group "**download-source**" +
+ Click here to expand this section. + + * `_from.berkeleyvision` + - Environment variables: + - *CM_WGET_URL*: `http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz` + - Workflow: + * **`_from.dropbox`** (default) + - Environment variables: + - *CM_WGET_URL*: `https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz` + - Workflow: + +
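+
+A minimal sketch of selecting the Berkeley mirror above instead of the default Dropbox source:
+
+```python
+
+import cmind
+
+# Fetch caffe_ilsvrc12.tar.gz from dl.caffe.berkeleyvision.org
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,aux,dataset-aux,image-classification,imagenet-aux,_from.berkeleyvision',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```
+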
+ + +#### Default variations + +`_from.dropbox` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-aux/_cm.json) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-aux/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-aux/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-aux/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-aux/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-aux/_cm.json) + +___ +### Script output +`cmr "get aux dataset-aux image-classification imagenet-aux [,variations]" -j` +#### New environment keys (filter) + +* `CM_DATASET_AUX_*` +#### New environment keys auto-detected from customize diff --git a/script/get-dataset-imagenet-aux/_cm.json b/script/get-dataset-imagenet-aux/_cm.json new file mode 100644 index 0000000000..286af2583d --- /dev/null +++ b/script/get-dataset-imagenet-aux/_cm.json @@ -0,0 +1,44 @@ +{ + "alias": "get-dataset-imagenet-aux", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "new_env_keys": [ + "CM_DATASET_AUX_*" + ], + "tags": [ + "get", + "aux", + "dataset-aux", + "image-classification", + "imagenet-aux" + ], + "uid": "bb2c6dd8c8c64217", + "variations": { + "2012": { + "env": { + "CM_DATASET_AUX_VER": "2012" + } + }, + "from.berkeleyvision": { + "group": "download-source", + "base": [ + "2012" + ], + "env": { + "CM_WGET_URL": "http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz" + } + }, + "from.dropbox": { + "group": "download-source", + "default": true, + "base": [ + "2012" + ], + "env": { + "CM_WGET_URL": "https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz" + } + } + } +} diff --git a/script/get-dataset-imagenet-aux/run.bat b/script/get-dataset-imagenet-aux/run.bat new file mode 100644 index 0000000000..f045ee6898 --- /dev/null +++ b/script/get-dataset-imagenet-aux/run.bat @@ -0,0 +1,16 @@ +echo. + +wget -nc %CM_WGET_URL% --no-check-certificate +IF %ERRORLEVEL% NEQ 0 EXIT 1 + +mkdir data + +gzip -d caffe_ilsvrc12.tar.gz +IF %ERRORLEVEL% NEQ 0 EXIT 1 + +tar -C data -xvf caffe_ilsvrc12.tar +IF %ERRORLEVEL% NEQ 0 EXIT 1 + +del /Q /S caffe_ilsvrc12.tar + +echo CM_DATASET_AUX_PATH=%CD%\data > tmp-run-env.out diff --git a/script/get-dataset-imagenet-aux/run.sh b/script/get-dataset-imagenet-aux/run.sh new file mode 100644 index 0000000000..a05e8538c5 --- /dev/null +++ b/script/get-dataset-imagenet-aux/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +echo "" + +wget -4 -nc ${CM_WGET_URL} --no-check-certificate +test $? -eq 0 || exit 1 + +mkdir data + +tar -C data -xvzf caffe_ilsvrc12.tar.gz +test $? 
-eq 0 || exit 1 + +rm -rf caffe_ilsvrc12.tar.gz + +echo "CM_DATASET_AUX_PATH=$PWD/data" > tmp-run-env.out diff --git a/script/get-dataset-imagenet-calibration/README.md b/script/get-dataset-imagenet-calibration/README.md new file mode 100644 index 0000000000..c90dd01b73 --- /dev/null +++ b/script/get-dataset-imagenet-calibration/README.md @@ -0,0 +1,148 @@ +Automatically generated README for this automation recipe: **get-dataset-imagenet-calibration** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-imagenet-calibration,30361fad3dff49ff) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-calibration)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *get,dataset,imagenet,calibration* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get dataset imagenet calibration" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,dataset,imagenet,calibration` + +`cm run script --tags=get,dataset,imagenet,calibration[,variations] ` + +*or* + +`cmr "get dataset imagenet calibration"` + +`cmr "get dataset imagenet calibration [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,imagenet,calibration',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,imagenet,calibration"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,imagenet,calibration) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset imagenet calibration[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**calibration-option**" +
+ Click here to expand this section. + + * **`_mlperf.option1`** (default) + - Environment variables: + - *CM_MLPERF_IMAGENET_CALIBRATION_OPTION*: `one` + - *CM_DOWNLOAD_CHECKSUM*: `f09719174af3553119e2c621157773a6` + - Workflow: + * `_mlperf.option2` + - Environment variables: + - *CM_MLPERF_IMAGENET_CALIBRATION_OPTION*: `two` + - *CM_DOWNLOAD_CHECKSUM*: `e44582af00e3b4fc3fac30efd6bdd05f` + - Workflow: + +
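+
+The two options above only differ in the calibration list that is downloaded and its checksum. A minimal sketch of selecting the second MLPerf option:
+
+```python
+
+import cmind
+
+# Download cal_image_list_option_2.txt instead of option 1
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,imagenet,calibration,_mlperf.option2',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```
+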
+ + +#### Default variations + +`_mlperf.option1` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-calibration/_cm.yaml)*** + * download,file + * CM names: `--adr.['calibration-file-downloader']...` + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-calibration/_cm.yaml) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-calibration/_cm.yaml) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-calibration/_cm.yaml) + +___ +### Script output +`cmr "get dataset imagenet calibration [,variations]" -j` +#### New environment keys (filter) + +* `CM_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH` +#### New environment keys auto-detected from customize diff --git a/script/get-dataset-imagenet-calibration/_cm.yaml b/script/get-dataset-imagenet-calibration/_cm.yaml new file mode 100644 index 0000000000..741d7e2055 --- /dev/null +++ b/script/get-dataset-imagenet-calibration/_cm.yaml @@ -0,0 +1,46 @@ +uid: 30361fad3dff49ff +alias: get-dataset-imagenet-calibration + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: "AI/ML datasets" + +deps: + - tags: download,file + force_cache: true + extra_cache_tags: imagenet-calibration,imagenet,calibration + names: + - calibration-file-downloader + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH + +new_env_keys: +- CM_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH + +tags: +- get +- dataset +- imagenet +- calibration + +variations: + mlperf.option1: + group: calibration-option + default: true + env: + CM_MLPERF_IMAGENET_CALIBRATION_OPTION: one + CM_DOWNLOAD_CHECKSUM: f09719174af3553119e2c621157773a6 + adr: + calibration-file-downloader: + tags: _url.https://github.com/mlcommons/inference/raw/master/calibration/ImageNet/cal_image_list_option_1.txt + mlperf.option2: + group: calibration-option + env: + CM_MLPERF_IMAGENET_CALIBRATION_OPTION: two + CM_DOWNLOAD_CHECKSUM: e44582af00e3b4fc3fac30efd6bdd05f + adr: + calibration-file-downloader: + tags: _url.https://github.com/mlcommons/inference/raw/master/calibration/ImageNet/cal_image_list_option_2.txt diff --git a/script/get-dataset-imagenet-helper/README.md b/script/get-dataset-imagenet-helper/README.md new file mode 100644 index 0000000000..08677f4e41 --- /dev/null +++ b/script/get-dataset-imagenet-helper/README.md @@ -0,0 +1,122 @@ +Automatically generated README for this automation recipe: **get-dataset-imagenet-helper** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-imagenet-helper,a6c3c321d07742f9) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: 
*[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-helper)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,imagenet,helper,imagenet-helper* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get imagenet helper imagenet-helper" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,imagenet,helper,imagenet-helper` + +`cm run script --tags=get,imagenet,helper,imagenet-helper ` + +*or* + +`cmr "get imagenet helper imagenet-helper"` + +`cmr "get imagenet helper imagenet-helper " ` + + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,imagenet,helper,imagenet-helper',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
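+
+Once the script has run, its directory is appended to `+PYTHONPATH`, so the bundled `imagenet_helper` module becomes importable from dependent scripts. A minimal sketch, assuming `CM_DATASET_PREPROCESSED_PATH`, `CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT` and the related `CM_*` variables have already been set by the preprocessing scripts:
+
+```python
+
+# imagenet_helper reads its configuration from CM_* environment variables at import time
+from imagenet_helper import load_preprocessed_batch, image_list, class_labels
+
+# Load the first batch of preprocessed images (batch size is taken from CM_BATCH_SIZE)
+batch, next_image_index = load_preprocessed_batch(image_list, 0)
+print (batch.shape, class_labels[0])
+```
+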
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,imagenet,helper,imagenet-helper"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,imagenet,helper,imagenet-helper) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get imagenet helper imagenet-helper" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-helper/_cm.json) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-helper/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-helper/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-helper/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-helper/_cm.json) + +___ +### Script output +`cmr "get imagenet helper imagenet-helper " -j` +#### New environment keys (filter) + +* `+PYTHONPATH` +* `CM_DATASET_IMAGENET_HELPER_PATH` +#### New environment keys auto-detected from customize + +* `CM_DATASET_IMAGENET_HELPER_PATH` \ No newline at end of file diff --git a/script/get-dataset-imagenet-helper/_cm.json b/script/get-dataset-imagenet-helper/_cm.json new file mode 100644 index 0000000000..c0256d5004 --- /dev/null +++ b/script/get-dataset-imagenet-helper/_cm.json @@ -0,0 +1,18 @@ +{ + "alias": "get-dataset-imagenet-helper", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "AI/ML datasets", + "cache": true, + "new_env_keys": [ + "+PYTHONPATH", + "CM_DATASET_IMAGENET_HELPER_PATH" + ], + "tags": [ + "get", + "imagenet", + "helper", + "imagenet-helper" + ], + "uid": "a6c3c321d07742f9" +} diff --git a/script/get-dataset-imagenet-helper/customize.py b/script/get-dataset-imagenet-helper/customize.py new file mode 100644 index 0000000000..b1b7d90de6 --- /dev/null +++ b/script/get-dataset-imagenet-helper/customize.py @@ -0,0 +1,12 @@ +from cmind import utils +import os + +def postprocess(i): + env = i['env'] + + script_path = env['CM_TMP_CURRENT_SCRIPT_PATH'] + + env['CM_DATASET_IMAGENET_HELPER_PATH'] = script_path + env['+PYTHONPATH'] = [ script_path ] + + return {'return':0} diff --git a/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py b/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py new file mode 100644 index 0000000000..d28c502fc5 --- /dev/null +++ b/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 + +import os +import numpy as np + + +## Processing in batches: +# +BATCH_SIZE = int(os.getenv('CM_BATCH_SIZE', 1)) + + +## Model properties: +# +MODEL_IMAGE_HEIGHT = int(os.getenv('CM_ML_MODEL_IMAGE_HEIGHT', + os.getenv('CM_ONNX_MODEL_IMAGE_HEIGHT', + os.getenv('CM_TENSORFLOW_MODEL_IMAGE_HEIGHT', + '')))) +MODEL_IMAGE_WIDTH = int(os.getenv('CM_ML_MODEL_IMAGE_WIDTH', + os.getenv('CM_ONNX_MODEL_IMAGE_WIDTH', + os.getenv('CM_TENSORFLOW_MODEL_IMAGE_WIDTH', + '')))) +MODEL_IMAGE_CHANNELS = int(os.getenv('CM_ML_MODEL_IMAGE_CHANNELS', 3)) +MODEL_DATA_LAYOUT = os.getenv('CM_ML_MODEL_DATA_LAYOUT', 'NCHW') +MODEL_COLOURS_BGR = os.getenv('CM_ML_MODEL_COLOUR_CHANNELS_BGR', 'NO') in ('YES', 'yes', 'ON', 'on', '1') +MODEL_INPUT_DATA_TYPE = os.getenv('CM_ML_MODEL_INPUT_DATA_TYPE', 'float32') +MODEL_DATA_TYPE = os.getenv('CM_ML_MODEL_DATA_TYPE', '(unknown)') +MODEL_USE_DLA = os.getenv('CM_ML_MODEL_USE_DLA', 'NO') in ('YES', 'yes', 'ON', 'on', '1') +MODEL_MAX_BATCH_SIZE = 
int(os.getenv('CM_ML_MODEL_MAX_BATCH_SIZE', BATCH_SIZE)) + + +## Internal processing: +# +INTERMEDIATE_DATA_TYPE = np.float32 # default for internal conversion +#INTERMEDIATE_DATA_TYPE = np.int8 # affects the accuracy a bit + + +## Image normalization: +# +MODEL_NORMALIZE_DATA = os.getenv('CM_ML_MODEL_NORMALIZE_DATA') in ('YES', 'yes', 'ON', 'on', '1') +MODEL_NORMALIZE_LOWER = float(os.getenv('CM_ML_MODEL_NORMALIZE_LOWER', -1.0)) +MODEL_NORMALIZE_UPPER = float(os.getenv('CM_ML_MODEL_NORMALIZE_UPPER', 1.0)) +SUBTRACT_MEAN = os.getenv('CM_ML_MODEL_SUBTRACT_MEANS', 'YES') in ('YES', 'yes', 'ON', 'on', '1') +GIVEN_CHANNEL_MEANS = os.getenv('CM_ML_MODEL_GIVEN_CHANNEL_MEANS', '') +if GIVEN_CHANNEL_MEANS: + GIVEN_CHANNEL_MEANS = np.fromstring(GIVEN_CHANNEL_MEANS, dtype=np.float32, sep=' ').astype(INTERMEDIATE_DATA_TYPE) + if MODEL_COLOURS_BGR: + GIVEN_CHANNEL_MEANS = GIVEN_CHANNEL_MEANS[::-1] # swapping Red and Blue colour channels + +GIVEN_CHANNEL_STDS = os.getenv('CM_ML_MODEL_GIVEN_CHANNEL_STDS', '') +if GIVEN_CHANNEL_STDS: + GIVEN_CHANNEL_STDS = np.fromstring(GIVEN_CHANNEL_STDS, dtype=np.float32, sep=' ').astype(INTERMEDIATE_DATA_TYPE) + if MODEL_COLOURS_BGR: + GIVEN_CHANNEL_STDS = GIVEN_CHANNEL_STDS[::-1] # swapping Red and Blue colour channels + + + +## ImageNet dataset properties: +# +LABELS_PATH = os.environ['CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT'] + + +## Preprocessed input images' properties: +# +IMAGE_DIR = os.getenv('CM_DATASET_PREPROCESSED_PATH') +IMAGE_DATA_TYPE = os.getenv('CM_DATASET_PREPROCESSED_DATA_TYPE', 'float32') + + +def load_labels(labels_filepath): + my_labels = [] + input_file = open(labels_filepath, 'r') + for l in input_file: + my_labels.append(l.strip()) + return my_labels + +class_labels = load_labels(LABELS_PATH) + + +# Load preprocessed image filenames: +image_list = [] +all_images = os.listdir(IMAGE_DIR) +for image_file in all_images: + if image_file.endswith('.npy'): + image_list.append(image_file) + +def load_image_by_index_and_normalize(image_index): + + img_file = os.path.join(IMAGE_DIR, image_list[image_index]) + + img = np.fromfile(img_file, np.dtype(IMAGE_DATA_TYPE)) + #img = img.reshape((1,MODEL_IMAGE_HEIGHT, MODEL_IMAGE_WIDTH, 3)) + img.resize(224*224*3) + img = img.reshape((MODEL_IMAGE_HEIGHT, MODEL_IMAGE_WIDTH, MODEL_IMAGE_CHANNELS)) + if MODEL_COLOURS_BGR: + img = img[...,::-1] # swapping Red and Blue colour channels + + if IMAGE_DATA_TYPE != 'float32': + img = img.astype(np.float32) + + # Normalize + if MODEL_NORMALIZE_DATA: + img /= (255.0/(MODEL_NORMALIZE_UPPER-MODEL_NORMALIZE_LOWER)) + img += MODEL_NORMALIZE_LOWER + + # Subtract mean value + if len(GIVEN_CHANNEL_MEANS): + img -= GIVEN_CHANNEL_MEANS + elif SUBTRACT_MEAN: + img -= np.mean(img, axis=(0,1), keepdims=True) + + if len(GIVEN_CHANNEL_STDS): + img /= GIVEN_CHANNEL_STDS + + if MODEL_INPUT_DATA_TYPE == 'int8' or INTERMEDIATE_DATA_TYPE==np.int8: + img = np.clip(img, -128, 127).astype(INTERMEDIATE_DATA_TYPE) + + if MODEL_DATA_LAYOUT == 'NCHW': + img = img.transpose(2,0,1) + elif MODEL_DATA_LAYOUT == 'CHW4': + img = np.pad(img, ((0,0), (0,0), (0,1)), 'constant') + + # Add img to batch + return img.astype(MODEL_INPUT_DATA_TYPE) + + +def load_preprocessed_batch(image_list, image_index): + batch_data = None + for in_batch_idx in range(BATCH_SIZE): + img = load_image_by_index_and_normalize(image_index) + if batch_data is None: + batch_data = np.empty( (BATCH_SIZE, *img.shape), dtype=MODEL_INPUT_DATA_TYPE) + batch_data[in_batch_idx] = img + image_index += 1 + + #print('Data shape: 
{}'.format(batch_data.shape)) + + if MODEL_USE_DLA and MODEL_MAX_BATCH_SIZE>len(batch_data): + return np.pad(batch_data, ((0,MODEL_MAX_BATCH_SIZE-len(batch_data)), (0,0), (0,0), (0,0)), 'constant'), image_index + else: + return batch_data, image_index diff --git a/script/get-dataset-imagenet-train/README.md b/script/get-dataset-imagenet-train/README.md new file mode 100644 index 0000000000..16a11007f5 --- /dev/null +++ b/script/get-dataset-imagenet-train/README.md @@ -0,0 +1,149 @@ +Automatically generated README for this automation recipe: **get-dataset-imagenet-train** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-imagenet-train,2bec165da5cc4ebf) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-train)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,imagenet,train,dataset,original* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get imagenet train dataset original" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,imagenet,train,dataset,original` + +`cm run script --tags=get,imagenet,train,dataset,original [--input_flags]` + +*or* + +`cmr "get imagenet train dataset original"` + +`cmr "get imagenet train dataset original " [--input_flags]` + + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,imagenet,train,dataset,original',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,imagenet,train,dataset,original"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,imagenet,train,dataset,original) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get imagenet train dataset original" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--input=value` → `IMAGENET_TRAIN_PATH=value`
+* `--torrent=value` → `CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "input":...})
+```
+
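+
+A minimal sketch of registering an already downloaded copy of the training set via the `input` flag (the path below is a placeholder):
+
+```python
+
+import cmind
+
+# Point the script at a local ILSVRC2012 training tarball or extracted folder
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,imagenet,train,dataset,original',
+                  'input':'/data/imagenet-train',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```
+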
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-train/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-train/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-train/_cm.json)*** + * download-and-extract,file,_extract + * `if (CM_DATASET_IMAGENET_VAL_REQUIRE_DAE in ['yes', 'True'])` + - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract) + * file,extract + * `if (CM_DAE_ONLY_EXTRACT in ['yes', 'True'])` + - CM script: [extract-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/extract-file) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-train/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-train/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-train/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-train/_cm.json) + +___ +### Script output +`cmr "get imagenet train dataset original " [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_IMAGENET_*` +* `CM_DATASET_PATH` +#### New environment keys auto-detected from customize + +* `CM_DATASET_IMAGENET_PATH` +* `CM_DATASET_IMAGENET_TRAIN_PATH` +* `CM_DATASET_IMAGENET_TRAIN_REQUIRE_DAE` +* `CM_DATASET_IMAGENET_VAL_REQUIRE_DAE` +* `CM_DATASET_PATH` \ No newline at end of file diff --git a/script/get-dataset-imagenet-train/_cm.json b/script/get-dataset-imagenet-train/_cm.json new file mode 100644 index 0000000000..35c867ecc3 --- /dev/null +++ b/script/get-dataset-imagenet-train/_cm.json @@ -0,0 +1,60 @@ +{ + "alias": "get-dataset-imagenet-train", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "deps": [], + "input_description": {}, + "input_mapping": { + "torrent": "CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH", + "input": "IMAGENET_TRAIN_PATH" + }, + "new_env_keys": [ + "CM_DATASET_PATH", + "CM_DATASET_IMAGENET_*" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [ + { + "tags": "download-and-extract,file,_extract", + "update_tags_from_env_with_prefix": { + "_url.": [ "CM_DAE_URL" ] + }, + "update_tags_from_env": [ + "CM_DAE_EXTRA_TAGS" + ], + "env": { + "CM_EXTRACT_TO_FOLDER": "imagenet-2012-train" + }, + "enable_if_env": { + "CM_DATASET_IMAGENET_VAL_REQUIRE_DAE": ["yes", "True"] + } + }, + { + "tags": "file,extract", + "update_tags_from_env_with_prefix": { + "_path.": [ "CM_EXTRACT_PATH" ] + }, + "env": { + "CM_EXTRACT_TO_FOLDER": "imagenet-2012-train" + }, + "enable_if_env": { + "CM_DAE_ONLY_EXTRACT": ["yes", "True"] + } + } + ], + "tags": [ + "get", + "imagenet", + "train", + "dataset", + "original" + ], + "uid": "2bec165da5cc4ebf", + "variations": { + }, + "versions": {} +} diff --git a/script/get-dataset-imagenet-train/customize.py b/script/get-dataset-imagenet-train/customize.py new file mode 100644 index 0000000000..2eba2b9b15 --- /dev/null +++ 
b/script/get-dataset-imagenet-train/customize.py
@@ -0,0 +1,65 @@
+from cmind import utils
+import os
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+    automation = i['automation']
+    meta = i['meta']
+    if os_info['platform'] == 'windows':
+        return {'return':0}
+
+    env['CM_DATASET_IMAGENET_TRAIN_REQUIRE_DAE'] = 'no'
+
+    path = env.get('CM_INPUT', env.get('IMAGENET_TRAIN_PATH', '')).strip()
+
+    if path == '':
+        if env.get('CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH'):
+            path = env['CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH']
+            env['CM_DAE_EXTRA_TAGS'] = "_torrent"
+            env['CM_DAE_TORRENT_PATH'] = path
+            env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
+
+            return {'return':0}
+
+        else:
+            return {'return':1, 'error':'Please rerun the last CM command with --env.IMAGENET_TRAIN_PATH={path to the folder containing full ImageNet training images} or invoke cm run script "get train dataset imagenet" --input={path to the folder containing ImageNet training images}'}
+
+
+    elif not os.path.isdir(path):
+        if path.endswith(".tar"):
+            #env['CM_DAE_FILEPATH'] = path
+            env['CM_EXTRACT_FILEPATH'] = path
+            env['CM_DAE_ONLY_EXTRACT'] = 'yes'
+            return {'return':0}
+        else:
+            return {'return':1, 'error':'Path {} doesn\'t exist'.format(path)}
+    else:
+        env['CM_EXTRACT_EXTRACTED_PATH'] = path
+
+    return {'return':0}
+
+def postprocess(i):
+
+    os_info = i['os_info']
+    if os_info['platform'] == 'windows':
+        return {'return':0}
+
+    env = i['env']
+
+    path = env['CM_EXTRACT_EXTRACTED_PATH']
+
+    path_tar = os.path.join(path, 'n01440764.tar')
+
+    if not os.path.isfile(path_tar):
+        return {'return':1, 'error':'ImageNet file {} not found'.format(path_tar)}
+
+    env['CM_DATASET_PATH'] = path
+    env['CM_DATASET_IMAGENET_PATH'] = path
+    env['CM_DATASET_IMAGENET_TRAIN_PATH'] = path
+
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = path
+
+    return {'return':0}
diff --git a/script/get-dataset-imagenet-train/run.sh b/script/get-dataset-imagenet-train/run.sh
new file mode 100644
index 0000000000..be86fb43c9
--- /dev/null
+++ b/script/get-dataset-imagenet-train/run.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+test $? -eq 0 || exit $?
diff --git a/script/get-dataset-imagenet-val/README-extra.md b/script/get-dataset-imagenet-val/README-extra.md
new file mode 100644
index 0000000000..06d67c9493
--- /dev/null
+++ b/script/get-dataset-imagenet-val/README-extra.md
@@ -0,0 +1,28 @@
+## Notes
+
+The ImageNet 2012 validation dataset is no longer publicly available [here](https://image-net.org/download.php).
+
+However, it seems that you can still download it via [Academic Torrents](https://academictorrents.com/details/5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5).
+You can then register it in MLCommons CM using this portable CM script as follows:
+
+```bash
+cm pull repo mlcommons@ck
+```
+
+```bash
+cm run script "get validation dataset imagenet _2012 _full" --input={directory with ILSVRC2012_val_00000001.JPEG}
+```
+
+Alternatively, you can download the ImageNet validation dataset via torrent by passing the torrent URL as follows:
+
+```bash
+cm run script "get validation dataset imagenet _2012 _full" --torrent={Torrent URL}
+```
+
+It can now be automatically plugged into other portable CM scripts for image classification, including MLPerf inference vision benchmarks.
+ +You can also find the images and use them directly as follows: + +```bash +cm find cache --tags=dataset,validation,imagenet,_full +``` diff --git a/script/get-dataset-imagenet-val/README.md b/script/get-dataset-imagenet-val/README.md new file mode 100644 index 0000000000..818900346c --- /dev/null +++ b/script/get-dataset-imagenet-val/README.md @@ -0,0 +1,209 @@ +Automatically generated README for this automation recipe: **get-dataset-imagenet-val** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-imagenet-val,7afd58d287fe4f11) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-val)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,val,validation,dataset,imagenet,ILSVRC,image-classification,original* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get val validation dataset imagenet ILSVRC image-classification original" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,val,validation,dataset,imagenet,ILSVRC,image-classification,original` + +`cm run script --tags=get,val,validation,dataset,imagenet,ILSVRC,image-classification,original[,variations] [--input_flags]` + +*or* + +`cmr "get val validation dataset imagenet ILSVRC image-classification original"` + +`cmr "get val validation dataset imagenet ILSVRC image-classification original [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,val,validation,dataset,imagenet,ILSVRC,image-classification,original',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
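+
+For instance, a minimal complete call for this script might look as follows (a sketch: the local path is a hypothetical placeholder and is only needed when the dataset was downloaded manually, and the returned `new_env` dictionary is read defensively):
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,val,validation,dataset,imagenet,ILSVRC,image-classification,original,_full',
+                  'imagenet_path':'/data/imagenet-2012-val',  # hypothetical local path
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+else:
+    print (r.get('new_env', {}).get('CM_DATASET_IMAGENET_VAL_PATH',''))
+```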
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,val,validation,dataset,imagenet,ILSVRC,image-classification,original"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,val,validation,dataset,imagenet,ILSVRC,image-classification,original) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get val validation dataset imagenet ILSVRC image-classification original[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_2012-500` + - Workflow: + * `_2012-full` + - Workflow: + +
+ + + * Group "**count**" +
+ Click here to expand this section. + + * `_full` + - Environment variables: + - *CM_DATASET_SIZE*: `50000` + - *CM_IMAGENET_FULL*: `yes` + - *CM_DAE_FILENAME*: `ILSVRC2012_img_val.tar` + - *CM_DAE_DOWNLOADED_CHECKSUM*: `29b22e2961454d5413ddabcf34fc5622` + - Workflow: + * `_size.#` + - Environment variables: + - *CM_DATASET_SIZE*: `#` + - Workflow: + * **`_size.500`** (default) + - Environment variables: + - *CM_DATASET_SIZE*: `500` + - *CM_DAE_FILENAME*: `ILSVRC2012_img_val_500.tar` + - *CM_DAE_URL*: `http://cKnowledge.org/ai/data/ILSVRC2012_img_val_500.tar` + - Workflow: + +
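+
+Note that, judging by the meta description, the wildcard variation `_size.#` only sets `CM_DATASET_SIZE` and does not come with a download URL, so it is typically combined with a local copy of the images, e.g. (with a hypothetical path):
+
+```bash
+cmr "get val validation dataset imagenet ILSVRC image-classification original _size.1000" --input=/data/imagenet-2012-val
+```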
+ + + * Group "**dataset-version**" +
+ Click here to expand this section. + + * **`_2012`** (default) + - Environment variables: + - *CM_DATASET_VER*: `2012` + - Workflow: + +
+ + +#### Default variations + +`_2012,_size.500` + +#### Script flags mapped to environment +
+<summary>Click here to expand this section.</summary>
+
+* `--imagenet_path=value`  →  `IMAGENET_PATH=value`
+* `--torrent=value`  →  `CM_DATASET_IMAGENET_VAL_TORRENT_PATH=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "imagenet_path":...})
+```
+
+</details>
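+
+For example, on the command line (with a hypothetical local path):
+
+```bash
+cm run script --tags=get,val,validation,dataset,imagenet,ILSVRC,image-classification,original --imagenet_path=/data/imagenet-2012-val
+```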
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-val/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-val/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-val/_cm.json)*** + * download-and-extract,file,_extract + * `if (CM_DATASET_IMAGENET_VAL_REQUIRE_DAE in ['yes', 'True'])` + - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract) + * file,extract,_no-remove-extracted + * `if (CM_DAE_ONLY_EXTRACT in ['yes', 'True'])` + - CM script: [extract-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/extract-file) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-val/run.bat) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-val/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-val/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-imagenet-val/_cm.json) + +___ +### Script output +`cmr "get val validation dataset imagenet ILSVRC image-classification original [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_IMAGENET_PATH` +* `CM_DATASET_IMAGENET_VAL_PATH` +* `CM_DATASET_PATH` +* `CM_DATASET_SIZE` +* `CM_DATASET_VER` +#### New environment keys auto-detected from customize + +* `CM_DATASET_IMAGENET_PATH` +* `CM_DATASET_IMAGENET_VAL_PATH` +* `CM_DATASET_PATH` \ No newline at end of file diff --git a/script/get-dataset-imagenet-val/_cm.json b/script/get-dataset-imagenet-val/_cm.json new file mode 100644 index 0000000000..3dcc8a5002 --- /dev/null +++ b/script/get-dataset-imagenet-val/_cm.json @@ -0,0 +1,114 @@ +{ + "alias": "get-dataset-imagenet-val", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "env": { + "CM_DATASET": "IMAGENET" + }, + "new_env_keys": [ + "CM_DATASET_PATH", + "CM_DATASET_IMAGENET_PATH", + "CM_DATASET_IMAGENET_VAL_PATH", + "CM_DATASET_SIZE", + "CM_DATASET_VER" + ], + "tags": [ + "get", + "val", + "validation", + "dataset", + "imagenet", + "ILSVRC", + "image-classification", + "original" + ], + "input_mapping": { + "imagenet_path": "IMAGENET_PATH", + "torrent": "CM_DATASET_IMAGENET_VAL_TORRENT_PATH" + }, + "uid": "7afd58d287fe4f11", + "deps": [ + { + "tags": "detect,os" + } + ], + "prehook_deps": [ + { + "tags": "download-and-extract,file,_extract", + "update_tags_from_env_with_prefix": { + "_url.": [ "CM_DAE_URL" ] + }, + "update_tags_from_env": [ + "CM_DAE_EXTRA_TAGS" + ], + "env": { + "CM_EXTRACT_TO_FOLDER": "imagenet-2012-val" + }, + "enable_if_env": { + "CM_DATASET_IMAGENET_VAL_REQUIRE_DAE": ["yes", "True"] + } + }, + { + "tags": "file,extract,_no-remove-extracted", + "update_tags_from_env_with_prefix": { + "_path.": [ "CM_EXTRACT_PATH" ] + }, + "env": { + "CM_EXTRACT_TO_FOLDER": "imagenet-2012-val" + }, + 
"enable_if_env": { + "CM_DAE_ONLY_EXTRACT": ["yes", "True"] + } + } + ], + "variations": { + "2012": { + "group": "dataset-version", + "default": true, + "env": { + "CM_DATASET_VER": "2012" + } + }, + "2012-500": { + "base": [ + "size.500", + "2012" + ] + }, + "2012-full": { + "base": [ + "full", + "2012" + ] + }, + "full": { + "group": "count", + "env": { + "CM_DATASET_SIZE": "50000", + "CM_IMAGENET_FULL": "yes", + "CM_DAE_FILENAME": "ILSVRC2012_img_val.tar", + "CM_DAE_DOWNLOADED_CHECKSUM": "29b22e2961454d5413ddabcf34fc5622" + } + }, + "size.500": { + "group": "count", + "default": true, + "env": { + "CM_DATASET_SIZE": "500", + "CM_DAE_FILENAME": "ILSVRC2012_img_val_500.tar", + "CM_DAE_URL": "http://cKnowledge.org/ai/data/ILSVRC2012_img_val_500.tar" + } + }, + "size.#": { + "group": "count", + "env": { + "CM_DATASET_SIZE": "#" + } + } + }, + "docker": { + "run": false + } +} diff --git a/script/get-dataset-imagenet-val/customize.py b/script/get-dataset-imagenet-val/customize.py new file mode 100644 index 0000000000..e35af9664f --- /dev/null +++ b/script/get-dataset-imagenet-val/customize.py @@ -0,0 +1,79 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + automation = i['automation'] + meta = i['meta'] + os_info = i['os_info'] + + env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'no' + + full = env.get('CM_IMAGENET_FULL', '').strip() == 'yes' + + path = env.get('CM_INPUT', env.get('IMAGENET_PATH', env.get('CM_DATASET_IMAGENET_PATH', ''))).strip() + + if path == '': + if full: + + if env.get('CM_DATASET_IMAGENET_VAL_TORRENT_PATH'): + path = env['CM_DATASET_IMAGENET_VAL_TORRENT_PATH'] + env['CM_DAE_EXTRA_TAGS'] = "_torrent" + env['CM_DAE_TORRENT_PATH'] = path + env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes' + return {'return':0} + + else: + env['CM_DAE_URL'] = 'https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar' + env['CM_DAE_FILENAME'] = 'ILSVRC2012_img_val.tar' + env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes' + + return {'return':0} + #return {'return':1, 'error':'Please rerun the last CM command with --env.IMAGENET_PATH={path the folder containing full ImageNet images} or envoke cm run script "get val dataset imagenet" --input={path to the folder containing ImageNet images}'} + + else: + env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes' + + + elif not os.path.isdir(path): + if path.endswith(".tar"): + env['CM_EXTRACT_FILEPATH'] = path + env['CM_DAE_ONLY_EXTRACT'] = 'yes' + return {'return':0} + else: + return {'return':1, 'error':'Path {} doesn\'t exist'.format(path)} + else: + env['CM_EXTRACT_EXTRACTED_PATH'] = path + + return {'return':0} + +def postprocess(i): + + os_info = i['os_info'] + + env = i['env'] + path = env['CM_EXTRACT_EXTRACTED_PATH'] + path1 = os.path.join(path, 'imagenet-2012-val') + if os.path.isdir(path1): + path = path1 + + path_image = os.path.join(path, 'ILSVRC2012_val_00000001.JPEG') + + if not os.path.isfile(path_image): + return {'return':1, 'error':'ImageNet file {} not found'.format(path_image)} + + files = os.listdir(path) + if len(files) < int(env.get('CM_DATASET_SIZE', 0)): + return {'return':1, 'error':'Only {} files found in {}. 
{} expected'.format(len(files), path, env.get('CM_DATASET_SIZE'))}
+
+    env['CM_DATASET_PATH'] = path
+    env['CM_DATASET_IMAGENET_PATH'] = path
+    env['CM_DATASET_IMAGENET_VAL_PATH'] = path
+
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = path
+
+    return {'return':0}
diff --git a/script/get-dataset-imagenet-val/run.bat b/script/get-dataset-imagenet-val/run.bat
new file mode 100644
index 0000000000..94625b7e5c
--- /dev/null
+++ b/script/get-dataset-imagenet-val/run.bat
@@ -0,0 +1,17 @@
+if "%CM_EXTRACT_EXTRACTED_PATH%" == "" (
+  echo.
+
+  wget -nc https://www.dropbox.com/s/57s11df6pts3z69/ILSVRC2012_img_val_500.tar --no-check-certificate
+  IF %ERRORLEVEL% NEQ 0 EXIT 1
+
+  mkdir images
+
+  tar -C images -xvf ILSVRC2012_img_val_500.tar
+  IF %ERRORLEVEL% NEQ 0 EXIT 1
+
+  del /Q /S ILSVRC2012_img_val_500.tar
+
+  echo CM_DATASET_PATH=%CD%\images > tmp-run-env.out
+  echo CM_DATASET_IMAGENET_PATH=%CD%\images >> tmp-run-env.out
+  echo CM_DATASET_IMAGENET_VAL_PATH=%CD%\images >> tmp-run-env.out
+)
diff --git a/script/get-dataset-kits19/README.md b/script/get-dataset-kits19/README.md
new file mode 100644
index 0000000000..b6f2057843
--- /dev/null
+++ b/script/get-dataset-kits19/README.md
@@ -0,0 +1,174 @@
+Automatically generated README for this automation recipe: **get-dataset-kits19**
+
+Category: **AI/ML datasets**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-kits19,79992bb221024ac5) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-kits19)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,dataset,medical-imaging,kits,original,kits19*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get dataset medical-imaging kits original kits19" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,dataset,medical-imaging,kits,original,kits19`
+
+`cm run script --tags=get,dataset,medical-imaging,kits,original,kits19[,variations] `
+
+*or*
+
+`cmr "get dataset medical-imaging kits original kits19"`
+
+`cmr "get dataset medical-imaging kits original kits19 [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,medical-imaging,kits,original,kits19',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
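+
+For instance, a specific revision of the kits19 repository can be requested via the `version` key (a sketch; see the list of versions below):
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,medical-imaging,kits,original,kits19',
+                  'version':'master',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```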
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,medical-imaging,kits,original,kits19"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,medical-imaging,kits,original,kits19) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset medical-imaging kits original kits19[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_calibration` + - Environment variables: + - *CM_DATASET_CALIBRATION*: `yes` + - Workflow: + * `_default` + - Environment variables: + - *CM_GIT_PATCH*: `no` + - Workflow: + * `_full-history` + - Environment variables: + - *CM_GIT_DEPTH*: `` + - Workflow: + * `_no-recurse-submodules` + - Environment variables: + - *CM_GIT_RECURSE_SUBMODULES*: `` + - Workflow: + * `_patch` + - Environment variables: + - *CM_GIT_PATCH*: `yes` + - Workflow: + * `_short-history` + - Environment variables: + - *CM_GIT_DEPTH*: `--depth 5` + - Workflow: + * `_validation` + - Environment variables: + - *CM_DATASET_VALIDATION*: `yes` + - Workflow: + +
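+
+For example, the validation split can be requested by appending a variation to the tags (a sketch):
+
+```bash
+cmr "get dataset medical-imaging kits original kits19 _validation"
+```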
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_GIT_CHECKOUT: `master` +* CM_GIT_DEPTH: `--depth 2` +* CM_GIT_PATCH: `no` +* CM_GIT_RECURSE_SUBMODULES: `` +* CM_GIT_URL: `https://github.com/neheller/kits19` + +
+ +#### Versions +Default version: `master` + +* `custom` +* `master` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-kits19/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-kits19/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-kits19/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-kits19/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-kits19/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-kits19/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-kits19/_cm.json) + +___ +### Script output +`cmr "get dataset medical-imaging kits original kits19 [,variations]" -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +#### New environment keys auto-detected from customize + +* `CM_DATASET_PATH` \ No newline at end of file diff --git a/script/get-dataset-kits19/_cm.json b/script/get-dataset-kits19/_cm.json new file mode 100644 index 0000000000..816a54459e --- /dev/null +++ b/script/get-dataset-kits19/_cm.json @@ -0,0 +1,85 @@ +{ + "alias": "get-dataset-kits19", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "default_env": { + "CM_GIT_CHECKOUT": "master", + "CM_GIT_DEPTH": "--depth 2", + "CM_GIT_PATCH": "no", + "CM_GIT_RECURSE_SUBMODULES": "", + "CM_GIT_URL": "https://github.com/neheller/kits19" + }, + "default_version": "master", + "deps": [ + { + "tags": "detect,os" + } + ], + "new_env_keys": [ + "CM_DATASET_*" + ], + "tags": [ + "get", + "dataset", + "medical-imaging", + "kits", + "original", + "kits19" + ], + "uid": "79992bb221024ac5", + "variations": { + "default": { + "base": [ + "short-history" + ], + "env": { + "CM_GIT_PATCH": "no" + } + }, + "full-history": { + "env": { + "CM_GIT_DEPTH": "" + } + }, + "no-recurse-submodules": { + "env": { + "CM_GIT_RECURSE_SUBMODULES": "" + } + }, + "patch": { + "env": { + "CM_GIT_PATCH": "yes" + } + }, + "short-history": { + "env": { + "CM_GIT_DEPTH": "--depth 5" + } + }, + "validation": { + "env": { + "CM_DATASET_VALIDATION": "yes" + } + }, + "calibration": { + "env": { + "CM_DATASET_CALIBRATION": "yes" + } + } + }, + "versions": { + "custom": { + "env": { + "CM_GIT_CHECKOUT": "", + "CM_GIT_SHA": "yes" + } + }, + "master": { + "env": { + "CM_GIT_CHECKOUT": "master" + } + } + } +} diff --git a/script/get-dataset-kits19/customize.py b/script/get-dataset-kits19/customize.py new file mode 100644 index 0000000000..97c583e9d5 --- /dev/null +++ b/script/get-dataset-kits19/customize.py @@ -0,0 +1,39 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + meta = i['meta'] + + if not env.get('CM_GIT_CHECKOUT',''): + return {'return':1, 'error': 'Please 
provide a valid CM_GIT_SHA inside the custom variation of _cm.json'}
+
+    if 'CM_GIT_DEPTH' not in env:
+        env['CM_GIT_DEPTH'] = ''
+
+    if 'CM_GIT_RECURSE_SUBMODULES' not in env:
+        env['CM_GIT_RECURSE_SUBMODULES'] = ''
+
+    need_version = env.get('CM_VERSION','')
+    versions = meta['versions']
+
+    if need_version != '' and need_version not in versions:
+        env['CM_GIT_CHECKOUT'] = need_version
+
+    return {'return':0}
+
+
+def postprocess(i):
+
+    env = i['env']
+    env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'kits19', 'data')
+    state = i['state']
+
+    return {'return':0}
diff --git a/script/get-dataset-kits19/run.sh b/script/get-dataset-kits19/run.sh
new file mode 100644
index 0000000000..f5bf0617a0
--- /dev/null
+++ b/script/get-dataset-kits19/run.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+CUR_DIR=$PWD
+SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}
+
+echo "******************************************************"
+echo "Cloning kits19 from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..."
+
+if [ ! -d "kits19" ]; then
+  if [ -z "${CM_GIT_SHA}" ]; then
+    cmd="git clone ${CM_GIT_RECURSE_SUBMODULES} -b ${CM_GIT_CHECKOUT} ${CM_GIT_URL} ${CM_GIT_DEPTH} kits19"
+    echo $cmd
+    eval $cmd
+    cd kits19
+  else
+    git clone ${CM_GIT_RECURSE_SUBMODULES} ${CM_GIT_URL} ${CM_GIT_DEPTH} kits19
+    cd kits19
+    git checkout -b "${CM_GIT_CHECKOUT}"
+  fi
+  if [ "${?}" != "0" ]; then exit 1; fi
+else
+  cd kits19
+fi
+
+if [ "${CM_GIT_PATCH}" == "yes" ]; then
+  patch_filename=${CM_GIT_PATCH_FILENAME}
+  if [ -z "${CM_GIT_PATCH_FILENAMES}" ]; then
+    patchfile=${CM_GIT_PATCH_FILENAME:-"git.patch"}
+    CM_GIT_PATCH_FILENAMES=$patchfile
+  fi
+  IFS=', ' read -r -a patch_files <<< "${CM_GIT_PATCH_FILENAMES}"
+  for patch_filename in "${patch_files[@]}"
+  do
+    echo "Applying patch ${SCRIPT_DIR}/patch/$patch_filename"
+    git apply ${SCRIPT_DIR}/patch/"$patch_filename"
+    if [ "${?}" != "0" ]; then exit 1; fi
+  done
+fi
+cd ${CUR_DIR}/kits19
+${CM_PYTHON_BIN_WITH_PATH} -m starter_code.get_imaging
+cd data
+cp -rf case_00185 case_00400  # kits19 does not ship imaging for case_00400; it is identical to case_00185
+cd "$CUR_DIR"
diff --git a/script/get-dataset-librispeech/README-extra.md b/script/get-dataset-librispeech/README-extra.md
new file mode 100644
index 0000000000..265902c92b
--- /dev/null
+++ b/script/get-dataset-librispeech/README-extra.md
@@ -0,0 +1,26 @@
+# Downloads LibriSpeech Dataset
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the LibriSpeech dataset.
+
+## Usage
+
+```
+cm run script --tags=get,dataset,librispeech --version=[VERSION]
+```
+where `[VERSION]` is one of:
+* `dev-clean`
+* `dev-other`
+* `test-clean`
+* `test-other`
+* `train-clean-100`
+* `train-clean-360`
+* `train-other-500`
+
+## Exported Variables
+* `CM_DATASET_ARCHIVE:`
+* `CM_DATASET_LIBRISPEECH_PATH:`
+* `CM_DATASET_MD5:`
+* `CM_DATASET_NAME:`
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. 
RHEL 9 diff --git a/script/get-dataset-librispeech/README.md b/script/get-dataset-librispeech/README.md new file mode 100644 index 0000000000..49ccb98cf6 --- /dev/null +++ b/script/get-dataset-librispeech/README.md @@ -0,0 +1,136 @@ +Automatically generated README for this automation recipe: **get-dataset-librispeech** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-librispeech,09f29df607e0415d) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-librispeech)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,dataset,speech,speech-recognition,librispeech,validation,audio,training,original* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get dataset speech speech-recognition librispeech validation audio training original" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,dataset,speech,speech-recognition,librispeech,validation,audio,training,original` + +`cm run script --tags=get,dataset,speech,speech-recognition,librispeech,validation,audio,training,original ` + +*or* + +`cmr "get dataset speech speech-recognition librispeech validation audio training original"` + +`cmr "get dataset speech speech-recognition librispeech validation audio training original " ` + + +#### Run this script from Python + +
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,speech,speech-recognition,librispeech,validation,audio,training,original',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
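+
+For instance, a specific LibriSpeech subset can be selected via the `version` key, and the resolved dataset location can then be read from the returned environment (a sketch; see the list of versions below, and note that `new_env` is read defensively):
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,speech,speech-recognition,librispeech,validation,audio,training,original',
+                  'version':'dev-clean',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+else:
+    print (r.get('new_env', {}).get('CM_DATASET_LIBRISPEECH_PATH',''))
+```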
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,speech,speech-recognition,librispeech,validation,audio,training,original"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,speech,speech-recognition,librispeech,validation,audio,training,original) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset speech speech-recognition librispeech validation audio training original" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +#### Versions +Default version: `dev-clean` + +* `dev-clean` +* `dev-other` +* `test-clean` +* `test-other` +* `train-clean-100` +* `train-clean-360` +* `train-other-500` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-librispeech/_cm.json)*** + * get,sys-utils-cm + * CM names: `--adr.['sys-utils']...` + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-librispeech/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-librispeech/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-librispeech/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-librispeech/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-librispeech/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-librispeech/_cm.json) + +___ +### Script output +`cmr "get dataset speech speech-recognition librispeech validation audio training original " -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +#### New environment keys auto-detected from customize + +* `CM_DATASET_LIBRISPEECH_PATH` +* `CM_DATASET_PATH` \ No newline at end of file diff --git a/script/get-dataset-librispeech/_cm.json b/script/get-dataset-librispeech/_cm.json new file mode 100644 index 0000000000..f93de229a7 --- /dev/null +++ b/script/get-dataset-librispeech/_cm.json @@ -0,0 +1,86 @@ +{ + "alias": "get-dataset-librispeech", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "default_version": "dev-clean", + "category": "AI/ML datasets", + "deps": [ + { + "names": [ + "sys-utils" + ], + "tags": "get,sys-utils-cm" + } + ], + "env": { + "CM_DATASET": "LIBRISPEECH", + "CM_WGET_URL": "http://www.openslr.org/resources/12/<<>>" + }, + "new_env_keys": [ + "CM_DATASET_*" + ], + "tags": [ + "get", + "dataset", + "speech", + "speech-recognition", + "librispeech", + "validation", + "audio", + "training", + "original" + ], + "uid": "09f29df607e0415d", + "versions": { + "dev-clean": { + "env": { + "CM_DATASET_ARCHIVE": "dev-clean.tar.gz", + "CM_DATASET_MD5": "42e2234ba48799c1f50f24a7926300a1", + "CM_DATASET_NAME": "LibriSpeech Dev Clean dataset" + } + }, + "dev-other": { + "env": { + "CM_DATASET_ARCHIVE": "dev-other.tar.gz", + "CM_DATASET_MD5": "c8d0bcc9cca99d4f8b62fcc847357931", + "CM_DATASET_NAME": "LibriSpeech Dev Other dataset" + } + }, + "test-clean": { + "env": { + "CM_DATASET_ARCHIVE": "test-clean.tar.gz", + "CM_DATASET_MD5": "32fa31d27d2e1cad72775fee3f4849a9", + "CM_DATASET_NAME": "LibriSpeech Test Clean dataset" + } + }, + "test-other": { + "env": { + "CM_DATASET_ARCHIVE": "test-other.tar.gz", + "CM_DATASET_MD5": "fb5a50374b501bb3bac4815ee91d3135", + "CM_DATASET_NAME": "LibriSpeech Test Other dataset" + } + }, + "train-clean-100": { + "env": { + "CM_DATASET_ARCHIVE": "train-clean-100.tar.gz", + "CM_DATASET_MD5": "2a93770f6d5c6c964bc36631d331a522", + "CM_DATASET_NAME": "LibriSpeech Train Clean 100 dataset" 
+      }
+    },
+    "train-clean-360": {
+      "env": {
+        "CM_DATASET_ARCHIVE": "train-clean-360.tar.gz",
+        "CM_DATASET_MD5": "c0e676e450a7ff2f54aeade5171606fa",
+        "CM_DATASET_NAME": "LibriSpeech Train Clean 360 dataset"
+      }
+    },
+    "train-other-500": {
+      "env": {
+        "CM_DATASET_ARCHIVE": "train-other-500.tar.gz",
+        "CM_DATASET_MD5": "d1a0fd59409feb2c614ce4d30c387708",
+        "CM_DATASET_NAME": "LibriSpeech Train Other 500 dataset"
+      }
+    }
+  }
+}
diff --git a/script/get-dataset-librispeech/customize.py b/script/get-dataset-librispeech/customize.py
new file mode 100644
index 0000000000..85ec8e43c9
--- /dev/null
+++ b/script/get-dataset-librispeech/customize.py
@@ -0,0 +1,19 @@
+from cmind import utils
+import os
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    return {'return':0}
+
+
+def postprocess(i):
+    env = i['env']
+    folder_name = env['CM_DATASET_ARCHIVE'].split(".")[0]
+    env['CM_DATASET_LIBRISPEECH_PATH'] = os.path.join(os.getcwd(), "LibriSpeech", folder_name)
+    env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), "LibriSpeech", folder_name)
+
+    return {'return':0}
diff --git a/script/get-dataset-librispeech/run.sh b/script/get-dataset-librispeech/run.sh
new file mode 100644
index 0000000000..9c2fc26608
--- /dev/null
+++ b/script/get-dataset-librispeech/run.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+wget -nc ${CM_WGET_URL} --no-check-certificate
+test $? -eq 0 || exit 1
+
+tar -x --skip-old-files -vf ${CM_DATASET_ARCHIVE}
+test $? -eq 0 || exit 1
+
diff --git a/script/get-dataset-openimages-annotations/README.md b/script/get-dataset-openimages-annotations/README.md
new file mode 100644
index 0000000000..d7d1e4a991
--- /dev/null
+++ b/script/get-dataset-openimages-annotations/README.md
@@ -0,0 +1,146 @@
+Automatically generated README for this automation recipe: **get-dataset-openimages-annotations**
+
+Category: **AI/ML datasets**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-openimages-annotations,47e2158ed24c44e9) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-annotations)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,aux,dataset-aux,object-detection,openimages,annotations*
+* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get aux dataset-aux object-detection openimages annotations" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,aux,dataset-aux,object-detection,openimages,annotations`
+
+`cm run script --tags=get,aux,dataset-aux,object-detection,openimages,annotations[,variations] `
+
+*or*
+
+`cmr "get aux dataset-aux object-detection openimages annotations"`
+
+`cmr "get aux dataset-aux object-detection openimages annotations [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,aux,dataset-aux,object-detection,openimages,annotations',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
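+
+For example, the resolved annotation paths can be printed in JSON form by adding the `-j` flag on the command line (the exported keys are listed under "Script output" below):
+
+```bash
+cmr "get aux dataset-aux object-detection openimages annotations" -j
+```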
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,aux,dataset-aux,object-detection,openimages,annotations"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,aux,dataset-aux,object-detection,openimages,annotations) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get aux dataset-aux object-detection openimages annotations[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**download-source**" +
+ Click here to expand this section. + + * **`_from.github`** (default) + - Environment variables: + - *CM_WGET_URL*: `https://github.com/mlcommons/inference/releases/download/v2.1/openimages-mlperf_annotations_2.1.json.zip` + - Workflow: + +
+ + +#### Default variations + +`_from.github` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-annotations/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-annotations/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-annotations/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-annotations/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-annotations/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-annotations/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-annotations/_cm.json) + +___ +### Script output +`cmr "get aux dataset-aux object-detection openimages annotations [,variations]" -j` +#### New environment keys (filter) + +* `CM_DATASET_ANNOTATIONS_*` +* `CM_DATASET_OPENIMAGES_ANNOTATIONS_*` +#### New environment keys auto-detected from customize + +* `CM_DATASET_ANNOTATIONS_DIR_PATH` +* `CM_DATASET_ANNOTATIONS_FILE_PATH` +* `CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH` +* `CM_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH` \ No newline at end of file diff --git a/script/get-dataset-openimages-annotations/_cm.json b/script/get-dataset-openimages-annotations/_cm.json new file mode 100644 index 0000000000..2cfa033e21 --- /dev/null +++ b/script/get-dataset-openimages-annotations/_cm.json @@ -0,0 +1,29 @@ +{ + "alias": "get-dataset-openimages-annotations", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "new_env_keys": [ + "CM_DATASET_OPENIMAGES_ANNOTATIONS_*", + "CM_DATASET_ANNOTATIONS_*" + ], + "tags": [ + "get", + "aux", + "dataset-aux", + "object-detection", + "openimages", + "annotations" + ], + "uid": "47e2158ed24c44e9", + "variations": { + "from.github": { + "default": true, + "env": { + "CM_WGET_URL": "https://github.com/mlcommons/inference/releases/download/v2.1/openimages-mlperf_annotations_2.1.json.zip" + }, + "group": "download-source" + } + } +} diff --git a/script/get-dataset-openimages-annotations/customize.py b/script/get-dataset-openimages-annotations/customize.py new file mode 100644 index 0000000000..fa5f4b2630 --- /dev/null +++ b/script/get-dataset-openimages-annotations/customize.py @@ -0,0 +1,24 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + url = env['CM_WGET_URL'] + filename=os.path.basename(url) + env['CM_WGET_ZIP_FILE_NAME'] = filename + + return {'return':0} + + +def postprocess(i): + env = i['env'] + + env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join(os.getcwd(), 'openimages-mlperf.json') + env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join(os.getcwd()) + env['CM_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + env['CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH'] = env['CM_DATASET_ANNOTATIONS_DIR_PATH'] + + return {'return':0} diff --git a/script/get-dataset-openimages-annotations/run.sh 
b/script/get-dataset-openimages-annotations/run.sh
new file mode 100644
index 0000000000..72c8557a4e
--- /dev/null
+++ b/script/get-dataset-openimages-annotations/run.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+cmd="wget -nc ${CM_WGET_URL} --no-check-certificate"
+echo $cmd
+eval $cmd
+test $? -eq 0 || exit 1
+cmd="unzip ${CM_WGET_ZIP_FILE_NAME}"
+echo $cmd
+eval $cmd
+test $? -eq 0 || exit 1
diff --git a/script/get-dataset-openimages-calibration/README.md b/script/get-dataset-openimages-calibration/README.md
new file mode 100644
index 0000000000..bfaf9cb03a
--- /dev/null
+++ b/script/get-dataset-openimages-calibration/README.md
@@ -0,0 +1,180 @@
+Automatically generated README for this automation recipe: **get-dataset-openimages-calibration**
+
+Category: **AI/ML datasets**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-openimages-calibration,27228976bb084dd0) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-calibration)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see in above meta description): *get,dataset,openimages,calibration*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get dataset openimages calibration" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,dataset,openimages,calibration`
+
+`cm run script --tags=get,dataset,openimages,calibration[,variations] `
+
+*or*
+
+`cmr "get dataset openimages calibration"`
+
+`cmr "get dataset openimages calibration [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,openimages,calibration',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
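+
+For example, the calibration list can be reduced to a custom number of images by combining the `_filter` and `_filter-size.#` variations described below (a sketch):
+
+```bash
+cmr "get dataset openimages calibration _filter _filter-size.100"
+```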
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,openimages,calibration"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,openimages,calibration) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset openimages calibration[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_filter` + - Environment variables: + - *CM_CALIBRATE_FILTER*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,openimages,dataset,original,_calibration + - CM script: [get-dataset-openimages](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openimages) + +
+ + + * Group "**calibration-option**" +
+ Click here to expand this section. + + * **`_mlperf.option1`** (default) + - Environment variables: + - *CM_MLPERF_OPENIMAGES_CALIBRATION_OPTION*: `one` + - *CM_DOWNLOAD_CHECKSUM1*: `f09719174af3553119e2c621157773a6` + - Workflow: + +
+ + + * Group "**filter-size**" +
+ Click here to expand this section. + + * `_filter-size.#` + - Environment variables: + - *CM_CALIBRATION_FILTER_SIZE*: `#` + - Workflow: + * `_filter-size.400` + - Environment variables: + - *CM_CALIBRATION_FILTER_SIZE*: `400` + - Workflow: + +
+ + +#### Default variations + +`_mlperf.option1` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-calibration/_cm.yaml)*** + * download,file + * CM names: `--adr.['calibration-file-downloader']...` + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-calibration/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-calibration/_cm.yaml) + 1. ***Run native script if exists*** + * [run-filter.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-calibration/run-filter.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-calibration/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-calibration/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages-calibration/_cm.yaml) + +___ +### Script output +`cmr "get dataset openimages calibration [,variations]" -j` +#### New environment keys (filter) + +* `CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH` +#### New environment keys auto-detected from customize + +* `CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH` \ No newline at end of file diff --git a/script/get-dataset-openimages-calibration/_cm.yaml b/script/get-dataset-openimages-calibration/_cm.yaml new file mode 100644 index 0000000000..742a494d2f --- /dev/null +++ b/script/get-dataset-openimages-calibration/_cm.yaml @@ -0,0 +1,60 @@ +uid: 27228976bb084dd0 +alias: get-dataset-openimages-calibration + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: "AI/ML datasets" + +deps: + - tags: download,file + force_cache: true + extra_cache_tags: openimages-calibration,openimages,calibration + names: + - calibration-file-downloader + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH + +new_env_keys: +- CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH + +tags: +- get +- dataset +- openimages +- calibration + +variations: + mlperf.option1: + group: calibration-option + default: true + env: + CM_MLPERF_OPENIMAGES_CALIBRATION_OPTION: one + CM_DOWNLOAD_CHECKSUM1: f09719174af3553119e2c621157773a6 + adr: + calibration-file-downloader: + tags: _url.https://github.com/mlcommons/inference/raw/master/calibration/openimages/openimages_cal_images_list.txt + filter: + default_variations: + filter-size: filter_size.400 + deps: + - names: + - python + - python3 + tags: get,python3 + - tags: get,openimages,dataset,original,_calibration + env: + CM_CALIBRATE_FILTER: '' + env: + CM_CALIBRATE_FILTER: 'yes' + + filter-size.#: + group: filter-size + env: + CM_CALIBRATION_FILTER_SIZE: "#" + filter-size.400: + group: filter-size + env: + CM_CALIBRATION_FILTER_SIZE: 400 diff --git a/script/get-dataset-openimages-calibration/customize.py b/script/get-dataset-openimages-calibration/customize.py new file mode 100644 index 0000000000..71e1a646d4 --- /dev/null +++ b/script/get-dataset-openimages-calibration/customize.py @@ -0,0 +1,27 @@ +from cmind import utils +import os + +def 
preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    meta = i['meta']
+
+    automation = i['automation']
+
+    quiet = (env.get('CM_QUIET', False) == 'yes')
+
+    if env.get("CM_CALIBRATE_FILTER", "") == "yes":
+        i['run_script_input']['script_name'] = "run-filter"
+        env['CM_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST'] = os.path.join(os.getcwd(), "filtered.txt")
+        env['CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH'] = env['CM_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST']
+
+    return {'return':0}
+
+def postprocess(i):
+
+    env = i['env']
+
+    return {'return':0}
diff --git a/script/get-dataset-openimages-calibration/filter.py b/script/get-dataset-openimages-calibration/filter.py
new file mode 100644
index 0000000000..81b768249c
--- /dev/null
+++ b/script/get-dataset-openimages-calibration/filter.py
@@ -0,0 +1,20 @@
+import json
+import sys
+import os
+
+with open(sys.argv[1], "r") as f:
+    data = json.load(f)
+
+images = {}  # image id -> image record, annotated with a per-image box count
+for image in data['images']:
+    images[image['id']] = image
+    images[image['id']]['num_boxes'] = 0
+
+annots = data['annotations']
+for box in annots:
+    imageid = box['image_id']
+    images[imageid]['num_boxes'] += 1
+
+sorted_image_data = sorted(data['images'], key=lambda x: x['num_boxes'], reverse=os.environ.get('CM_CALIBRATION_FILTER_ORDER_BY_NUM_BOXES_ASC', '') == "yes")
+for image in sorted_image_data:
+    print(image['file_name'])
diff --git a/script/get-dataset-openimages-calibration/run-filter.sh b/script/get-dataset-openimages-calibration/run-filter.sh
new file mode 100644
index 0000000000..9b1a90c688
--- /dev/null
+++ b/script/get-dataset-openimages-calibration/run-filter.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/filter.py ${CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH} > ordered.txt
+test $? -eq 0 || exit $?
+head -n ${CM_CALIBRATION_FILTER_SIZE} ordered.txt > filtered.txt
+test $? -eq 0 || exit $?
diff --git a/script/get-dataset-openimages/README-extra.md b/script/get-dataset-openimages/README-extra.md
new file mode 100644
index 0000000000..b6f5d0812b
--- /dev/null
+++ b/script/get-dataset-openimages/README-extra.md
@@ -0,0 +1,2 @@
+# Ubuntu 22.04
+`sudo apt-get install -y libgl1-mesa-dev`
diff --git a/script/get-dataset-openimages/README.md b/script/get-dataset-openimages/README.md
new file mode 100644
index 0000000000..6a2d88b0b6
--- /dev/null
+++ b/script/get-dataset-openimages/README.md
@@ -0,0 +1,251 @@
+Automatically generated README for this automation recipe: **get-dataset-openimages**
+
+Category: **AI/ML datasets**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-openimages,0a9d49b644cf4142) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,dataset,openimages,open-images,object-detection,original*
+* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get dataset openimages open-images object-detection original" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,dataset,openimages,open-images,object-detection,original`
+
+`cm run script --tags=get,dataset,openimages,open-images,object-detection,original[,variations] `
+
+*or*
+
+`cmr "get dataset openimages open-images object-detection original"`
+
+`cmr "get dataset openimages open-images object-detection original [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,openimages,open-images,object-detection,original',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
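+
+For instance, the 500-image validation subset can be requested by appending variations to the `tags` string (a sketch):
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,openimages,open-images,object-detection,original,_500,_validation',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```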
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,openimages,open-images,object-detection,original"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,openimages,open-images,object-detection,original) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset openimages open-images object-detection original[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_filter` + - Workflow: + * `_filter,calibration` + - Workflow: + * `_filter-size.#` + - Workflow: + * `_using-fiftyone` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_fiftyone + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,openssl,lib + - CM script: [get-openssl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-openssl) + +
+ + + * Group "**annotations**" +
+ Click here to expand this section. + + * `_custom-annotations` + - Environment variables: + - *CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS*: `yes` + - Workflow: + * **`_default-annotations`** (default) + - Environment variables: + - *CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS*: `no` + - Workflow: + +
+ + + * Group "**dataset-type**" +
+ Click here to expand this section. + + * `_calibration` + - Environment variables: + - *CM_DATASET_CALIBRATION*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,openimages,calibration + * CM names: `--adr.['openimages-calibration']...` + - CM script: [get-dataset-openimages-calibration](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openimages-calibration) + * **`_validation`** (default) + - Environment variables: + - *CM_DATASET_CALIBRATION*: `no` + - Workflow: + +
+ + + * Group "**size**" +
+ Click here to expand this section. + + * **`_50`** (default) + - Environment variables: + - *CM_DATASET_SIZE*: `50` + - Workflow: + * `_500` + - Environment variables: + - *CM_DATASET_SIZE*: `500` + - Workflow: + * `_full` + - Environment variables: + - *CM_DATASET_SIZE*: `` + - Workflow: + * `_size.#` + - Environment variables: + - *CM_DATASET_SIZE*: `#` + - Workflow: + +
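+
+For example, a hypothetical run fetching the full validation set with the custom MLPerf annotations would combine one variation from each group:
+
+```bash
+cmr "get dataset openimages open-images object-detection original _full _custom-annotations _validation"
+```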
+ + +#### Default variations + +`_50,_default-annotations,_validation` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_DATASET_CALIBRATION: `no` + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages/_cm.json)*** + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_requests + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * mlperf,inference,source + * CM names: `--adr.['inference-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + * get,generic-python-lib,_boto3 + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_tqdm + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_opencv-python + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_pandas + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_pycocotools + * CM names: `--adr.['pycocotools']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages/run.sh) + 1. ***Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages/_cm.json)*** + * get,openimages,annotations + * `if (CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS == yes)` + - CM script: [get-dataset-openimages-annotations](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openimages-annotations) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages/customize.py)*** + 1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openimages/_cm.json) + +___ +### Script output +`cmr "get dataset openimages open-images object-detection original [,variations]" -j` +#### New environment keys (filter) + +* `CM_CALIBRATION_DATASET_PATH` +* `CM_DATASET_ANNOTATIONS_DIR_PATH` +* `CM_DATASET_ANNOTATIONS_FILE_PATH` +* `CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH` +* `CM_DATASET_PATH` +* `CM_DATASET_PATH_ROOT` +* `CM_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH` +#### New environment keys auto-detected from customize + +* `CM_CALIBRATION_DATASET_PATH` +* `CM_DATASET_ANNOTATIONS_DIR_PATH` +* `CM_DATASET_ANNOTATIONS_FILE_PATH` +* `CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH` +* `CM_DATASET_PATH` +* `CM_DATASET_PATH_ROOT` +* `CM_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH` \ No newline at end of file diff --git a/script/get-dataset-openimages/_cm.json b/script/get-dataset-openimages/_cm.json new file mode 100644 index 0000000000..06ae8c2bb9 --- /dev/null +++ b/script/get-dataset-openimages/_cm.json @@ -0,0 +1,180 @@ +{ + "alias": "get-dataset-openimages", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "category_sort":8500, + "deps": [ + { + "tags": "get,sys-utils-cm" + }, + { + "tags": "get,python3", + "names": [ + "python", + "python3" + ] + }, + { + "tags": "get,generic-python-lib,_requests" + }, + { + "names": [ + "inference-src" + ], + "tags": "mlperf,inference,source", + "force_env_keys": [ + "CM_GIT_*" + ] + }, + { + "tags": "get,generic-python-lib,_boto3" + }, + { + "tags": "get,generic-python-lib,_tqdm" + }, + { + "tags": "get,generic-python-lib,_numpy" + }, + { + "tags": "get,generic-python-lib,_opencv-python" + }, + { + "tags": "get,generic-python-lib,_pandas" + }, + { + "names": [ + "pycocotools" + ], + "tags": "get,generic-python-lib,_pycocotools" + } + ], + "posthook_deps": [ + { + "tags": "get,openimages,annotations", + "enable_if_env": { + "CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS": [ + "yes" + ] + } + } + ], + "default_env": { + "CM_DATASET_CALIBRATION": "no" + }, + "env": { + "CM_DATASET": "OPENIMAGES" + }, + "new_env_keys": [ + "CM_DATASET_PATH", + "CM_DATASET_PATH_ROOT", + "CM_DATASET_ANNOTATIONS_DIR_PATH", + "CM_DATASET_ANNOTATIONS_FILE_PATH", + "CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH", + "CM_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH", + "CM_CALIBRATION_DATASET_PATH" + ], + "tags": [ + "get", + "dataset", + "openimages", + "open-images", + "object-detection", + "original" + ], + "uid": "0a9d49b644cf4142", + "variations": { + "using-fiftyone": { + "deps": [ + { + "tags": "get,generic-python-lib,_fiftyone" + }, + { + "tags": "get,openssl,lib", + "version": "1.1.1" + } + ], + "add_deps_recursive": { + "inference-src": { + "version": "r2.1" + } + } + }, + "calibration": { + "group": "dataset-type", + "deps": [ + { + "names": [ + "openimages-calibration" + ], + "tags": "get,openimages,calibration" + } + ], + "env": { + "CM_DATASET_CALIBRATION": "yes" + } + }, + "filter": { + }, + "filter-size.#": { + "ad": { + "openimages-calibration": { + "tags": "_filter-size.#" + } + } + }, + "filter,calibration": { + "ad": { + "openimages-calibration": { + "tags": "_filter" + } + } + }, + "validation": { + "group": "dataset-type", + "default": true, + "env": { + "CM_DATASET_CALIBRATION": "no" + } + }, + "size.#": { + "group": "size", + "env": { + "CM_DATASET_SIZE": "#" + } + }, + "500": { + "group": "size", + "env": { + 
"CM_DATASET_SIZE": "500" + } + }, + "50": { + "group": "size", + "default": true, + "env": { + "CM_DATASET_SIZE": "50" + } + }, + "full": { + "group": "size", + "env": { + "CM_DATASET_SIZE": "" + } + }, + "custom-annotations": { + "group": "annotations", + "env": { + "CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS": "yes" + } + }, + "default-annotations": { + "group": "annotations", + "default": true, + "env": { + "CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS": "no" + } + } + } +} diff --git a/script/get-dataset-openimages/customize.py b/script/get-dataset-openimages/customize.py new file mode 100644 index 0000000000..3040fff2ba --- /dev/null +++ b/script/get-dataset-openimages/customize.py @@ -0,0 +1,82 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + print ("") + print ("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + print ("") + + if os_info['platform'] == 'windows': + MLPERF_CLASSES=['Airplane','Antelope','Apple','Backpack','Balloon','Banana', + 'Barrel','Baseball bat','Baseball glove','Bee','Beer','Bench','Bicycle', + 'Bicycle helmet','Bicycle wheel','Billboard','Book','Bookcase','Boot', + 'Bottle','Bowl','Bowling equipment','Box','Boy','Brassiere','Bread', + 'Broccoli','Bronze sculpture','Bull','Bus','Bust','Butterfly','Cabinetry', + 'Cake','Camel','Camera','Candle','Candy','Cannon','Canoe','Carrot','Cart', + 'Castle','Cat','Cattle','Cello','Chair','Cheese','Chest of drawers','Chicken', + 'Christmas tree','Coat','Cocktail','Coffee','Coffee cup','Coffee table','Coin', + 'Common sunflower','Computer keyboard','Computer monitor','Convenience store', + 'Cookie','Countertop','Cowboy hat','Crab','Crocodile','Cucumber','Cupboard', + 'Curtain','Deer','Desk','Dinosaur','Dog','Doll','Dolphin','Door','Dragonfly', + 'Drawer','Dress','Drum','Duck','Eagle','Earrings','Egg (Food)','Elephant', + 'Falcon','Fedora','Flag','Flowerpot','Football','Football helmet','Fork', + 'Fountain','French fries','French horn','Frog','Giraffe','Girl','Glasses', + 'Goat','Goggles','Goldfish','Gondola','Goose','Grape','Grapefruit','Guitar', + 'Hamburger','Handbag','Harbor seal','Headphones','Helicopter','High heels', + 'Hiking equipment','Horse','House','Houseplant','Human arm','Human beard', + 'Human body','Human ear','Human eye','Human face','Human foot','Human hair', + 'Human hand','Human head','Human leg','Human mouth','Human nose','Ice cream', + 'Jacket','Jeans','Jellyfish','Juice','Kitchen & dining room table','Kite', + 'Lamp','Lantern','Laptop','Lavender (Plant)','Lemon','Light bulb','Lighthouse', + 'Lily','Lion','Lipstick','Lizard','Man','Maple','Microphone','Mirror', + 'Mixing bowl','Mobile phone','Monkey','Motorcycle','Muffin','Mug','Mule', + 'Mushroom','Musical keyboard','Necklace','Nightstand','Office building', + 'Orange','Owl','Oyster','Paddle','Palm tree','Parachute','Parrot','Pen', + 'Penguin','Personal flotation device','Piano','Picture frame','Pig','Pillow', + 'Pizza','Plate','Platter','Porch','Poster','Pumpkin','Rabbit','Rifle', + 'Roller skates','Rose','Salad','Sandal','Saucer','Saxophone','Scarf','Sea lion', + 'Sea turtle','Sheep','Shelf','Shirt','Shorts','Shrimp','Sink','Skateboard', + 'Ski','Skull','Skyscraper','Snake','Sock','Sofa bed','Sparrow','Spider','Spoon', + 'Sports uniform','Squirrel','Stairs','Stool','Strawberry','Street light', + 'Studio couch','Suit','Sun hat','Sunglasses','Surfboard','Sushi','Swan', + 'Swimming pool','Swimwear','Tank','Tap','Taxi','Tea','Teddy bear','Television', + 
'Tent','Tie','Tiger','Tin can','Tire','Toilet','Tomato','Tortoise','Tower', + 'Traffic light','Train','Tripod','Truck','Trumpet','Umbrella','Van','Vase', + 'Vehicle registration plate','Violin','Wall clock','Waste container','Watch', + 'Whale','Wheel','Wheelchair','Whiteboard','Window','Wine','Wine glass','Woman', + 'Zebra','Zucchini'] + + x = '' + for v in MLPERF_CLASSES: + if x!='': x+=' ' + x+='"'+v+'"' + env['CM_DATASET_OPENIMAGES_CLASSES']=x + + return {'return': 0} + +def postprocess(i): + env = i['env'] + + env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join(os.getcwd(), 'install', 'annotations') + + if env.get('CM_DATASET_CALIBRATION','') == "no": + env['CM_DATASET_PATH_ROOT'] = os.path.join(os.getcwd(), 'install') + env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'validation', 'data') + annotations_file_path = os.path.join(env['CM_DATASET_ANNOTATIONS_DIR_PATH'], "openimages-mlperf.json") + env['CM_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path + env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = annotations_file_path + if env.get("CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS",'') == "yes": + annotations_file_src = env['CM_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH'] + shutil.copy(annotations_file_src, env['CM_DATASET_ANNOTATIONS_DIR_PATH']) + else: + env['CM_CALIBRATION_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'calibration', 'data') + annotations_file_path = os.path.join(env['CM_DATASET_ANNOTATIONS_DIR_PATH'], "openimages-calibration-mlperf.json") + env['CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path + + + return {'return': 0} diff --git a/script/get-dataset-openimages/run.bat b/script/get-dataset-openimages/run.bat new file mode 100644 index 0000000000..742542d251 --- /dev/null +++ b/script/get-dataset-openimages/run.bat @@ -0,0 +1,24 @@ +@echo off + +set CUR_DIR=%cd% +set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% + +if not exist install mkdir install + +set INSTALL_DIR=%CUR_DIR%\install + +cd %CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH% + +if not "%CM_DATASET_SIZE%" == "" ( + set MAX_IMAGES=--max-images %CM_DATASET_SIZE% --seed 42 +) else ( + set MAX_IMAGES= +) + +%CM_PYTHON_BIN% tools\openimages.py %MAX_IMAGES% --dataset-dir=%INSTALL_DIR% --output-labels=openimages-mlperf.json --classes %CM_DATASET_OPENIMAGES_CLASSES% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +rem Next is a hack to support MLPerf inference on Windows +cd %INSTALL_DIR% +if not exist validation\data\annotations mkdir validation\data\annotations +copy annotations\* validation\data\annotations diff --git a/script/get-dataset-openimages/run.sh b/script/get-dataset-openimages/run.sh new file mode 100644 index 0000000000..2fc6eaddf6 --- /dev/null +++ b/script/get-dataset-openimages/run.sh @@ -0,0 +1,40 @@ +#!/bin/bash +python3() { + ${CM_PYTHON_BIN_WITH_PATH} "$@" +} +export -f python3 + +CUR=${PWD} +mkdir -p install +INSTALL_DIR=${CUR}/install + +cd ${CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH} +cd tools +if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then + if [ ! -z ${CM_DATASET_SIZE} ]; then + max_images=" -m ${CM_DATASET_SIZE}" + else + max_images="" + fi + cmd="./openimages_mlperf.sh -d ${INSTALL_DIR} ${max_images}" + echo $cmd + eval $cmd + test $? 
-eq 0 || exit 1 +else + if [ -n ${CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH} ]; then + calibration_file_string=" --calibration-file ${CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH}" + else + calibration_file_string="" + fi + cmd="./openimages_calibration_mlperf.sh -d \"${INSTALL_DIR} ${calibration_file_string}\"" + echo $cmd + eval $cmd + test $? -eq 0 || exit 1 +fi +cd ${INSTALL_DIR} + +if [[ ! -d "open-images-v6-mlperf" ]]; then + ln -sf ../ open-images-v6-mlperf +fi + +test $? -eq 0 || exit 1 diff --git a/script/get-dataset-openorca/README.md b/script/get-dataset-openorca/README.md new file mode 100644 index 0000000000..8f64bb3ee1 --- /dev/null +++ b/script/get-dataset-openorca/README.md @@ -0,0 +1,175 @@ +Automatically generated README for this automation recipe: **get-dataset-openorca** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-openorca,9252c4d90d5940b7) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openorca)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,dataset,openorca,language-processing,original* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get dataset openorca language-processing original" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,dataset,openorca,language-processing,original` + +`cm run script --tags=get,dataset,openorca,language-processing,original[,variations] ` + +*or* + +`cmr "get dataset openorca language-processing original"` + +`cmr "get dataset openorca language-processing original [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,openorca,language-processing,original',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
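+The generated snippet above is schematic. A minimal, hypothetical end-to-end sketch (the `_500` and `_validation` variation tags are described under "Variations" below, and it assumes the result dictionary exposes the filtered environment keys under `new_env`):
+
+```python
+import cmind
+
+# Select variations via "_"-prefixed tags and run the recipe.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,openorca,language-processing,original,_500,_validation',
+                  'out': 'con'})
+if r['return'] > 0:
+    raise SystemExit(r['error'])
+
+# CM_DATASET_PATH is listed under "New environment keys" for this script.
+print(r.get('new_env', {}).get('CM_DATASET_PATH', ''))
+```
+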
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,openorca,language-processing,original"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,openorca,language-processing,original) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset openorca language-processing original[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**dataset-type**" +
+ Click here to expand this section. + + * `_calibration` + - Environment variables: + - *CM_DATASET_CALIBRATION*: `yes` + - Workflow: + * **`_validation`** (default) + - Environment variables: + - *CM_DATASET_CALIBRATION*: `no` + - Workflow: + +
+ + + * Group "**size**" +
+ Click here to expand this section. + + * `_500` + - Environment variables: + - *CM_DATASET_SIZE*: `500` + - Workflow: + * **`_60`** (default) + - Environment variables: + - *CM_DATASET_SIZE*: `60` + - Workflow: + * `_full` + - Environment variables: + - *CM_DATASET_SIZE*: `24576` + - Workflow: + * `_size.#` + - Environment variables: + - *CM_DATASET_SIZE*: `#` + - Workflow: + +
+ + +#### Default variations + +`_60,_validation` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_DATASET_CALIBRATION: `no` + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openorca/_cm.json)*** + * get,git,repo,_lfs,_repo.https://huggingface.co/datasets/Open-Orca/OpenOrca + * CM names: `--adr.['openorca-src']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openorca/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openorca/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openorca/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openorca/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-openorca/_cm.json) + +___ +### Script output +`cmr "get dataset openorca language-processing original [,variations]" -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +#### New environment keys auto-detected from customize + +* `CM_DATASET_OPENORCA_PARQUET` +* `CM_DATASET_PATH` +* `CM_DATASET_PATH_ROOT` \ No newline at end of file diff --git a/script/get-dataset-openorca/_cm.json b/script/get-dataset-openorca/_cm.json new file mode 100644 index 0000000000..ad57e1e9c0 --- /dev/null +++ b/script/get-dataset-openorca/_cm.json @@ -0,0 +1,80 @@ +{ + "alias": "get-dataset-openorca", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "category_sort": 8500, + "default_env": { + "CM_DATASET_CALIBRATION": "no" + }, + "deps": [ + { + "force_env_keys": [ + "CM_GIT_*" + ], + "names": [ + "openorca-src" + ], + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_DATASET_OPENORCA_PATH" + }, + "tags": "get,git,repo,_lfs,_repo.https://huggingface.co/datasets/Open-Orca/OpenOrca", + "extra_cache_tags": "openorca,repo,src" + } + ], + "env": { + "CM_DATASET": "OPENORCA" + }, + "new_env_keys": [ + "CM_DATASET_*" + ], + "tags": [ + "get", + "dataset", + "openorca", + "language-processing", + "original" + ], + "uid": "9252c4d90d5940b7", + "variations": { + "60": { + "default": true, + "env": { + "CM_DATASET_SIZE": "60" + }, + "group": "size" + }, + "500": { + "env": { + "CM_DATASET_SIZE": "500" + }, + "group": "size" + }, + "calibration": { + "env": { + "CM_DATASET_CALIBRATION": "yes" + }, + "group": "dataset-type" + }, + "full": { + "env": { + "CM_DATASET_SIZE": "24576" + }, + "group": "size" + }, + "size.#": { + "env": { + "CM_DATASET_SIZE": "#" + }, + "group": "size" + }, + "validation": { + "default": true, + "env": { + "CM_DATASET_CALIBRATION": "no" + }, + "group": "dataset-type" + } + } +} diff --git a/script/get-dataset-openorca/customize.py b/script/get-dataset-openorca/customize.py new file mode 100644 index 0000000000..059c83826d --- /dev/null +++ b/script/get-dataset-openorca/customize.py @@ -0,0 +1,20 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + env = i['env'] + + return {'return': 0} + +def postprocess(i): + env = i['env'] + if env.get('CM_DATASET_CALIBRATION','') == "no": + env['CM_DATASET_PATH_ROOT'] = env['CM_DATASET_OPENORCA_PATH'] + 
env['CM_DATASET_PATH'] = env['CM_DATASET_OPENORCA_PATH'] + env['CM_DATASET_OPENORCA_PARQUET'] = os.path.join(env['CM_DATASET_OPENORCA_PATH'], '1M-GPT4-Augmented.parquet') + else: + env['CM_CALIBRATION_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'calibration', 'data') + + return {'return': 0} diff --git a/script/get-dataset-squad-vocab/README.md b/script/get-dataset-squad-vocab/README.md new file mode 100644 index 0000000000..fa6bb057af --- /dev/null +++ b/script/get-dataset-squad-vocab/README.md @@ -0,0 +1,144 @@ +Automatically generated README for this automation recipe: **get-dataset-squad-vocab** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-squad-vocab,e38874fff5094577) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad-vocab)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,aux,dataset-aux,language-processing,squad-aux,vocab,squad-vocab* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get aux dataset-aux language-processing squad-aux vocab squad-vocab" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,aux,dataset-aux,language-processing,squad-aux,vocab,squad-vocab` + +`cm run script --tags=get,aux,dataset-aux,language-processing,squad-aux,vocab,squad-vocab[,variations] ` + +*or* + +`cmr "get aux dataset-aux language-processing squad-aux vocab squad-vocab"` + +`cmr "get aux dataset-aux language-processing squad-aux vocab squad-vocab [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,aux,dataset-aux,language-processing,squad-aux,vocab,squad-vocab',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
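+As an illustrative sketch only (assuming the run result exposes the new environment keys under `new_env`, which this README does not guarantee):
+
+```python
+import cmind
+
+# Download the SQuAD vocabulary and read back where it was cached.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,aux,dataset-aux,language-processing,squad-aux,vocab,squad-vocab',
+                  'out': 'con'})
+if r['return'] > 0:
+    raise SystemExit(r['error'])
+
+# One of the keys advertised in "New environment keys" below.
+print(r.get('new_env', {}).get('CM_DATASET_SQUAD_VOCAB_PATH', ''))
+```
+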
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,aux,dataset-aux,language-processing,squad-aux,vocab,squad-vocab"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,aux,dataset-aux,language-processing,squad-aux,vocab,squad-vocab) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get aux dataset-aux language-processing squad-aux vocab squad-vocab[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**download-source**" +
+ Click here to expand this section. + + * **`_from.zenodo`** (default) + - Environment variables: + - *CM_WGET_URL*: `https://zenodo.org/record/3733868/files/vocab.txt` + - Workflow: + +
+ + +#### Default variations + +`_from.zenodo` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad-vocab/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad-vocab/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad-vocab/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad-vocab/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad-vocab/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad-vocab/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad-vocab/_cm.json) + +___ +### Script output +`cmr "get aux dataset-aux language-processing squad-aux vocab squad-vocab [,variations]" -j` +#### New environment keys (filter) + +* `CM_DATASET_SQUAD_VOCAB_PATH` +* `CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH` +#### New environment keys auto-detected from customize + +* `CM_DATASET_SQUAD_VOCAB_PATH` +* `CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH` \ No newline at end of file diff --git a/script/get-dataset-squad-vocab/_cm.json b/script/get-dataset-squad-vocab/_cm.json new file mode 100644 index 0000000000..2092cdd4ee --- /dev/null +++ b/script/get-dataset-squad-vocab/_cm.json @@ -0,0 +1,30 @@ +{ + "alias": "get-dataset-squad-vocab", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "new_env_keys": [ + "CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH", + "CM_DATASET_SQUAD_VOCAB_PATH" + ], + "tags": [ + "get", + "aux", + "dataset-aux", + "language-processing", + "squad-aux", + "vocab", + "squad-vocab" + ], + "uid": "e38874fff5094577", + "variations": { + "from.zenodo": { + "group": "download-source", + "default": true, + "env": { + "CM_WGET_URL": "https://zenodo.org/record/3733868/files/vocab.txt" + } + } + } +} diff --git a/script/get-dataset-squad-vocab/customize.py b/script/get-dataset-squad-vocab/customize.py new file mode 100644 index 0000000000..028e86137f --- /dev/null +++ b/script/get-dataset-squad-vocab/customize.py @@ -0,0 +1,19 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + return {'return':0} + + +def postprocess(i): + env = i['env'] + + env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] = os.path.join(os.getcwd(), 'vocab.txt') + env['CM_DATASET_SQUAD_VOCAB_PATH'] = os.path.join(os.getcwd(), 'vocab.txt') + + return {'return':0} diff --git a/script/get-dataset-squad-vocab/run.sh b/script/get-dataset-squad-vocab/run.sh new file mode 100644 index 0000000000..e16af2361e --- /dev/null +++ b/script/get-dataset-squad-vocab/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +wget -nc ${CM_WGET_URL} --no-check-certificate +test $? -eq 0 || exit 1 diff --git a/script/get-dataset-squad/README-extra.md b/script/get-dataset-squad/README-extra.md new file mode 100644 index 0000000000..4497abe6b5 --- /dev/null +++ b/script/get-dataset-squad/README-extra.md @@ -0,0 +1,20 @@ +# Downloads SQUAD Dataset +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the SQUAD dataset. 
+ +## Usage + +``` +cm run script --tags=get,dataset,squad --version=[VERSION] +``` +where [VERSION] is one of +* `1.1` +* `2.0` + +## Exported Variables +* `CM_DATASET_SQUAD_PATH:` Directory path to SQUAD dataset +* `CM_DATASET_SQUAD_TRAIN_PATH:` JSON file path to SQUAD training dataset +* `CM_DATASET_SQUAD_VAL_PATH:` JSON file path to SQUAD validation dataset + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/script/get-dataset-squad/README.md b/script/get-dataset-squad/README.md new file mode 100644 index 0000000000..b2a3cc7b75 --- /dev/null +++ b/script/get-dataset-squad/README.md @@ -0,0 +1,131 @@ +Automatically generated README for this automation recipe: **get-dataset-squad** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dataset-squad,6651c119c3ae49b3) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,dataset,squad,language-processing,validation,original* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get dataset squad language-processing validation original" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,dataset,squad,language-processing,validation,original` + +`cm run script --tags=get,dataset,squad,language-processing,validation,original ` + +*or* + +`cmr "get dataset squad language-processing validation original"` + +`cmr "get dataset squad language-processing validation original " ` + + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,squad,language-processing,validation,original',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
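+For example, a hypothetical call pinning the dataset version (this assumes the CLI `--version` flag maps to a top-level `version` key in the Python API, mirroring how other script flags are passed):
+
+```python
+import cmind
+
+# Fetch SQuAD 2.0 instead of the default 1.1 (see "Versions" below).
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,squad,language-processing,validation,original',
+                  'version': '2.0',
+                  'out': 'con'})
+if r['return'] > 0:
+    raise SystemExit(r['error'])
+```
+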
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,squad,language-processing,validation,original"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,squad,language-processing,validation,original) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset squad language-processing validation original" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +#### Versions +Default version: `1.1` + +* `1.1` +* `2.0` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad/_cm.json)*** + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-squad/_cm.json) + +___ +### Script output +`cmr "get dataset squad language-processing validation original " -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +#### New environment keys auto-detected from customize + +* `CM_DATASET_PATH` +* `CM_DATASET_SQUAD_PATH` +* `CM_DATASET_SQUAD_VAL_PATH` \ No newline at end of file diff --git a/script/get-dataset-squad/_cm.json b/script/get-dataset-squad/_cm.json new file mode 100644 index 0000000000..1f54ea40e4 --- /dev/null +++ b/script/get-dataset-squad/_cm.json @@ -0,0 +1,44 @@ +{ + "alias": "get-dataset-squad", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "default_version": "1.1", + "deps": [ + { + "tags": "get,sys-utils-cm" + } + ], + "env": { + "CM_DATASET": "SQUAD" + }, + "new_env_keys": [ + "CM_DATASET_*" + ], + "tags": [ + "get", + "dataset", + "squad", + "language-processing", + "validation", + "original" + ], + "uid": "6651c119c3ae49b3", + "versions": { + "1.1": { + "env": { + "CM_TRAIN_FILENAME": "train-v1.1.json", + "CM_VAL_FILENAME": "dev-v1.1.json", + "CM_WGET_URL": "https://raw.githubusercontent.com/rajpurkar/SQuAD-explorer/master/dataset/dev-v1.1.json" + } + }, + "2.0": { + "env": { + "CM_TRAIN_FILENAME": "train-v2.0.json", + "CM_VAL_FILENAME": "dev-v2.0.json", + "CM_WGET_URL": "https://raw.githubusercontent.com/rajpurkar/SQuAD-explorer/master/dataset/dev-v2.0.json" + } + } + } +} diff --git a/script/get-dataset-squad/customize.py b/script/get-dataset-squad/customize.py new file mode 100644 index 0000000000..0575d782b5 --- /dev/null +++ b/script/get-dataset-squad/customize.py @@ -0,0 +1,20 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + return {'return':0} + + +def postprocess(i): + env = i['env'] + + env['CM_DATASET_SQUAD_PATH'] = os.getcwd() + env['CM_DATASET_PATH'] = os.getcwd() + env['CM_DATASET_SQUAD_VAL_PATH'] = os.path.join(os.getcwd(), env['CM_VAL_FILENAME']) + + return {'return':0} diff --git a/script/get-dataset-squad/run.sh b/script/get-dataset-squad/run.sh new file mode 100644 index 0000000000..6a7a7498ad --- /dev/null +++ b/script/get-dataset-squad/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +wget -nc ${CM_WGET_URL} --no-check-certificate +test $? 
-eq 0 || exit 1 + diff --git a/script/get-dlrm-data-mlperf-inference/README.md b/script/get-dlrm-data-mlperf-inference/README.md new file mode 100644 index 0000000000..04d466c8b5 --- /dev/null +++ b/script/get-dlrm-data-mlperf-inference/README.md @@ -0,0 +1,153 @@ +Automatically generated README for this automation recipe: **get-dlrm-data-mlperf-inference** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dlrm-data-mlperf-inference,34bdfcd9c8364935) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm-data-mlperf-inference)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *get,dlrm,data,mlperf,inference* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get dlrm data mlperf inference" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,dlrm,data,mlperf,inference` + +`cm run script --tags=get,dlrm,data,mlperf,inference[,variations] [--input_flags]` + +*or* + +`cmr "get dlrm data mlperf inference"` + +`cmr "get dlrm data mlperf inference [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dlrm,data,mlperf,inference',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dlrm,data,mlperf,inference"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dlrm,data,mlperf,inference) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dlrm data mlperf inference[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_intel` + - Environment variables: + - *CM_DLRM_DATA_VARIATION*: `intel` + - Workflow: + * `_nvidia` + - Environment variables: + - *CM_DLRM_DATA_VARIATION*: `nvidia` + - Workflow: + +
+ + +#### Script flags mapped to environment +
+
+Click here to expand this section.
+
+* `--dlrm_data_path=value` → `CM_DLRM_DATA_PATH=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "dlrm_data_path":...})
+```
+
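+Filled in, that schematic call might look as follows (a sketch: `cm` stands for the `cmind` module, the path is a placeholder you must replace, and `_nvidia` is one of the variations listed above):
+
+```python
+import cmind as cm
+
+# --dlrm_data_path maps to CM_DLRM_DATA_PATH via the script's input_mapping.
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'get,dlrm,data,mlperf,inference,_nvidia',
+               'dlrm_data_path': '/path/to/dlrm_data',
+               'out': 'con'})
+if r['return'] > 0:
+    raise SystemExit(r['error'])
+```
+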
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm-data-mlperf-inference/_cm.yaml) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm-data-mlperf-inference/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm-data-mlperf-inference/_cm.yaml) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm-data-mlperf-inference/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm-data-mlperf-inference/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm-data-mlperf-inference/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm-data-mlperf-inference/_cm.yaml) + +___ +### Script output +`cmr "get dlrm data mlperf inference [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DLRM_DATA_PATH` +#### New environment keys auto-detected from customize diff --git a/script/get-dlrm-data-mlperf-inference/_cm.yaml b/script/get-dlrm-data-mlperf-inference/_cm.yaml new file mode 100644 index 0000000000..1f0918cc98 --- /dev/null +++ b/script/get-dlrm-data-mlperf-inference/_cm.yaml @@ -0,0 +1,22 @@ +alias: get-dlrm-data-mlperf-inference +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +tags: +- get +- dlrm +- data +- mlperf +- inference +uid: 34bdfcd9c8364935 +new_env_keys: + - CM_DLRM_DATA_PATH +input_mapping: + dlrm_data_path: CM_DLRM_DATA_PATH +variations: + nvidia: + env: + CM_DLRM_DATA_VARIATION: nvidia + intel: + env: + CM_DLRM_DATA_VARIATION: intel diff --git a/script/get-dlrm-data-mlperf-inference/customize.py b/script/get-dlrm-data-mlperf-inference/customize.py new file mode 100644 index 0000000000..d26e2b195a --- /dev/null +++ b/script/get-dlrm-data-mlperf-inference/customize.py @@ -0,0 +1,71 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + dlrm_data_path = env.get('CM_DLRM_DATA_PATH', '') + if dlrm_data_path == '' or not os.path.exists(dlrm_data_path): + return {'return': 1, 'error': f'Please input a valid path as --dlrm_data_path'} + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + variation = env['CM_DLRM_DATA_VARIATION'] + + if variation == "nvidia": + if not os.path.exists(os.path.join(dlrm_data_path, "model")): + return {'return': 1, 'error': f'model directory is missing inside {dlrm_data_path}'} + if not os.path.exists(os.path.join(dlrm_data_path, "criteo")): + return {'return': 1, 'error': f'criteo directory is missing inside {dlrm_data_path}'} + if not os.path.exists(os.path.join(dlrm_data_path, "model", "model_weights")): + return {'return': 1, 'error': f'model_weights directory is missing inside {dlrm_data_path}/model'} + if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23")): + return {'return': 1, 'error': f'day23 directory is missing inside {dlrm_data_path}/day23'} + if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32")): + return {'return': 1, 'error': f'fp32 directory is missing inside {dlrm_data_path}/day23'} + + if not 
os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot.npz")) and not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot_unpacked")): + return {'return': 1, 'error': f'day_23_sparse_multi_hot.npz is missing inside {dlrm_data_path}/criteo/day23/fp32'} + if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_dense.npy")): + return {'return': 1, 'error': f'day_23_dense.npy is missing inside {dlrm_data_path}/criteo/day23/fp32'} + if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_labels.npy")): + return {'return': 1, 'error': f'day_23_labels.npy is missing inside {dlrm_data_path}/criteo/day23/fp32'} + if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "raw_data")): + return {'return': 1, 'error': f'raw_data is missing inside {dlrm_data_path}/criteo/day23'} + + + if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot_unpacked")): + os.system(f"unzip {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot.npz')} -d {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot_unpacked')}") + + xsep = ' && ' + run_cmd = '' + if os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot.npz")): + file_path = os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot.npz") + run_cmd = ("echo {} {} | md5sum -c").format('c46b7e31ec6f2f8768fa60bdfc0f6e40', file_path) + + if run_cmd != '': + run_cmd += xsep + + file_path = os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_dense.npy") + run_cmd += ("echo {} {} | md5sum -c").format('cdf7af87cbc7e9b468c0be46b1767601', file_path) + + file_path = os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_labels.npy") + run_cmd += xsep + ("echo {} {} | md5sum -c").format('dd68f93301812026ed6f58dfb0757fa7', file_path) + + env['CM_RUN_CMD'] = run_cmd + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DLRM_DATA_PATH'] + + return {'return':0} diff --git a/script/get-dlrm-data-mlperf-inference/run.sh b/script/get-dlrm-data-mlperf-inference/run.sh new file mode 100644 index 0000000000..d1cb7df69a --- /dev/null +++ b/script/get-dlrm-data-mlperf-inference/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +run "$CM_RUN_CMD" diff --git a/script/get-dlrm/README-extra.md b/script/get-dlrm/README-extra.md new file mode 100644 index 0000000000..8c70c36cd8 --- /dev/null +++ b/script/get-dlrm/README-extra.md @@ -0,0 +1,15 @@ +# Get DLRM +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones the [DLRM repository](https://github.com/facebookdresearch/dlrm). + +## Commands +To install +``` +cm run script --tags=get,mlperf,dlrm,src +``` + +## Exported Variables +* `DLRM_DIR`: Directory path of the cloned dlrm repository + +## Supported and Tested OS +1. 
Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/script/get-dlrm/README.md b/script/get-dlrm/README.md new file mode 100644 index 0000000000..9bda041d33 --- /dev/null +++ b/script/get-dlrm/README.md @@ -0,0 +1,145 @@ +Automatically generated README for this automation recipe: **get-dlrm** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-dlrm,63680ac2449a4241) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,src,dlrm* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get src dlrm" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,src,dlrm` + +`cm run script --tags=get,src,dlrm[,variations] ` + +*or* + +`cmr "get src dlrm"` + +`cmr "get src dlrm [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,src,dlrm',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,src,dlrm"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,src,dlrm) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get src dlrm[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_full-history` + - Environment variables: + - *CM_GIT_DEPTH*: `` + - Workflow: + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_GIT_DEPTH: `--depth 10` +* CM_GIT_PATCH: `no` +* CM_GIT_URL: `https://github.com/facebookresearch/dlrm.git` + +
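+For instance, a hypothetical run that overrides these defaults to clone a fork with full git history (equivalent to passing `--env.CM_GIT_URL=...` plus the `_full-history` variation on the command line; the fork URL below is a placeholder):
+
+```python
+import cmind
+
+# Override default_env keys through the `env` dictionary.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,src,dlrm,_full-history',
+                  'env': {'CM_GIT_URL': 'https://github.com/your-fork/dlrm.git'},
+                  'out': 'con'})
+if r['return'] > 0:
+    raise SystemExit(r['error'])
+```
+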
+ +#### Versions +Default version: `main` + +* `main` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dlrm/_cm.json) + +___ +### Script output +`cmr "get src dlrm [,variations]" -j` +#### New environment keys (filter) + +* `DLRM_DIR` +#### New environment keys auto-detected from customize diff --git a/script/get-dlrm/_cm.json b/script/get-dlrm/_cm.json new file mode 100644 index 0000000000..f99423b789 --- /dev/null +++ b/script/get-dlrm/_cm.json @@ -0,0 +1,41 @@ +{ + "alias": "get-dlrm", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "default_version": "main", + "category": "AI/ML models", + "deps": [ + { + "tags": "detect,os" + } + ], + "default_env": { + "CM_GIT_DEPTH": "--depth 10", + "CM_GIT_PATCH": "no", + "CM_GIT_URL": "https://github.com/facebookresearch/dlrm.git" + }, + "new_env_keys": [ + "DLRM_DIR" + ], + "tags": [ + "get", + "src", + "dlrm" + ], + "uid": "63680ac2449a4241", + "variations": { + "full-history": { + "env": { + "CM_GIT_DEPTH": "" + } + } + }, + "versions": { + "main": { + "env": { + "CM_GIT_CHECKOUT": "main" + } + } + } +} diff --git a/script/get-dlrm/customize.py b/script/get-dlrm/customize.py new file mode 100644 index 0000000000..479efc350c --- /dev/null +++ b/script/get-dlrm/customize.py @@ -0,0 +1,37 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + meta = i['meta'] + + if 'CM_GIT_DEPTH' not in env: + env['CM_GIT_DEPTH'] = '' + + if 'CM_GIT_RECURSE_SUBMODULES' not in env: + env['CM_GIT_RECURSE_SUBMODULES'] = '' + + need_version = env.get('CM_VERSION','') + versions = meta['versions'] + + if need_version!='' and not need_version in versions: + env['CM_GIT_CHECKOUT'] = need_version + + return {'return':0} + + +def postprocess(i): + + env = i['env'] + + env['DLRM_DIR'] = os.path.join(os.getcwd(), "dlrm") + + return {'return':0} + diff --git a/script/get-dlrm/run.sh b/script/get-dlrm/run.sh new file mode 100644 index 0000000000..37e9e59a7c --- /dev/null +++ b/script/get-dlrm/run.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=$PWD +SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} + +echo "******************************************************" +echo "Cloning DLRM from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..." + +if [ ! 
-d "dlrm" ]; then + git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} dlrm + if [ "${?}" != "0" ]; then exit 1; fi +fi diff --git a/script/get-docker/README.md b/script/get-docker/README.md new file mode 100644 index 0000000000..21a5b48883 --- /dev/null +++ b/script/get-docker/README.md @@ -0,0 +1,121 @@ +Automatically generated README for this automation recipe: **get-docker** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-docker,6192accce4234084) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-docker)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,install,docker,engine* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get install docker engine" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,install,docker,engine` + +`cm run script --tags=get,install,docker,engine ` + +*or* + +`cmr "get install docker engine"` + +`cmr "get install docker engine " ` + + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,install,docker,engine',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,install,docker,engine"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,install,docker,engine) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get install docker engine" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-docker/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-docker/_cm.json) + 1. ***Run native script if exists*** + * [run-ubuntu.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-docker/run-ubuntu.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-docker/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-docker/_cm.json) + +___ +### Script output +`cmr "get install docker engine " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/get-docker/_cm.json b/script/get-docker/_cm.json new file mode 100644 index 0000000000..7ce6cce2c5 --- /dev/null +++ b/script/get-docker/_cm.json @@ -0,0 +1,29 @@ +{ + "alias": "get-docker", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "deps": [ + { + "tags": "detect,os" + } + ], + "docker_input_mapping": {}, + "input_description": {}, + "input_mapping": {}, + "new_env_keys": [], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "get", + "install", + "docker", + "engine" + ], + "uid": "6192accce4234084", + "variations": {}, + "versions": {} +} diff --git a/script/get-docker/run-ubuntu.sh b/script/get-docker/run-ubuntu.sh new file mode 100644 index 0000000000..6aafc26aac --- /dev/null +++ b/script/get-docker/run-ubuntu.sh @@ -0,0 +1,40 @@ +#!/bin/bash +export DEBIAN_FRONTEND=noninteractive +sudo apt-get update +cmd="sudo apt-get install -y ca-certificates curl gnupg" +echo "$cmd" +eval "$cmd" + +test $? -eq 0 || exit $? + +if [[ ! -d /etc/apt/keyrings ]]; then + sudo install -m 0755 -d /etc/apt/keyrings +fi +test $? -eq 0 || exit $? + +cmd="curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg" +echo "$cmd" +eval "$cmd" + +sudo chmod a+r /etc/apt/keyrings/docker.gpg +echo \ + "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ + "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null +sudo apt-get update +cmd="sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit $? + +if [[ -z $USER ]]; then + USER=`whoami` +fi + +cmd="sudo usermod -aG docker $USER" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit $? +#exec newgrp docker +#sudo su - $USER + diff --git a/script/get-gcc/README-extra.md b/script/get-gcc/README-extra.md new file mode 100644 index 0000000000..bb9d976944 --- /dev/null +++ b/script/get-gcc/README-extra.md @@ -0,0 +1,15 @@ +# Get GCC +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed gcc on the system. 
+ +## Exported Variables +* `CM_GCC_BIN` +* `CM_GCC_BIN_WITH_PATH` +* `CM_C_COMPILER_BIN` +* `CM_C_COMPILER_WITH_PATH` +* `CM_CXX_COMPILER_BIN` +* `CM_CXX_COMPILER_WITH_PATH` +* `CM_COMPILER_*` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/script/get-gcc/README.md b/script/get-gcc/README.md new file mode 100644 index 0000000000..be1a5a1069 --- /dev/null +++ b/script/get-gcc/README.md @@ -0,0 +1,156 @@ +Automatically generated README for this automation recipe: **get-gcc** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-gcc,dbf4ab5cbed74372) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-gcc)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,gcc,compiler,c-compiler,cpp-compiler,get-gcc* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get gcc compiler c-compiler cpp-compiler get-gcc" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,gcc,compiler,c-compiler,cpp-compiler,get-gcc` + +`cm run script --tags=get,gcc,compiler,c-compiler,cpp-compiler,get-gcc ` + +*or* + +`cmr "get gcc compiler c-compiler cpp-compiler get-gcc"` + +`cmr "get gcc compiler c-compiler cpp-compiler get-gcc " ` + + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,gcc,compiler,c-compiler,cpp-compiler,get-gcc',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
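+A hypothetical sketch of consuming the detection result from Python (assuming the result exposes the new environment keys under `new_env`):
+
+```python
+import cmind
+
+# Detect GCC and read back the detected version and C++ compiler path;
+# both keys appear in the "New environment keys" lists below.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,gcc,compiler,c-compiler,cpp-compiler,get-gcc',
+                  'out': 'con'})
+if r['return'] > 0:
+    raise SystemExit(r['error'])
+
+new_env = r.get('new_env', {})
+print(new_env.get('CM_GCC_VERSION', ''), new_env.get('CM_CXX_COMPILER_WITH_PATH', ''))
+```
+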
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,gcc,compiler,c-compiler,cpp-compiler,get-gcc"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,gcc,compiler,c-compiler,cpp-compiler,get-gcc) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get gcc compiler c-compiler cpp-compiler get-gcc" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-gcc/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-gcc/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-gcc/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-gcc/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-gcc/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-gcc/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-gcc/customize.py)*** + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-gcc/_cm.json)*** + * get,compiler-flags + - CM script: [get-compiler-flags](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-compiler-flags) + +___ +### Script output +`cmr "get gcc compiler c-compiler cpp-compiler get-gcc " -j` +#### New environment keys (filter) + +* `+ CFLAGS` +* `+ CXXFLAGS` +* `+ FFLAGS` +* `+ LDFLAGS` +* `+CM_HOST_OS_DEFAULT_INCLUDE_PATH` +* `+PATH` +* `CM_COMPILER_*` +* `CM_CXX_COMPILER_*` +* `CM_C_COMPILER_*` +* `CM_GCC_*` +* `CM_LINKER_*` +#### New environment keys auto-detected from customize + +* `CM_COMPILER_CACHE_TAGS` +* `CM_COMPILER_FAMILY` +* `CM_COMPILER_FLAGS_DEBUG` +* `CM_COMPILER_FLAGS_DEFAULT` +* `CM_COMPILER_FLAGS_FAST` +* `CM_COMPILER_VERSION` +* `CM_CXX_COMPILER_BIN` +* `CM_CXX_COMPILER_FLAG_OUTPUT` +* `CM_CXX_COMPILER_FLAG_VERSION` +* `CM_CXX_COMPILER_WITH_PATH` +* `CM_C_COMPILER_BIN` +* `CM_C_COMPILER_FLAG_OUTPUT` +* `CM_C_COMPILER_FLAG_VERSION` +* `CM_C_COMPILER_WITH_PATH` +* `CM_GCC_BIN` +* `CM_GCC_CACHE_TAGS` +* `CM_GCC_INSTALLED_PATH` +* `CM_LINKER_FLAGS_DEBUG` +* `CM_LINKER_FLAGS_DEFAULT` +* `CM_LINKER_FLAGS_FAST` \ No newline at end of file diff --git a/script/get-gcc/_cm.json b/script/get-gcc/_cm.json new file mode 100644 index 0000000000..92dd1baaee --- /dev/null +++ b/script/get-gcc/_cm.json @@ -0,0 +1,42 @@ +{ + "alias": "get-gcc", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Compiler automation", + "cache": true, + "clean_files": [], + "name": "Detect or install GCC compiler", + "new_env_keys": [ + "CM_GCC_*", + "CM_C_COMPILER_*", + "CM_CXX_COMPILER_*", + "CM_COMPILER_*", + "CM_LINKER_*", + "+ CFLAGS", + "+ CXXFLAGS", + "+ FFLAGS", + "+ LDFLAGS", + "+CM_HOST_OS_DEFAULT_INCLUDE_PATH", + "+PATH" + ], + "deps": [ + { + "tags": "detect,os" + } + ], + "post_deps": [ + { + "tags": "get,compiler-flags" + } + ], + "sort": 500, + "tags": [ + "get", + "gcc", + "compiler", + "c-compiler", + "cpp-compiler", + "get-gcc" + ], + "uid": "dbf4ab5cbed74372" +} diff --git a/script/get-gcc/customize.py b/script/get-gcc/customize.py new file mode 100644 index 0000000000..b29f38e13b --- /dev/null +++ b/script/get-gcc/customize.py @@ -0,0 +1,102 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + file_name_c = 'gcc.exe' if os_info['platform'] == 'windows' else 'gcc' 
+
+    # On RHEL, a requested GCC 12 lives in the gcc-toolset-12 software
+    # collection rather than on the default PATH, so probe that location too
+    if env.get('CM_HOST_OS_FLAVOR', '') == 'rhel':
+        if "12" in env.get('CM_VERSION', '') or "12" in env.get('CM_VERSION_MIN', ''):
+            if env.get('CM_TMP_PATH', '') == '':
+                env['CM_TMP_PATH'] = ''
+            env['CM_TMP_PATH'] += "/opt/rh/gcc-toolset-12/root/usr/bin"
+            env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+
+    if 'CM_GCC_BIN_WITH_PATH' not in env:
+        r = i['automation'].find_artifact({'file_name': file_name_c,
+                                           'env': env,
+                                           'os_info':os_info,
+                                           'default_path_env_key': 'PATH',
+                                           'detect_version':True,
+                                           'env_path_key':'CM_GCC_BIN_WITH_PATH',
+                                           'run_script_input':i['run_script_input'],
+                                           'recursion_spaces':recursion_spaces})
+        if r['return'] >0 :
+#           if r['return'] == 16:
+#               if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes':
+#                   return r
+#
+#               print (recursion_spaces+'    # {}'.format(r['error']))
+#
+#               # Attempt to run installer
+#               r = {'return':0, 'skip':True, 'script':{'tags':'install,gcc,src'}}
+
+            return r
+
+    return {'return':0}
+
+def detect_version(i):
+    r = i['automation'].parse_version({'match_text': r' \(.*\)\s*([\d.]+)',
+                                       'group_number': 1,
+                                       'env_key':'CM_GCC_VERSION',
+                                       'which_env':i['env']})
+    if r['return'] >0:
+        if 'clang' in r['error']:
+            return {'return':0, 'version':-1}
+        return r
+    version = r['version']
+
+    print (i['recursion_spaces'] + '    Detected version: {}'.format(version))
+
+    return {'return':0, 'version':version}
+
+def postprocess(i):
+
+    env = i['env']
+    r = detect_version(i)
+    if r['return'] >0: return r
+
+    env['CM_COMPILER_FAMILY'] = 'GCC'
+    version = r['version']
+    env['CM_COMPILER_VERSION'] = env['CM_GCC_VERSION']
+    env['CM_GCC_CACHE_TAGS'] = 'version-'+version
+    env['CM_COMPILER_CACHE_TAGS'] = 'version-'+version+',family-gcc'
+
+    found_file_path = env['CM_GCC_BIN_WITH_PATH']
+
+    found_path = os.path.dirname(found_file_path)
+
+    env['CM_GCC_INSTALLED_PATH'] = found_path
+
+    file_name_c = os.path.basename(found_file_path)
+    # G: changed next line to handle cases like gcc-8
+    file_name_cpp = file_name_c.replace('gcc','g++')
+    env['FILE_NAME_CPP'] = file_name_cpp
+
+    env['CM_GCC_BIN']=file_name_c
+
+    # General compiler for general program compilation
+    env['CM_C_COMPILER_BIN']=file_name_c
+    env['CM_C_COMPILER_FLAG_OUTPUT']='-o '
+    env['CM_C_COMPILER_WITH_PATH']=found_file_path
+    env['CM_C_COMPILER_FLAG_VERSION']='--version'
+
+    env['CM_CXX_COMPILER_BIN']=file_name_cpp
+    env['CM_CXX_COMPILER_WITH_PATH']=os.path.join(found_path, file_name_cpp)
+    env['CM_CXX_COMPILER_FLAG_OUTPUT']='-o '
+    env['CM_CXX_COMPILER_FLAG_VERSION']='--version'
+
+    env['CM_COMPILER_FLAGS_FAST'] = "-O3"
+    env['CM_LINKER_FLAGS_FAST'] = "-O3"
+    env['CM_COMPILER_FLAGS_DEBUG'] = "-O0"
+    env['CM_LINKER_FLAGS_DEBUG'] = "-O0"
+    env['CM_COMPILER_FLAGS_DEFAULT'] = "-O2"
+    env['CM_LINKER_FLAGS_DEFAULT'] = "-O2"
+
+
+    return {'return':0, 'version': version}
diff --git a/script/get-gcc/run.bat b/script/get-gcc/run.bat
new file mode 100644
index 0000000000..fac96d8340
--- /dev/null
+++ b/script/get-gcc/run.bat
@@ -0,0 +1,3 @@
+%CM_GCC_BIN_WITH_PATH% --version > tmp-ver.out
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
diff --git a/script/get-gcc/run.sh b/script/get-gcc/run.sh
new file mode 100644
index 0000000000..08be81f21f
--- /dev/null
+++ b/script/get-gcc/run.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+gcc_bin=${CM_GCC_BIN_WITH_PATH}
+echo "${gcc_bin} --version"
+
+${gcc_bin} --version > tmp-ver.out
+test $? -eq 0 || exit 1
+
+cat tmp-ver.out
diff --git a/script/get-generic-python-lib/README-extra.md b/script/get-generic-python-lib/README-extra.md
new file mode 100644
index 0000000000..5d320ba2b4
--- /dev/null
+++ b/script/get-generic-python-lib/README-extra.md
@@ -0,0 +1,6 @@
+## Variation onnxruntime_gpu
+
+### Windows
+
+* General installation notes: https://onnxruntime.ai/docs/install
+* Notes about dependencies: [link](https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html).
diff --git a/script/get-generic-python-lib/README.md b/script/get-generic-python-lib/README.md
new file mode 100644
index 0000000000..d521e60cb7
--- /dev/null
+++ b/script/get-generic-python-lib/README.md
@@ -0,0 +1,679 @@
+Automatically generated README for this automation recipe: **get-generic-python-lib**
+
+Category: **Python automation**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-generic-python-lib,94b62a682bc44791) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-python-lib)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,generic-python-lib*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get generic-python-lib" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,generic-python-lib`
+
+`cm run script --tags=get,generic-python-lib[,variations] [--input_flags]`
+
+*or*
+
+`cmr "get generic-python-lib"`
+
+`cmr "get generic-python-lib [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,generic-python-lib',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
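+For illustration, a minimal filled-in call might look like this (a sketch, assuming CM and this repository are installed; `_package.numpy` is one of the variations listed below):
+
+```python
+import cmind
+
+# Detect or install numpy through the generic-python-lib recipe;
+# 'out':'con' streams the script's output to the console.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,generic-python-lib,_package.numpy',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+else:
+    # On success the result typically carries the new environment keys
+    # (e.g. CM_PYTHONLIB_*) produced by the script
+    print(r.get('new_env', {}))
+```
+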
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="get,generic-python-lib"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,generic-python-lib) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "get generic-python-lib[variations]" [--input_flags]`
+
+___
+### Customization
+
+
+#### Variations
+
+  * *No group (any variation can be selected)*
+
+ Click here to expand this section. + + * `_Pillow` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `Pillow` + - Workflow: + * `_apache-tvm` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `apache-tvm` + - *CM_GENERIC_PYTHON_PIP_EXTRA*: ` --pre` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_typing_extensions + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_apex` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `apex` + - Workflow: + * `_async_timeout` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `async_timeout` + - Workflow: + * `_attr` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `attr` + - Workflow: + * `_attrs` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `attrs` + - Workflow: + * `_boto3` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `boto3` + - Workflow: + * `_cloudpickle` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `cloudpickle` + - Workflow: + * `_cmind` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `cmind` + - Workflow: + * `_colored` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `colored` + - *CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL*: `https://pypi.ngc.nvidia.com` + - Workflow: + * `_conda.#` + - Workflow: + * `_cupy` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `cupy` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * `_custom-python` + - Environment variables: + - *CM_TMP_USE_CUSTOM_PYTHON*: `on` + - Workflow: + * `_datasets` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `datasets` + - Workflow: + * `_decorator` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `decorator` + - Workflow: + * `_deepsparse` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `deepsparse` + - Workflow: + * `_dllogger` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `dllogger` + - *CM_GENERIC_PYTHON_PIP_URL*: `git+https://github.com/NVIDIA/dllogger#egg=dllogger` + - Workflow: + * `_fiftyone` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `fiftyone` + - Workflow: + * `_google-api-python-client` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `google_api_python_client` + - Workflow: + * `_google-auth-oauthlib` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `google_auth_oauthlib` + - Workflow: + * `_huggingface_hub` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `huggingface_hub` + - Workflow: + * `_inflect` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `inflect` + - Workflow: + * `_jax` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `jax` + - Workflow: + * `_jax_cuda` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `jax[cuda]` + - *CM_GENERIC_PYTHON_PIP_EXTRA*: `-f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html` + - *CM_JAX_VERSION_EXTRA*: `CUDA` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,cuda + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * `_librosa` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `librosa` + - Workflow: + * `_matplotlib` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `matplotlib` + - Workflow: + * `_mlperf_loadgen` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `mlperf_loadgen` + - *CM_GENERIC_PYTHON_PIP_URL*: `git+https://github.com/mlcommons/inference.git#subdirectory=loadgen` + - Workflow: + * `_mlperf_logging` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `mlperf_logging` + - *CM_GENERIC_PYTHON_PIP_URL*: `git+https://github.com/mlperf/logging.git` + - Workflow: + * `_mpld3` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `mpld3` + - Workflow: + * `_nibabel` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `nibabel` + - Workflow: + * `_numpy` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `numpy` + - Workflow: + * `_nvidia-apex` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `apex` + - *CM_GENERIC_PYTHON_PACKAGE_VARIANT*: `nvidia-apex` + - *CM_GENERIC_PYTHON_PIP_URL*: `git+https://github.com/nvidia/apex@0da3ffb92ee6fbe5336602f0e3989db1cd16f880` + - Workflow: + * `_nvidia-apex-from-src` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `apex` + - *CM_GENERIC_PYTHON_PACKAGE_VARIANT*: `nvidia-apex` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,generic-python-lib,_torch_cuda + * CM names: `--adr.['torch']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,git,repo,_repo.https://github.com/NVIDIA/apex,_tag.23.05 + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + * `_nvidia-dali` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `nvidia-dali-cuda120` + - *CM_GENERIC_PYTHON_PIP_EXTRA*: ` --upgrade --default-timeout=900` + - *CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL*: `https://developer.download.nvidia.com/compute/redist` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * `_nvidia-pycocotools` + - Environment variables: + - *CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS*: `pycocotools` + - *CM_GENERIC_PYTHON_PIP_URL*: `pycocotools@git+https://github.com/NVIDIA/cocoapi#subdirectory=PythonAPI` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,generic-python-lib,_package.cython + * CM names: `--adr.['cython']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.numpy + * CM names: `--adr.['numpy']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_nvidia-pyindex` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `nvidia-pyindex` + - Workflow: + * `_nvidia-tensorrt` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `nvidia-tensorrt` + - Workflow: + * `_onnx` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `onnx` + - Workflow: + * `_onnx-graphsurgeon` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `onnx_graphsurgeon` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_package.nvidia-pyindex + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_onnxruntime` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `onnxruntime` + - Workflow: + * `_onnxruntime,rocm` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `onnxruntime-training` + - *CM_GENERIC_PYTHON_PIP_URL*: `https://download.onnxruntime.ai/onnxruntime_training-1.16.0%2Brocm56-cp3<<>>-cp3<<>>-manylinux_2_17_x86_64.manylinux2014_x86_64.whl` + - Workflow: + * `_onnxruntime_gpu` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `onnxruntime_gpu` + - *CM_ONNXRUNTIME_VERSION_EXTRA*: `GPU` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * `_opencv-python` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `opencv-python` + - Workflow: + * `_package.#` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `#` + - *CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS*: `` + - *CM_GENERIC_PYTHON_PIP_URL*: `` + - Workflow: + * `_pandas` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `pandas` + - Workflow: + * `_path.#` + - Environment variables: + - *CM_GENERIC_PYTHON_PIP_URL*: `#` + - Workflow: + * `_pillow` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `Pillow` + - Workflow: + * `_pip` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `pip` + - Workflow: + * `_polygraphy` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `polygraphy` + - *CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL*: `https://pypi.ngc.nvidia.com` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_colored + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_pre` + - Environment variables: + - *CM_GENERIC_PYTHON_DEV_VERSION*: `yes` + - Workflow: + * `_protobuf` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `protobuf` + - Workflow: + * `_psutil` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `psutil` + - Workflow: + * `_pycocotools` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `pycocotools` + - Workflow: + * `_pycuda` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `pycuda` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,cuda + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * `_ray` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `ray[default]` + - Workflow: + * `_requests` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `requests` + - Workflow: + * `_rocm` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,rocm + * CM names: `--adr.['rocm']...` + - CM script: [get-rocm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-rocm) + * `_safetensors` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `safetensors` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,rust-compiler + * `if (CM_HOST_PLATFORM_FLAVOR != x86_64)` + - CM script: [get-compiler-rust](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-compiler-rust) + * `_scikit-learn` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `scikit-learn` + - Workflow: + * `_scipy` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `scipy` + - Workflow: + * `_scons` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `scons` + - Workflow: + * `_setfit` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `setfit` + - Workflow: + * `_setuptools` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `setuptools` + - Workflow: + * `_six` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `six` + - Workflow: + * `_sklearn` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `sklearn` + - Workflow: + * `_sox` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `sox` + - Workflow: + * `_sparsezoo` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `sparsezoo` + - Workflow: + * `_streamlit` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `streamlit` + - Workflow: + * `_streamlit_option_menu` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `streamlit_option_menu` + - Workflow: + * `_tensorboard` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `tensorboard` + - Workflow: + * `_tensorflow` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `tensorflow` + - Workflow: + * `_tensorflow,rocm` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `tensorflow-rocm` + - Workflow: + * `_tensorrt` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `tensorrt` + - *CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL*: `https://download.pytorch.org/whl/<<>>` + - *CM_TORCH_VERSION_EXTRA*: `CUDA` + - Workflow: + * `_tflite` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `tflite` + - Workflow: + * `_tflite-runtime` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `tflite-runtime` + - Workflow: + * `_tokenization` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `tokenization` + - Workflow: + * `_toml` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `toml` + - Workflow: + * `_torch` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `torch` + - *CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL*: `https://download.pytorch.org/whl/cpu` + - Workflow: + * `_torch,pre` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `torch` + - *CM_GENERIC_PYTHON_PIP_EXTRA*: ` --pre` + - *CM_GENERIC_PYTHON_PIP_INDEX_URL*: `https://download.pytorch.org/whl/nightly/cpu` + - Workflow: + * `_torch,rocm` + - 
Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `torch` + - *CM_GENERIC_PYTHON_PIP_INDEX_URL*: `https://download.pytorch.org/whl/rocm5.6` + - *CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS*: `torch` + - Workflow: + 1. ***Read "post_deps" on other CM scripts*** + * get,generic-python-lib,_torchvision,_rocm + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torchaudio,_rocm + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_torch_cuda` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `torch` + - *CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1*: `https://download.pytorch.org/whl/<<>>` + - *CM_TORCH_VERSION_EXTRA*: `CUDA` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * `_torch_cuda,pre` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `torch` + - *CM_GENERIC_PYTHON_PIP_EXTRA*: ` --pre` + - *CM_GENERIC_PYTHON_PIP_INDEX_URL*: `https://download.pytorch.org/whl/<<>>` + - *CM_TORCH_VERSION_EXTRA*: `CUDA` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * get,generic-python-lib,_numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_torch_tensorrt` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `torch-tensorrt` + - *CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL*: `https://download.pytorch.org/whl/<<>>` + - *CM_TORCH_VERSION_EXTRA*: `CUDA` + - Workflow: + * `_torchaudio` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `torchaudio` + - *CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL*: `https://download.pytorch.org/whl/cpu` + - Workflow: + * `_torchaudio,rocm` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `torchaudio` + - *CM_GENERIC_PYTHON_PIP_INDEX_URL*: `https://download.pytorch.org/whl/rocm5.6` + - *CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS*: `torchaudio` + - Workflow: + * `_torchaudio_cuda` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `torchaudio` + - *CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1*: `https://download.pytorch.org/whl/<<>>` + - *CM_TORCHAUDIO_VERSION_EXTRA*: `CUDA` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * `_torchvision` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `torchvision` + - *CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL*: `https://download.pytorch.org/whl/cpu` + - Workflow: + * `_torchvision,rocm` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `torchvision` + - *CM_GENERIC_PYTHON_PIP_INDEX_URL*: `https://download.pytorch.org/whl/rocm5.6` + - *CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS*: `torchvision` + - Workflow: + * `_torchvision_cuda` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `torchvision` + - *CM_TORCHVISION_VERSION_EXTRA*: `CUDA` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,cuda + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * `_tornado` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `tornado` + - Workflow: + * `_tqdm` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `tqdm` + - Workflow: + * `_transformers` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `transformers` + - Workflow: + * `_typing_extensions` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `typing_extensions` + - Workflow: + * `_ujson` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `ujson` + - Workflow: + * `_unidecode` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `unidecode` + - Workflow: + * `_url.#` + - Environment variables: + - *CM_GENERIC_PYTHON_PIP_URL*: `#` + - *CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL*: `yes` + - Workflow: + * `_wandb` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `wandb` + - Workflow: + * `_west` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `west` + - Workflow: + * `_xgboost` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `xgboost` + - Workflow: + * `_xlsxwriter` + - Environment variables: + - *CM_GENERIC_PYTHON_PACKAGE_NAME*: `xlsxwriter` + - Workflow: + +
+
+
+#### Script flags mapped to environment
+
+Click here to expand this section.
+
+* `--extra_index_url=value` → `CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL=value`
+* `--force_install=value` → `CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL=value`
+* `--index_url=value` → `CM_GENERIC_PYTHON_PIP_INDEX_URL=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r = cm.access({... , "extra_index_url":...})
+```
+
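+For illustration, a filled-in call might look as follows (a sketch, assuming CM is installed; the index URL is just an example value, and the `index_url` key is rewritten into `CM_GENERIC_PYTHON_PIP_INDEX_URL` via the `input_mapping` in this script's `_cm.json`):
+
+```python
+import cmind
+
+# Install torch from an explicit index; 'index_url' is mapped to
+# CM_GENERIC_PYTHON_PIP_INDEX_URL before the script runs.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,generic-python-lib,_torch',
+                  'index_url': 'https://download.pytorch.org/whl/cpu',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+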
+
+#### Default environment
+
+
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via an `env` dictionary in `@input.json`, or via the script flags above.
+
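+For example, the same override can also be passed programmatically (a sketch; the extra pip flag shown is illustrative):
+
+```python
+import cmind
+
+# Pass environment overrides through the 'env' dictionary
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,generic-python-lib,_package.numpy',
+                  'env': {'CM_GENERIC_PYTHON_PIP_EXTRA': ' --upgrade'},
+                  'out': 'con'})
+```
+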
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-python-lib/_cm.json)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * detect,cpu
+       - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu)
+     * get,python3
+       * `if (CM_TMP_USE_CUSTOM_PYTHON != on)`
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,generic-python-lib,_pip
+       * `if (CM_GENERIC_PYTHON_PACKAGE_NAME != pip)`
+       * CM names: `--adr.['python-pip', 'pip']...`
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-python-lib/customize.py)***
+  1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-python-lib/_cm.json)***
+     * install,onnxruntime,from.src,_cuda
+       * `if (CM_INSTALL_ONNXRUNTIME_GPU_FROM_SRC == yes)`
+       - CM script: [install-onnxruntime-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-onnxruntime-from-src)
+  1. ***Run native script if exists***
+     * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-python-lib/run.bat)
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-python-lib/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-python-lib/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-python-lib/customize.py)***
+  1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-python-lib/_cm.json) + +___ +### Script output +`cmr "get generic-python-lib [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_PYTHONLIB_*` +#### New environment keys auto-detected from customize diff --git a/script/get-generic-python-lib/_cm.json b/script/get-generic-python-lib/_cm.json new file mode 100644 index 0000000000..fa84d62fa5 --- /dev/null +++ b/script/get-generic-python-lib/_cm.json @@ -0,0 +1,1073 @@ +{ + "alias": "get-generic-python-lib", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Python automation", + "clean_files": [], + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "names": [ + "python", + "python3" + ], + "skip_if_env": { + "CM_TMP_USE_CUSTOM_PYTHON": [ + "on" + ] + }, + "tags": "get,python3" + }, + { + "names": [ + "python-pip", + "pip" + ], + "skip_if_env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": [ + "pip" + ] + }, + "tags": "get,generic-python-lib,_pip" + } + ], + "extra_cache_tags_from_env": [ + { + "env": "CM_PYTHON_CACHE_TAGS", + "prefix": "python-" + } + ], + "input_mapping": { + "extra_index_url": "CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL", + "force_install": "CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL", + "index_url": "CM_GENERIC_PYTHON_PIP_INDEX_URL" + }, + "local_env_keys": [ + "CM_GENERIC_PYTHON_PACKAGE_VARIANT" + ], + "new_env_keys": [ + "CM_PYTHONLIB_*" + ], + "prehook_deps": [ + { + "enable_if_env": { + "CM_INSTALL_ONNXRUNTIME_GPU_FROM_SRC": [ + "yes" + ] + }, + "tags": "install,onnxruntime,from.src,_cuda" + } + ], + "tags": [ + "get", + "install", + "generic", + "generic-python-lib" + ], + "tags_help": "get generic-python-lib", + "uid": "94b62a682bc44791", + "variations": { + "Pillow": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "Pillow" + }, + "new_env_keys": [ + "CM_PILLOW_VERSION" + ] + }, + "apache-tvm": { + "deps": [ + { + "tags": "get,generic-python-lib,_typing_extensions" + } + ], + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "apache-tvm", + "CM_GENERIC_PYTHON_PIP_EXTRA": " --pre" + }, + "new_env_keys": [ + "CM_APACHE_TVM_VERSION" + ] + }, + "apex": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "apex" + }, + "new_env_keys": [ + "CM_APEX_VERSION" + ] + }, + "async_timeout": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "async_timeout" + }, + "new_env_keys": [ + "CM_ASYNC_TIMEOUT_VERSION" + ] + }, + "attr": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "attr" + }, + "new_env_keys": [ + "CM_ATTR_VERSION" + ] + }, + "attrs": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "attrs" + }, + "new_env_keys": [ + "CM_ATTRS_VERSION" + ] + }, + "boto3": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "boto3" + }, + "new_env_keys": [ + "CM_BOTO3_VERSION" + ] + }, + "cloudpickle": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "cloudpickle" + }, + "new_env_keys": [ + "CM_CLOUDPICKLE_VERSION" + ] + }, + "cmind": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "cmind" + }, + "new_env_keys": [ + "CM_CMIND_VERSION" + ] + }, + "colored": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "colored", + "CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL": "https://pypi.ngc.nvidia.com" + }, + "new_env_keys": [ + "CM_COLORED_VERSION" + ] + }, + "conda.#": { + "ad": { + "python-pip": { + "tags": "_conda.#" + }, + "python3": { + "tags": "_conda.#" + } + } + }, + "cupy": { + "deps": [ + { + "names": [ + "cuda" + ], + "tags": "get,cuda" + } + ], 
+ "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "cupy" + }, + "new_env_keys": [ + "CM_CUPY_VERSION" + ] + }, + "custom-python": { + "ad": { + "python-pip": { + "tags": "_custom-python" + } + }, + "env": { + "CM_TMP_USE_CUSTOM_PYTHON": "on" + } + }, + "datasets": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "datasets" + }, + "new_env_keys": [ + "CM_DATASETS_VERSION" + ] + }, + "decorator": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "decorator" + }, + "new_env_keys": [ + "CM_DECORATOR_VERSION" + ] + }, + "deepsparse": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "deepsparse" + }, + "new_env_keys": [ + "CM_DEEPSPARSE_VERSION" + ] + }, + "dllogger": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "dllogger", + "CM_GENERIC_PYTHON_PIP_URL": "git+https://github.com/NVIDIA/dllogger#egg=dllogger" + } + }, + "fiftyone": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "fiftyone" + }, + "new_env_keys": [ + "CM_FIFTYONE_VERSION" + ] + }, + "google-api-python-client": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "google_api_python_client" + }, + "new_env_keys": [ + "CM_GOOGLE_API_PYTHON_CLIENT_VERSION" + ] + }, + "google-auth-oauthlib": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "google_auth_oauthlib" + }, + "new_env_keys": [ + "CM_GOOGLE_AUTH_OAUTHLIB_VERSION" + ] + }, + "huggingface_hub": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "huggingface_hub" + }, + "new_env_keys": [ + "CM_HUGGINGFACE_HUB_VERSION" + ] + }, + "inflect": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "inflect" + }, + "new_env_keys": [ + "CM_INFLECT_VERSION" + ] + }, + "jax": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "jax" + }, + "new_env_keys": [ + "CM_JAX_VERSION*" + ] + }, + "jax_cuda": { + "deps": [ + { + "names": [ + "cuda" + ], + "tags": "get,cuda" + } + ], + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "jax[cuda]", + "CM_GENERIC_PYTHON_PIP_EXTRA": "-f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html", + "CM_JAX_VERSION_EXTRA": "CUDA" + }, + "new_env_keys": [ + "CM_JAX_VERSION*" + ] + }, + "librosa": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "librosa" + }, + "new_env_keys": [ + "CM_LIBROSA_VERSION" + ] + }, + "matplotlib": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "matplotlib" + }, + "new_env_keys": [ + "CM_MATPLOTLIB_VERSION" + ] + }, + "mlperf_loadgen": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "mlperf_loadgen", + "CM_GENERIC_PYTHON_PIP_URL": "git+https://github.com/mlcommons/inference.git#subdirectory=loadgen" + }, + "new_env_keys": [ + "CM_MLPERF_LOADGEN_VERSION" + ] + }, + "mlperf_logging": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "mlperf_logging", + "CM_GENERIC_PYTHON_PIP_URL": "git+https://github.com/mlperf/logging.git" + }, + "new_env_keys": [ + "CM_MLPERF_LOGGING_VERSION" + ] + }, + "mpld3": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "mpld3" + }, + "new_env_keys": [ + "CM_MPLD3_VERSION" + ] + }, + "nibabel": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "nibabel" + }, + "new_env_keys": [ + "CM_NIBABEL_VERSION" + ] + }, + "numpy": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "numpy" + }, + "new_env_keys": [ + "CM_NUMPY_VERSION" + ] + }, + "nvidia-apex": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "apex", + "CM_GENERIC_PYTHON_PACKAGE_VARIANT": "nvidia-apex", + "CM_GENERIC_PYTHON_PIP_URL": "git+https://github.com/nvidia/apex@0da3ffb92ee6fbe5336602f0e3989db1cd16f880" + }, + "new_env_keys": [ + "CM_NVIDIA_APEX_VERSION" + ] + }, + "nvidia-apex-from-src": { + "deps": [ + { + "names": [ + "cuda" + ], + "tags": "get,cuda" 
+ }, + { + "names": [ + "torch" + ], + "tags": "get,generic-python-lib,_torch_cuda" + }, + { + "env": { + "CM_GIT_CHECKOUT_FOLDER": "apex" + }, + "extra_cache_tags": "nvidia-apex", + "tags": "get,git,repo,_repo.https://github.com/NVIDIA/apex,_tag.23.05" + } + ], + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "apex", + "CM_GENERIC_PYTHON_PACKAGE_VARIANT": "nvidia-apex" + }, + "new_env_keys": [ + "CM_NVIDIA_APEX_VERSION" + ] + }, + "nvidia-dali": { + "deps": [ + { + "names": [ + "cuda" + ], + "tags": "get,cuda" + } + ], + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "nvidia-dali-cuda120", + "CM_GENERIC_PYTHON_PIP_EXTRA": " --upgrade --default-timeout=900", + "CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL": "https://developer.download.nvidia.com/compute/redist" + }, + "new_env_keys": [ + "CM_NVIDIA_DALI_VERSION" + ] + }, + "nvidia-pycocotools": { + "base": [ + "pycocotools" + ], + "deps": [ + { + "names": [ + "cython" + ], + "tags": "get,generic-python-lib,_package.cython" + }, + { + "names": [ + "numpy" + ], + "tags": "get,generic-python-lib,_package.numpy" + } + ], + "env": { + "CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS": "pycocotools", + "CM_GENERIC_PYTHON_PIP_URL": "pycocotools@git+https://github.com/NVIDIA/cocoapi#subdirectory=PythonAPI" + } + }, + "nvidia-pyindex": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "nvidia-pyindex" + }, + "new_env_keys": [ + "CM_NVIDIA_PYINDEX_VERSION" + ] + }, + "nvidia-tensorrt": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "nvidia-tensorrt" + }, + "new_env_keys": [ + "CM_NVIDIA_TENSORRT_VERSION" + ] + }, + "onnx": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "onnx" + }, + "new_env_keys": [ + "CM_ONNX_VERSION" + ] + }, + "onnx-graphsurgeon": { + "deps": [ + { + "tags": "get,generic-python-lib,_package.nvidia-pyindex" + } + ], + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "onnx_graphsurgeon" + }, + "new_env_keys": [ + "CM_ONNX_GRAPHSURGEON_VERSION" + ] + }, + "onnxruntime": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "onnxruntime" + }, + "new_env_keys": [ + "CM_ONNXRUNTIME_VERSION" + ] + }, + "onnxruntime,rocm": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "onnxruntime-training", + "CM_GENERIC_PYTHON_PIP_URL": "https://download.onnxruntime.ai/onnxruntime_training-1.16.0%2Brocm56-cp3<<>>-cp3<<>>-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" + }, + "new_env_keys": [ + "CM_ONNXRUNTIME_TRAINING_VERSION*" + ] + }, + "onnxruntime_gpu": { + "default_env": { + "CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS": "onnxruntime" + }, + "deps": [ + { + "names": [ + "cuda" + ], + "tags": "get,cuda" + } + ], + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "onnxruntime_gpu", + "CM_ONNXRUNTIME_VERSION_EXTRA": "GPU" + }, + "new_env_keys": [ + "CM_ONNXRUNTIME_GPU_VERSION*" + ] + }, + "opencv-python": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "opencv-python" + }, + "new_env_keys": [ + "CM_OPENCV_PYTHON_VERSION" + ] + }, + "package.#": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "#", + "CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS": "", + "CM_GENERIC_PYTHON_PIP_URL": "" + } + }, + "pandas": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "pandas" + }, + "new_env_keys": [ + "CM_PANDAS_VERSION" + ] + }, + "path.#": { + "env": { + "CM_GENERIC_PYTHON_PIP_URL": "#" + } + }, + "pillow": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "Pillow" + }, + "new_env_keys": [ + "CM_PILLOW_VERSION" + ] + }, + "pip": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "pip" + }, + "new_env_keys": [ + "CM_PIP_VERSION", + "CM_PYTHON_PIP_COMMON_EXTRA" + ] + }, + "polygraphy": { + "deps": [ + { + 
"tags": "get,generic-python-lib,_colored" + } + ], + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "polygraphy", + "CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL": "https://pypi.ngc.nvidia.com" + }, + "new_env_keys": [ + "CM_POLYGRAPHY_VERSION" + ] + }, + "pre": { + "env": { + "CM_GENERIC_PYTHON_DEV_VERSION": "yes" + } + }, + "protobuf": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "protobuf" + }, + "new_env_keys": [ + "CM_PROTOBUF_VERSION" + ] + }, + "psutil": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "psutil" + }, + "new_env_keys": [ + "CM_PSUTIL_VERSION" + ] + }, + "pycocotools": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "pycocotools" + }, + "new_env_keys": [ + "CM_PYCOCOTOOLS_VERSION" + ] + }, + "pycuda": { + "deps": [ + { + "names": [ + "cuda" + ], + "tags": "get,cuda" + } + ], + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "pycuda" + }, + "new_env_keys": [ + "CM_PYCUDA_VERSION" + ] + }, + "ray": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "ray[default]" + }, + "new_env_keys": [ + "CM_RAY_VERSION" + ] + }, + "requests": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "requests" + }, + "new_env_keys": [ + "CM_REQUESTS_VERSION" + ] + }, + "rocm": { + "deps": [ + { + "names": [ + "rocm" + ], + "tags": "get,rocm" + } + ], + "env": {} + }, + "safetensors": { + "deps": [ + { + "skip_if_env": { + "CM_HOST_PLATFORM_FLAVOR": [ + "x86_64" + ] + }, + "tags": "get,rust-compiler" + } + ], + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "safetensors" + }, + "new_env_keys": [ + "CM_SAFETENSORS_VERSION" + ] + }, + "scikit-learn": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "scikit-learn" + }, + "new_env_keys": [ + "CM_SCIKIT_LEARN_VERSION" + ] + }, + "scipy": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "scipy" + }, + "new_env_keys": [ + "CM_SCIPY_VERSION" + ] + }, + "scons": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "scons" + }, + "new_env_keys": [ + "CM_SCONS_VERSION" + ] + }, + "setfit": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "setfit" + }, + "new_env_keys": [ + "CM_SETFIT_VERSION" + ] + }, + "setuptools": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "setuptools" + }, + "new_env_keys": [ + "CM_SETUPTOOL_VERSION" + ] + }, + "six": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "six" + }, + "new_env_keys": [ + "CM_SIX_VERSION" + ] + }, + "sklearn": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "sklearn" + }, + "new_env_keys": [ + "CM_SKLEARN_VERSION" + ] + }, + "sox": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "sox" + }, + "new_env_keys": [ + "CM_SOX_VERSION" + ] + }, + "sparsezoo": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "sparsezoo" + }, + "new_env_keys": [ + "CM_SPARSEZOO_VERSION" + ] + }, + "streamlit": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "streamlit" + }, + "new_env_keys": [ + "CM_STREAMLIT_VERSION" + ] + }, + "streamlit_option_menu": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "streamlit_option_menu" + }, + "new_env_keys": [ + "CM_STREAMLIT_OPTION_MENU_VERSION" + ] + }, + "tensorboard": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "tensorboard" + }, + "new_env_keys": [ + "CM_TENSORBOARD_VERSION" + ] + }, + "tensorflow": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "tensorflow" + }, + "new_env_keys": [ + "CM_TENSORFLOW_VERSION" + ] + }, + "tensorflow,rocm": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "tensorflow-rocm" + }, + "new_env_keys": [ + "CM_TENSORFLOW_ROCM_VERSION" + ] + }, + "tensorrt": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "tensorrt", + "CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL": 
"https://download.pytorch.org/whl/<<>>", + "CM_TORCH_VERSION_EXTRA": "CUDA" + }, + "new_env_keys": [ + "CM_TENSORRT_VERSION" + ] + }, + "tflite": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "tflite" + }, + "new_env_keys": [ + "CM_TFLITE_VERSION" + ] + }, + "tflite-runtime": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "tflite-runtime" + }, + "new_env_keys": [ + "CM_TFLITE_RUNTIME_VERSION" + ] + }, + "tokenization": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "tokenization" + }, + "new_env_keys": [ + "CM_TOKENIZATION_VERSION" + ] + }, + "toml": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "toml" + }, + "new_env_keys": [ + "CM_TOML_VERSION" + ] + }, + "torch": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "torch", + "CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL": "https://download.pytorch.org/whl/cpu" + }, + "new_env_keys": [ + "CM_TORCH_VERSION*" + ] + }, + "torch,pre": { + "default_env": { + "CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS": "torch" + }, + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "torch", + "CM_GENERIC_PYTHON_PIP_EXTRA": " --pre", + "CM_GENERIC_PYTHON_PIP_INDEX_URL": "https://download.pytorch.org/whl/nightly/cpu" + }, + "new_env_keys": [ + "CM_TORCH_VERSION*" + ] + }, + "torch,rocm": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "torch", + "CM_GENERIC_PYTHON_PIP_INDEX_URL": "https://download.pytorch.org/whl/rocm5.6", + "CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS": "torch" + }, + "new_env_keys": [ + "CM_TORCH_VERSION*" + ], + "post_deps": [ + { + "tags": "get,generic-python-lib,_torchvision,_rocm" + }, + { + "tags": "get,generic-python-lib,_torchaudio,_rocm" + } + ] + }, + "torch_cuda": { + "default_env": { + "CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS1": "torch" + }, + "deps": [ + { + "names": [ + "cuda" + ], + "tags": "get,cuda" + } + ], + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "torch", + "CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1": "https://download.pytorch.org/whl/<<>>", + "CM_TORCH_VERSION_EXTRA": "CUDA" + }, + "new_env_keys": [ + "CM_TORCH_VERSION*" + ] + }, + "torch_cuda,pre": { + "default_env": { + "CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS": "torch_cuda" + }, + "deps": [ + { + "names": [ + "cuda" + ], + "tags": "get,cuda" + }, + { + "tags": "get,generic-python-lib,_numpy" + } + ], + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "torch", + "CM_GENERIC_PYTHON_PIP_EXTRA": " --pre", + "CM_GENERIC_PYTHON_PIP_INDEX_URL": "https://download.pytorch.org/whl/<<>>", + "CM_TORCH_VERSION_EXTRA": "CUDA" + }, + "new_env_keys": [ + "CM_TORCH_VERSION*" + ] + }, + "torch_tensorrt": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "torch-tensorrt", + "CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL": "https://download.pytorch.org/whl/<<>>", + "CM_TORCH_VERSION_EXTRA": "CUDA" + }, + "new_env_keys": [ + "CM_TORCH_TENSORRT_VERSION" + ] + }, + "torchaudio": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "torchaudio", + "CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL": "https://download.pytorch.org/whl/cpu" + }, + "new_env_keys": [ + "CM_TORCHAUDIO_VERSION*" + ] + }, + "torchaudio,rocm": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "torchaudio", + "CM_GENERIC_PYTHON_PIP_INDEX_URL": "https://download.pytorch.org/whl/rocm5.6", + "CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS": "torchaudio" + }, + "new_env_keys": [ + "CM_TORCHAUDIO_VERSION*" + ] + }, + "torchaudio_cuda": { + "default_env": { + "CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS": "torchaudio" + }, + "deps": [ + { + "names": [ + "cuda" + ], + "tags": "get,cuda" + } + ], + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "torchaudio", + 
"CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1": "https://download.pytorch.org/whl/<<>>", + "CM_TORCHAUDIO_VERSION_EXTRA": "CUDA" + }, + "new_env_keys": [ + "CM_TORCHAUDIO_VERSION*" + ] + }, + "torchvision": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "torchvision", + "CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL": "https://download.pytorch.org/whl/cpu" + }, + "new_env_keys": [ + "CM_TORCHVISION_VERSION*" + ] + }, + "torchvision,rocm": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "torchvision", + "CM_GENERIC_PYTHON_PIP_INDEX_URL": "https://download.pytorch.org/whl/rocm5.6", + "CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS": "torchvision" + }, + "new_env_keys": [ + "CM_TORCHVISION_VERSION*" + ] + }, + "torchvision_cuda": { + "default_env": { + "CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS1": "torchvision" + }, + "deps": [ + { + "names": [ + "cuda" + ], + "tags": "get,cuda" + } + ], + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "torchvision", + "CM_TORCHVISION_VERSION_EXTRA": "CUDA" + }, + "new_env_keys": [ + "CM_TORCHVISION_VERSION*" + ] + }, + "tornado": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "tornado" + }, + "new_env_keys": [ + "CM_TORNADO_VERSION" + ] + }, + "tqdm": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "tqdm" + }, + "new_env_keys": [ + "CM_TQDM_VERSION" + ] + }, + "transformers": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "transformers" + }, + "new_env_keys": [ + "CM_TRANSFORMERS_VERSION" + ] + }, + "typing_extensions": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "typing_extensions" + }, + "new_env_keys": [ + "CM_TYPING_EXTENSIONS_VERSION" + ] + }, + "ujson": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "ujson" + }, + "new_env_keys": [ + "CM_UJSON_VERSION" + ] + }, + "unidecode": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "unidecode" + }, + "new_env_keys": [ + "CM_UNIDECODE_VERSION" + ] + }, + "url.#": { + "env": { + "CM_GENERIC_PYTHON_PIP_URL": "#", + "CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL": "yes" + } + }, + "wandb": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "wandb" + }, + "new_env_keys": [ + "CM_WANDB_VERSION" + ] + }, + "west": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "west" + }, + "new_env_keys": [ + "CM_WEST_VERSION" + ] + }, + "xgboost": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "xgboost" + }, + "new_env_keys": [ + "CM_XGBOOST_VERSION" + ] + }, + "xlsxwriter": { + "env": { + "CM_GENERIC_PYTHON_PACKAGE_NAME": "xlsxwriter" + }, + "new_env_keys": [ + "CM_XLSXWRITER_VERSION" + ] + } + } +} diff --git a/script/get-generic-python-lib/customize.py b/script/get-generic-python-lib/customize.py new file mode 100644 index 0000000000..b15e2d9e00 --- /dev/null +++ b/script/get-generic-python-lib/customize.py @@ -0,0 +1,149 @@ +from cmind import utils +import os +import cmind as cm + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + meta = i['meta'] + automation = i['automation'] + run_script_input = i['run_script_input'] + pip_version = env.get('CM_PIP_VERSION', '').strip().split('.') + package_name = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '').strip() + if package_name == '': + return automation._available_variations({'meta':meta}) + + if package_name == "onnxruntime_gpu": + # https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements + # 20240214: ONNXRuntime 1.17.0 now support CUDA 12 so we remove next check + # TBD: if we have explicit version for ONNX < 17.0.0 and CUDA is >= 12, we should add a check to fail ... 
+ cuda_version = env.get('CM_CUDA_VERSION','').strip() +# if cuda_version!='': +# cuda_version_split = cuda_version.split('.') +# if int(cuda_version_split[0]) >= 12: +# # env['CM_INSTALL_ONNXRUNTIME_GPU_FROM_SRC'] = "yes" +# return {'return': 1, 'error':'at this moment, PIP package "onnxruntime_gpu" needs CUDA < 12'} + + extra = env.get('CM_GENERIC_PYTHON_PIP_EXTRA','') + if (pip_version and len(pip_version) > 1 and int(pip_version[0]) >= 23) and ('--break-system-packages' not in extra): + extra += ' --break-system-packages ' + env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages" + + if env.get('CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS', '') != '': + r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'uninstall_deps'}) + if r['return']>0: return r + + prepare_env_key = env['CM_GENERIC_PYTHON_PACKAGE_NAME'] + for x in ["-", "[", "]"]: + prepare_env_key = prepare_env_key.replace(x,"_") + + env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'] = prepare_env_key.upper() + + recursion_spaces = i['recursion_spaces'] + + r = automation.detect_version_using_script({ + 'env': env, + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + + force_install = (env.get('CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL', '') in ['yes', 'true', 'True', True]) + + if r['return'] >0 or force_install: + if r['return'] == 16 or force_install: + # Clean detected version env if exists otherwise takes detected version + # for example, when we reinstall generic python lib package + env_version_key = 'CM_'+env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper()+'_VERSION' + if env.get(env_version_key,'')!='': + del(env[env_version_key]) + + # Check if upgrade + if force_install: + extra+=' --upgrade --no-deps --force-reinstall' + + # Check index URL + index_url = env.get('CM_GENERIC_PYTHON_PIP_INDEX_URL','').strip() + if index_url != '': + # Check special cases + if '${CM_TORCH_CUDA}' in index_url: + index_url=index_url.replace('${CM_TORCH_CUDA}', env.get('CM_TORCH_CUDA')) + + extra += ' --index-url '+index_url + + # Check extra index URL + extra_index_url = env.get('CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL','').strip() + if extra_index_url != '': + # Check special cases + if '${CM_TORCH_CUDA}' in extra_index_url: + extra_index_url=extra_index_url.replace('${CM_TORCH_CUDA}', env.get('CM_TORCH_CUDA')) + + extra += ' --extra-index-url '+extra_index_url + + # Check update + if env.get('CM_GENERIC_PYTHON_PIP_UPDATE','') in [True,'true','yes','on']: + extra +=' -U' + + print ('') + print (recursion_spaces + ' Extra PIP CMD: ' + extra) + print ('') + + env['CM_GENERIC_PYTHON_PIP_EXTRA'] = extra + + package_name = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '').strip() + if package_name == '': + return automation._available_variations({'meta':meta}) + + r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install'}) + + if r['return']>0: return r + + return {'return':0} + +def detect_version(i): + + env = i['env'] + + env_version_key = 'CM_'+env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper()+'_VERSION' + + r = i['automation'].parse_version({'match_text': r'\s*([\d.a-z\-]+)', + 'group_number': 1, + 'env_key':env_version_key, + 'which_env':i['env']}) + if r['return'] >0: return r + + version = r['version'] + current_detected_version = version + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return':0, 'version':version} + + +def postprocess(i): + + env = i['env'] + + env_version_key = 
'CM_'+env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper()+'_VERSION' + + if env.get(env_version_key,'')!='': + version = env[env_version_key] + else: + r = detect_version(i) + if r['return'] >0: return r + + version = r['version'] + + env['CM_PYTHONLIB_'+env['CM_TMP_PYTHON_PACKAGE_NAME_ENV']+'_CACHE_TAGS'] = 'version-'+version + + import pkgutil + package_name = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '').strip() + package=pkgutil.get_loader(package_name) + if package: + installed_file_path = package.get_filename() + env['CM_GET_DEPENDENT_CACHED_PATH'] = installed_file_path + + pip_version = env.get('CM_PIP_VERSION', '').strip().split('.') + if pip_version and len(pip_version) > 1 and int(pip_version[0]) >= 23: + env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages" + + return {'return':0, 'version': version} diff --git a/script/get-generic-python-lib/detect-version.py b/script/get-generic-python-lib/detect-version.py new file mode 100644 index 0000000000..8cd53515ae --- /dev/null +++ b/script/get-generic-python-lib/detect-version.py @@ -0,0 +1,34 @@ +import os + +package_name = os.environ.get('CM_GENERIC_PYTHON_PACKAGE_NAME','') + +filename = 'tmp-ver.out' + +if os.path.isfile(filename): + os.remove(filename) + +if package_name != '': + + version = '' + error = '' + + try: + import importlib.metadata + version = importlib.metadata.version(package_name) + except Exception as e: + error = format(e) + + if error != '': + try: + import pkg_resources + version = pkg_resources.get_distribution(package_name).version + error = '' + except Exception as e: + if error!='': error += '\n' + error += format(e) + + # We generally skip error since it usually means that + # package is not installed + + with open(filename, 'w') as file: + file.write(str(version)+'\n') diff --git a/script/get-generic-python-lib/install.bat b/script/get-generic-python-lib/install.bat new file mode 100644 index 0000000000..0a5967462e --- /dev/null +++ b/script/get-generic-python-lib/install.bat @@ -0,0 +1,15 @@ +echo. + +if NOT "%CM_GENERIC_PYTHON_PIP_URL%" == "" ( + + %CM_PYTHON_BIN_WITH_PATH% -m pip install %CM_GENERIC_PYTHON_PIP_URL% %CM_GENERIC_PYTHON_PIP_EXTRA% + IF %ERRORLEVEL% NEQ 0 EXIT 1 + +) else ( + + %CM_PYTHON_BIN_WITH_PATH% -m pip install %CM_GENERIC_PYTHON_PACKAGE_NAME%%CM_TMP_PIP_VERSION_STRING% %CM_GENERIC_PYTHON_PIP_EXTRA% + IF %ERRORLEVEL% NEQ 0 EXIT 1 + +) + + diff --git a/script/get-generic-python-lib/install.sh b/script/get-generic-python-lib/install.sh new file mode 100644 index 0000000000..6029190cc3 --- /dev/null +++ b/script/get-generic-python-lib/install.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +echo "" + +if [[ ${CM_GENERIC_PYTHON_PACKAGE_VARIANT} == "nvidia-apex-depreciated" ]]; then + cd ${CM_GIT_REPO_CHECKOUT_PATH} + cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip install -v --disable-pip-version-check --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./" + echo $cmd + eval $cmd + test $? -eq 0 || exit $? + exit 0 +fi + +if [[ ${CM_GENERIC_PYTHON_PACKAGE_NAME} == "tensorflow_old" ]]; then + if [[ ${CM_HOST_OS_FLAVOR} == "macos" ]]; then + . ${CM_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-macos.sh + test $? -eq 0 || exit $? + exit 0 + fi + if [[ ${CM_HOST_PLATFORM_FLAVOR} == "aarch64" ]]; then + . ${CM_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-aarch64.sh + test $? -eq 0 || exit $? + exit 0 + fi +fi +if [[ -n ${CM_GENERIC_PYTHON_PIP_URL} ]]; then + cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip install \"${CM_GENERIC_PYTHON_PIP_URL}\" ${CM_GENERIC_PYTHON_PIP_EXTRA}" + echo $cmd + eval $cmd + test $? 
-eq 0 || exit $? + exit 0 +fi + +cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip install \"${CM_GENERIC_PYTHON_PACKAGE_NAME}${CM_TMP_PIP_VERSION_STRING}\" ${CM_GENERIC_PYTHON_PIP_EXTRA}" +echo $cmd +eval $cmd +test $? -eq 0 || exit $? diff --git a/script/get-generic-python-lib/run.bat b/script/get-generic-python-lib/run.bat new file mode 100644 index 0000000000..2612377c89 --- /dev/null +++ b/script/get-generic-python-lib/run.bat @@ -0,0 +1,4 @@ +IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD% + +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\detect-version.py +IF %ERRORLEVEL% NEQ 0 EXIT 1 diff --git a/script/get-generic-python-lib/run.sh b/script/get-generic-python-lib/run.sh new file mode 100644 index 0000000000..b60ac0814c --- /dev/null +++ b/script/get-generic-python-lib/run.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/detect-version.py +test $? -eq 0 || exit $? +exit 0 diff --git a/script/get-generic-python-lib/tensorflow/run-aarch64.sh b/script/get-generic-python-lib/tensorflow/run-aarch64.sh new file mode 100644 index 0000000000..6c11efb71b --- /dev/null +++ b/script/get-generic-python-lib/tensorflow/run-aarch64.sh @@ -0,0 +1,13 @@ +CM_PYTHON_BIN=${CM_PYTHON_BIN_WITH_PATH:-python3} + +${CM_PYTHON_BIN} -m pip install --upgrade pip ${CM_PYTHON_PIP_COMMON_EXTRA} +${CM_PYTHON_BIN} -m pip install setuptools testresources wheel h5py --user --upgrade --ignore-installed ${CM_PYTHON_PIP_COMMON_EXTRA} + +curl https://sh.rustup.rs -sSf -o tmp.sh +sh tmp.sh -y + +export PATH=$PATH:$HOME/.cargo/bin + +${CM_PYTHON_BIN} -m pip install tensorflow-aarch64${CM_TMP_PIP_VERSION_STRING} --user ${CM_PYTHON_PIP_COMMON_EXTRA} +test $? -eq 0 || exit 1 +echo "CM_GENERIC_PYTHON_PACKAGE_NAME=tensorflow-aarch64" >> $PWD/tmp-run-env.out diff --git a/script/get-generic-python-lib/tensorflow/run-macos.sh b/script/get-generic-python-lib/tensorflow/run-macos.sh new file mode 100644 index 0000000000..525b532eb5 --- /dev/null +++ b/script/get-generic-python-lib/tensorflow/run-macos.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +CM_PYTHON_BIN=${CM_PYTHON_BIN:-python3} + +${CM_PYTHON_BIN} -m pip install tensorflow-macos${CM_TMP_PIP_VERSION_STRING} +test $? -eq 0 || exit 1 +echo "CM_GENERIC_PYTHON_PACKAGE_NAME=tensorflow-macos" >> $PWD/tmp-run-env.out diff --git a/script/get-generic-python-lib/uninstall_deps.sh b/script/get-generic-python-lib/uninstall_deps.sh new file mode 100644 index 0000000000..eeddf36d7b --- /dev/null +++ b/script/get-generic-python-lib/uninstall_deps.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +if [[ -n ${CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS} ]]; then + cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip uninstall ${CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS} -y ${CM_PYTHON_PIP_COMMON_EXTRA}" + echo "$cmd" + eval "$cmd" + test $? -eq 0 || exit $? 
+fi diff --git a/script/get-generic-sys-util/README.md b/script/get-generic-sys-util/README.md new file mode 100644 index 0000000000..856695c4c6 --- /dev/null +++ b/script/get-generic-sys-util/README.md @@ -0,0 +1,228 @@ +Automatically generated README for this automation recipe: **get-generic-sys-util** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-generic-sys-util,bb0393afa8404a11) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-sys-util)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,sys-util,generic,generic-sys-util* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get sys-util generic generic-sys-util" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,sys-util,generic,generic-sys-util` + +`cm run script --tags=get,sys-util,generic,generic-sys-util[,variations] ` + +*or* + +`cmr "get sys-util generic generic-sys-util"` + +`cmr "get sys-util generic generic-sys-util [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,sys-util,generic,generic-sys-util',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
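+A variation can be selected from Python by appending its tag, prefixed with `_`,
+to the `tags` string, just as on the command line. A minimal sketch, assuming
+the `_rsync` variation listed under "Variations" below:
+
+```python
+import cmind
+
+# Select the _rsync variation by appending it to the tag list
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,sys-util,generic,generic-sys-util,_rsync',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+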
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,sys-util,generic,generic-sys-util"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,sys-util,generic,generic-sys-util) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get sys-util generic generic-sys-util[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_g++-12` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `g++12` + - Workflow: + * `_gflags-dev` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `gflags-dev` + - Workflow: + * `_git-lfs` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `git-lfs` + - Workflow: + * `_glog-dev` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `glog-dev` + - Workflow: + * `_libboost-all-dev` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `libboost-all-dev` + - Workflow: + * `_libffi7` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `libffi7` + - Workflow: + * `_libgmock-dev` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `libgmock-dev` + - Workflow: + * `_libmpfr-dev` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `libmpfr-dev` + - Workflow: + * `_libnuma-dev` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `libnuma-dev` + - Workflow: + * `_libpci-dev` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `libpci-dev` + - Workflow: + * `_libre2-dev` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `libre2-dev` + - Workflow: + * `_libudev-dev` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `libudev-dev` + - Workflow: + * `_ninja-build` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `ninja-build` + - Workflow: + * `_ntpdate` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `ntpdate` + - Workflow: + * `_numactl` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `numactl` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * install,numactl,from.src + * `if (CM_HOST_OS_FLAVOR == rhel AND CM_HOST_OS_VERSION in ['9.1', '9.2', '9.3'])` + - CM script: [install-numactl-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-numactl-from-src) + * `_nvidia-cuda-toolkit` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `nvidia-cuda-toolkit` + - Workflow: + * `_rapidjson-dev` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `rapidjson-dev` + - Workflow: + * `_rsync` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `rsync` + - Workflow: + * `_screen` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `screen` + - Workflow: + * `_sox` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `sox` + - Workflow: + * `_transmission` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `transmission` + - Workflow: + * `_zlib` + - Environment variables: + - *CM_SYS_UTIL_NAME*: `zlib` + - Workflow: + +
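+Each variation above only sets `CM_SYS_UTIL_NAME`; the per-package-manager
+package names are kept in the script `state` (see `_cm.json` below). A
+simplified Python sketch of the lookup performed by `customize.py`, with a
+hypothetical `state` excerpt and install command inlined for illustration:
+
+```python
+# Hypothetical excerpt of the state mapping from _cm.json
+state = {'rsync': {'apt': 'rsync', 'dnf': 'rsync', 'yum': 'rsync', 'brew': 'rsync'}}
+
+def resolve_install_cmd(util, pm, install_cmd, sudo='sudo'):
+    """Pick the package name for the detected package manager and
+    assemble the installation command, mirroring customize.py."""
+    package = state.get(util)
+    if not package or not package.get(pm):
+        raise ValueError('No package name specified for {} and util name {}'.format(pm, util))
+    prefix = '' if pm == 'brew' else sudo    # brew must not run under sudo
+    return (prefix + ' ' + install_cmd + ' ' + package[pm]).strip()
+
+print(resolve_install_cmd('rsync', 'apt', 'apt-get install -y'))
+# -> sudo apt-get install -y rsync
+```
+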
+ +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+* CM_CLEAN_DIRS: `bin`
+* CM_SUDO: `sudo`
+
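+A sketch of overriding one of these defaults from the Python API, assuming the
+`env` input dictionary is forwarded to the script environment in the same way
+as the `--env.KEY=VALUE` flag:
+
+```python
+import cmind
+
+# Run without sudo by overriding the CM_SUDO default (assumption: the 'env'
+# input key behaves like --env.CM_SUDO="" on the command line)
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,sys-util,generic,generic-sys-util,_rsync',
+                  'env': {'CM_SUDO': ''},
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+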
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-sys-util/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-sys-util/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-sys-util/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-sys-util/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-sys-util/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-generic-sys-util/_cm.json) + +___ +### Script output +`cmr "get sys-util generic generic-sys-util [,variations]" -j` +#### New environment keys (filter) + +* `+PATH` +#### New environment keys auto-detected from customize diff --git a/script/get-generic-sys-util/_cm.json b/script/get-generic-sys-util/_cm.json new file mode 100644 index 0000000000..5fa723b24a --- /dev/null +++ b/script/get-generic-sys-util/_cm.json @@ -0,0 +1,317 @@ +{ + "alias": "get-generic-sys-util", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "deps": [ + { + "tags": "detect,os" + } + ], + "default_env": { + "CM_CLEAN_DIRS": "bin", + "CM_SUDO": "sudo" + }, + "new_env_keys": [ + "+PATH" + ], + "tags": [ + "get", + "sys-util", + "generic", + "generic-sys-util" + ], + "uid": "bb0393afa8404a11", + "variations": { + "glog-dev": { + "env": { + "CM_SYS_UTIL_NAME": "glog-dev" + }, + "state": { + "glog-dev": { + "apt": "libgoogle-glog-dev", + "dnf": "glog-devel", + "yum": "glog-devel", + "brew": "glog" + } + } + }, + "gflags-dev": { + "env": { + "CM_SYS_UTIL_NAME": "gflags-dev" + }, + "state": { + "gflags-dev": { + "apt": "libgflags-dev", + "dnf": "gflags-devel", + "yum": "gflags-devel", + "brew": "gflags" + } + } + }, + "sox": { + "env": { + "CM_SYS_UTIL_NAME": "sox" + }, + "state": { + "sox": { + "apt": "sox", + "dnf": "sox", + "brew": "sox" + } + } + }, + "libre2-dev": { + "env": { + "CM_SYS_UTIL_NAME": "libre2-dev" + }, + "state": { + "libre2-dev": { + "apt": "libre2-dev", + "yum": "libre-devel", + "dnf": "libre-devel", + "brew": "" + } + } + }, + "libboost-all-dev": { + "env": { + "CM_SYS_UTIL_NAME": "libboost-all-dev" + }, + "state": { + "libboost-all-dev": { + "apt": "libboost-all-dev", + "dnf": "boost-devel", + "yum": "boost-devel", + "brew": "" + } + } + }, + "libnuma-dev": { + "env": { + "CM_SYS_UTIL_NAME": "libnuma-dev" + }, + "state": { + "libnuma-dev": { + "apt": "libnuma-dev", + "yum": "numactl-libs", + "dnf": "numactl-libs", + "brew": "" + } + } + }, + "libmpfr-dev": { + "env": { + "CM_SYS_UTIL_NAME": "libmpfr-dev" + }, + "state": { + "libmpfr-dev": { + "apt": "libmpfr-dev", + "yum": "mpfr-devel.x86_64", + "dnf": "mpfr-devel.x86_64", + "brew": "", + "zypper": "mpfr-devel" + } + } + }, + "rapidjson-dev": { + "env": { + "CM_SYS_UTIL_NAME": "rapidjson-dev" + }, + "state": { + "rapidjson-dev": { + "apt": "rapidjson-dev", + "dnf": "rapidjson-devel", + "yum": "rapidjson-devel", + 
"brew": "" + } + } + }, + "screen": { + "env": { + "CM_SYS_UTIL_NAME": "screen" + }, + "state": { + "screen": { + "apt": "screen", + "dnf": "screen", + "yum": "screen", + "brew": "screen", + "zypper": "rsync" + } + } + }, + "ninja-build": { + "env": { + "CM_SYS_UTIL_NAME": "ninja-build" + }, + "state": { + "ninja-build": { + "apt": "ninja-build", + "dnf": "ninja-build", + "yum": "ninja-build", + "brew": "ninja-build", + "zypper": "ninja-build" + } + } + }, + "rsync": { + "env": { + "CM_SYS_UTIL_NAME": "rsync" + }, + "state": { + "rsync": { + "apt": "rsync", + "dnf": "rsync", + "yum": "rsync", + "brew": "rsync", + "zypper": "rsync" + } + } + }, + "transmission": { + "env": { + "CM_SYS_UTIL_NAME": "transmission" + }, + "state": { + "transmission": { + "apt": "transmission-daemon", + "dnf": "transmission-daemon", + "yum": "transmission-daemon", + "brew": "transmission" + } + } + }, + "libpci-dev": { + "env": { + "CM_SYS_UTIL_NAME": "libpci-dev" + }, + "state": { + "libpci-dev": { + "apt": "libpci-dev", + "dnf": "pciutils-devel", + "yum": "pciutils-devel", + "brew": "" + } + } + }, + "libudev-dev": { + "env": { + "CM_SYS_UTIL_NAME": "libudev-dev" + }, + "state": { + "libudev-dev": { + "apt": "libudev-dev", + "dnf": "libudev-devl", + "yum": "libudev-devel", + "brew": "" + } + } + }, + "libgmock-dev": { + "env": { + "CM_SYS_UTIL_NAME": "libgmock-dev" + }, + "state": { + "libgmock-dev": { + "apt": "libgmock-dev", + "dnf": "gmock-devel", + "yum": "gmock-devel", + "brew": "" + } + } + }, + "zlib": { + "env": { + "CM_SYS_UTIL_NAME": "zlib" + }, + "state": { + "zlib": { + "apt": "zlib1g" + } + } + }, + "nvidia-cuda-toolkit": { + "env": { + "CM_SYS_UTIL_NAME": "nvidia-cuda-toolkit" + }, + "state": { + "nvidia-cuda-toolkit": { + "apt": "nvidia-cuda-toolkit", + "dnf": "nvidia-cuda-toolkit", + "yum": "nvidia-cuda-toolkit", + "brew": "" + } + } + }, + "ntpdate": { + "env": { + "CM_SYS_UTIL_NAME": "ntpdate" + }, + "state": { + "ntpdate": { + "apt": "ntpdate", + "dnf": "ntpdate", + "yum": "ntpdate", + "brew": "" + } + } + }, + "git-lfs": { + "env": { + "CM_SYS_UTIL_NAME": "git-lfs" + }, + "state": { + "git-lfs": { + "apt": "git-lfs", + "dnf": "git-lfs", + "yum": "git-lfs", + "brew": "git-lfs" + } + } + }, + "libffi7": { + "env": { + "CM_SYS_UTIL_NAME": "libffi7" + }, + "state": { + "libffi7": { + "apt": "libffi7" + } + } + }, + "numactl": { + "env": { + "CM_SYS_UTIL_NAME": "numactl" + }, + "state": { + "numactl": { + "apt": "numactl", + "dnf": "numactl-devel", + "yum": "numactl-devel" + } + }, + "deps": [ + { + "tags": "install,numactl,from.src", + "enable_if_env": { + "CM_HOST_OS_FLAVOR": [ "rhel" ], + "CM_HOST_OS_VERSION": [ "9.1", "9.2", "9.3" ] + } + } + ] + }, + "g++-12": { + "env": { + "CM_SYS_UTIL_NAME": "g++12" + }, + "state": { + "g++12": { + "apt": "g++-12", + "dnf": "gcc-toolset-12-gcc-c++" + } + } + } + } +} diff --git a/script/get-generic-sys-util/customize.py b/script/get-generic-sys-util/customize.py new file mode 100644 index 0000000000..ef0bc33e8c --- /dev/null +++ b/script/get-generic-sys-util/customize.py @@ -0,0 +1,53 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + state = i['state'] + pm = env.get('CM_HOST_OS_PACKAGE_MANAGER') + + if os_info['platform'] == 'windows': + print ('') + print ('WARNING: for now skipping get-generic-sys-util on Windows ...') + print ('') + + return {'return':0} + + if not pm: + return {'return': 1, 'error': 'Package manager not detected for the given OS'} + + util = env.get('CM_SYS_UTIL_NAME') + if 
not util: + return {'return': 1, 'error': 'Please select a variation specifying the sys util name'} + + package = state.get(util) + if not package: + return {'return': 1, 'error': 'No package name specified for {} and util name {}'.format(pm, util)} + + package_name = package.get(pm) + if not package_name: + return {'return': 1, 'error': 'No package name specified for {} and util name {}'.format(pm, util)} + + install_cmd = env.get('CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD') + if not install_cmd: + return {'return': 1, 'error': 'Package manager installation command not detected for the given OS'} + + if pm == "brew": + sudo = '' + else: + sudo = env.get('CM_SUDO', '') + env['CM_SYS_UTIL_INSTALL_CMD'] = sudo + ' ' +install_cmd + ' ' + package_name + + env['+PATH'] = [] + + if env.get('CM_HOST_OS_FLAVOR', '') == 'rhel': + if env['CM_SYS_UTIL_NAME'] == "g++12": + env['+PATH'] = [ "/opt/rh/gcc-toolset-12/root/usr/bin" ] + + if env['CM_SYS_UTIL_NAME'] == "numactl" and env['CM_HOST_OS_VERSION'] in [ "9.1", "9.2", "9.3" ]: + env['CM_SYS_UTIL_INSTALL_CMD'] = '' + + return {'return':0} diff --git a/script/get-generic-sys-util/run.sh b/script/get-generic-sys-util/run.sh new file mode 100644 index 0000000000..27c2f62867 --- /dev/null +++ b/script/get-generic-sys-util/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +cmd=${CM_SYS_UTIL_INSTALL_CMD} +echo $cmd +eval $cmd +test $? -eq 0 || exit $? diff --git a/script/get-git-repo/README-extra.md b/script/get-git-repo/README-extra.md new file mode 100644 index 0000000000..83a368e5f0 --- /dev/null +++ b/script/get-git-repo/README-extra.md @@ -0,0 +1,20 @@ +# Get GIT Repository +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones any specified GIT repository. + +## Commands +To install +``` +cm run script --tags=get,git,repo,_repo.,[VARIATION] +``` +where [VARIATION] is one of +* `patch:` Applies the `git.patch` to the cloned git repository +* `short-history:` Uses a git depth of last 10 commits (significantly reduces the download size) +* `full-history:` Uses the full git history +* `no-recurse-submodules:` Only download the main repository + +## Exported Variables +* `CM_GIT_CHECKOUT_PATH`: Directory path of the cloned git repository + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/script/get-git-repo/README.md b/script/get-git-repo/README.md new file mode 100644 index 0000000000..3c55e2b4a0 --- /dev/null +++ b/script/get-git-repo/README.md @@ -0,0 +1,241 @@ +Automatically generated README for this automation recipe: **get-git-repo** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-git-repo,ed603e7292974f10) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-git-repo)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,git,repo,repository,clone* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get git repo repository clone" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,git,repo,repository,clone` + +`cm run script --tags=get,git,repo,repository,clone[,variations] [--input_flags]` + +*or* + +`cmr "get git repo repository clone"` + +`cmr "get git repo repository clone [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,git,repo,repository,clone',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
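+Since the repository URL is passed through the wildcard `_repo.#` variation
+(see the "repo" group below), it becomes part of the tag itself. A minimal
+sketch cloning the default `mlcommons@ck` repository from its `master` branch:
+
+```python
+import cmind
+
+# The URL after '_repo.' is mapped to CM_GIT_URL by the repo.# variation;
+# '_branch.master' is mapped to CM_GIT_BRANCH by the branch.# variation
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,git,repo,_repo.https://github.com/mlcommons/ck,_branch.master',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+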
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,git,repo,repository,clone"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,git,repo,repository,clone) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get git repo repository clone[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_lfs` + - Environment variables: + - *CM_GIT_REPO_NEEDS_LFS*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic,sys-util,_git-lfs + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * `_no-recurse-submodules` + - Environment variables: + - *CM_GIT_RECURSE_SUBMODULES*: `` + - Workflow: + * `_patch` + - Environment variables: + - *CM_GIT_PATCH*: `yes` + - Workflow: + * `_submodules.#` + - Environment variables: + - *CM_GIT_SUBMODULES*: `#` + - Workflow: + +
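+The `_submodules.#` variation above takes a comma-separated list, which the
+native `run.sh` (later in this patch) splits and initializes one entry at a
+time. A Python sketch of the equivalent loop, with a hypothetical submodule
+list:
+
+```python
+import subprocess
+
+submodules = 'third_party/gtest,third_party/json'   # hypothetical CM_GIT_SUBMODULES value
+
+for submodule in submodules.split(','):
+    if submodule:
+        print('Initializing submodule {}'.format(submodule))
+        subprocess.run(['git', 'submodule', 'update', '--init', submodule], check=True)
+```
+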
+ + + * Group "**checkout**" +
+ Click here to expand this section. + + * `_branch.#` + - Environment variables: + - *CM_GIT_BRANCH*: `#` + - Workflow: + * `_sha.#` + - Environment variables: + - *CM_GIT_SHA*: `#` + - Workflow: + * `_tag.#` + - Environment variables: + - *CM_GIT_CHECKOUT_TAG*: `#` + - Workflow: + +
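+The checkout variations above are mutually exclusive within the group, and
+`customize.py` (later in this patch) resolves them by preferring an explicit
+SHA over a branch; the `-b` clone option is only added when no SHA is given.
+A condensed sketch of that resolution:
+
+```python
+# Condensed from get-git-repo/customize.py; example input: branch only, no SHA
+env = {'CM_GIT_BRANCH': 'master'}
+
+if env.get('CM_GIT_CHECKOUT', '') == '':
+    env['CM_GIT_CHECKOUT'] = env.get('CM_GIT_SHA', env.get('CM_GIT_BRANCH', ''))
+
+git_checkout_string = ' -b ' + env['CM_GIT_BRANCH'] \
+    if ('CM_GIT_BRANCH' in env and env.get('CM_GIT_SHA', '') == '') else ''
+
+print(git_checkout_string)   # -> ' -b master'
+```
+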
+ + + * Group "**git-history**" +
+ Click here to expand this section. + + * `_full-history` + - Environment variables: + - *CM_GIT_DEPTH*: `` + - Workflow: + * **`_short-history`** (default) + - Environment variables: + - *CM_GIT_DEPTH*: `--depth 5` + - Workflow: + +
+ + + * Group "**repo**" +
+ Click here to expand this section. + + * `_repo.#` + - Environment variables: + - *CM_GIT_URL*: `#` + - Workflow: + +
+ + +#### Default variations + +`_short-history` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--branch=value` → `CM_GIT_CHECKOUT=value`
+* `--depth=value` → `CM_GIT_DEPTH=value`
+* `--env_key=value` → `CM_GIT_ENV_KEY=value`
+* `--folder=value` → `CM_GIT_CHECKOUT_FOLDER=value`
+* `--patch=value` → `CM_GIT_PATCH=value`
+* `--submodules=value` → `CM_GIT_RECURSE_SUBMODULES=value`
+* `--update=value` → `CM_GIT_REPO_PULL=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({..., "branch":...})
+```
+
+ +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+* CM_GIT_DEPTH: `--depth 4`
+* CM_GIT_CHECKOUT_FOLDER: `repo`
+* CM_GIT_PATCH: `no`
+* CM_GIT_RECURSE_SUBMODULES: ` --recurse-submodules`
+* CM_GIT_URL: `https://github.com/mlcommons/ck.git`
+
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-git-repo/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-git-repo/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-git-repo/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-git-repo/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-git-repo/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-git-repo/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-git-repo/customize.py)*** + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-git-repo/_cm.json)*** + * pull,git,repo + * `if (CM_GIT_REPO_PULL in ['yes', 'True'])` + * CM names: `--adr.['pull-git-repo']...` + - CM script: [pull-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/pull-git-repo) + +___ +### Script output +`cmr "get git repo repository clone [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `<<>>` +* `CM_GIT_CHECKOUT_PATH` +* `CM_GIT_REPO_*` +#### New environment keys auto-detected from customize + +* `CM_GIT_CHECKOUT_PATH` +* `CM_GIT_REPO_CURRENT_HASH` \ No newline at end of file diff --git a/script/get-git-repo/_cm.json b/script/get-git-repo/_cm.json new file mode 100644 index 0000000000..3d178f9242 --- /dev/null +++ b/script/get-git-repo/_cm.json @@ -0,0 +1,124 @@ +{ + "alias": "get-git-repo", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "DevOps automation", + "default_env": { + "CM_GIT_DEPTH": "--depth 4", + "CM_GIT_CHECKOUT_FOLDER": "repo", + "CM_GIT_PATCH": "no", + "CM_GIT_RECURSE_SUBMODULES": " --recurse-submodules", + "CM_GIT_URL": "https://github.com/mlcommons/ck.git" + }, + "input_mapping": { + "branch": "CM_GIT_CHECKOUT", + "depth": "CM_GIT_DEPTH", + "folder": "CM_GIT_CHECKOUT_FOLDER", + "patch": "CM_GIT_PATCH", + "update": "CM_GIT_REPO_PULL", + "env_key": "CM_GIT_ENV_KEY", + "submodules": "CM_GIT_RECURSE_SUBMODULES" + }, + "deps": [ + { + "tags": "detect,os" + } + ], + "post_deps": [ + { + "tags": "pull,git,repo", + "names": [ "pull-git-repo" ], + "dynamic": true, + "enable_if_env": { + "CM_GIT_REPO_PULL": [ "yes", "True" ] + }, + "force_env_keys": [ + "CM_GIT_CHECKOUT_PATH" + ] + } + ], + "new_env_keys": [ + "CM_GIT_CHECKOUT_PATH", + "CM_GIT_REPO_*", + "<<>>" + ], + "tags": [ + "get", + "git", + "repo", + "repository", + "clone" + ], + "uid": "ed603e7292974f10", + "variations": { + "full-history": { + "group": "git-history", + "env": { + "CM_GIT_DEPTH": "" + } + }, + "no-recurse-submodules": { + "env": { + "CM_GIT_RECURSE_SUBMODULES": "" + } + }, + "patch": { + "env": { + "CM_GIT_PATCH": "yes" + } + }, + "short-history": { + "group": "git-history", + "default": true, + "env": { + "CM_GIT_DEPTH": "--depth 5" + } + }, + "branch.#": { + "group": "checkout", + "env": { + "CM_GIT_BRANCH": "#" + } + }, + "sha.#": { + "group": "checkout", + "env": { + "CM_GIT_SHA": "#" + }, 
+ "default_variations": { + "git-history": "full-history" + } + }, + "tag.#": { + "group": "checkout", + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + } + }, + "submodules.#": { + "env": { + "CM_GIT_SUBMODULES": "#" + } + }, + "repo.#": { + "group": "repo", + "env": { + "CM_GIT_URL": "#" + } + }, + "lfs": { + "env": { + "CM_GIT_REPO_NEEDS_LFS": "yes" + }, + "deps": [ + { + "tags": "get,generic,sys-util,_git-lfs" + } + ] + } + }, + "print_env_at_the_end" : { + "CM_GIT_CHECKOUT_PATH": "CM cache path to the Git repo" + } +} diff --git a/script/get-git-repo/customize.py b/script/get-git-repo/customize.py new file mode 100644 index 0000000000..d1e92f7040 --- /dev/null +++ b/script/get-git-repo/customize.py @@ -0,0 +1,89 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + +# if os_info['platform'] == 'windows': +# return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + meta = i['meta'] + + + env_key = get_env_key(env) + + cm_git_url = env['CM_GIT_URL'] + + + if 'CM_GIT_REPO_NAME' not in env: + update_env(env, 'CM_GIT_REPO{}_NAME', env_key, os.path.basename(env['CM_GIT_URL'])) + + if 'CM_GIT_DEPTH' not in env: + env['CM_GIT_DEPTH'] = '' + + if 'CM_GIT_RECURSE_SUBMODULES' not in env: + env['CM_GIT_RECURSE_SUBMODULES'] = '' + + if env.get('CM_GIT_CHECKOUT', '') == '': + env['CM_GIT_CHECKOUT'] = env.get('CM_GIT_SHA', env.get('CM_GIT_BRANCH', '')) + + git_checkout_string = " -b "+ env['CM_GIT_BRANCH'] if ("CM_GIT_BRANCH" in env and env.get('CM_GIT_SHA', '') == '') else "" + + git_clone_cmd = "git clone " + env['CM_GIT_RECURSE_SUBMODULES'] + git_checkout_string + " " + env['CM_GIT_URL'] + " " + env.get('CM_GIT_DEPTH','') + ' ' + env['CM_GIT_CHECKOUT_FOLDER'] + + env['CM_GIT_CLONE_CMD'] = git_clone_cmd + env['CM_TMP_GIT_PATH'] = os.path.join(os.getcwd(), env['CM_GIT_CHECKOUT_FOLDER'], ".git") + + return {'return':0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + env['CM_GIT_CHECKOUT_PATH'] = os.path.join(os.getcwd(), env['CM_GIT_CHECKOUT_FOLDER']) + git_checkout_path = env['CM_GIT_CHECKOUT_PATH'] + + env_key = get_env_key(env) + + # We remap CM_GIT variables with CM_GIT_REPO prefix so that they don't contaminate the env of the parent script + update_env(env, 'CM_GIT_REPO{}_CHECKOUT_PATH', env_key, env['CM_GIT_CHECKOUT_PATH']) + update_env(env, 'CM_GIT_REPO{}_URL', env_key, env['CM_GIT_URL']) + update_env(env, 'CM_GIT_REPO{}_CHECKOUT', env_key, env['CM_GIT_CHECKOUT']) + update_env(env, 'CM_GIT_REPO{}_DEPTH', env_key, env['CM_GIT_DEPTH']) + update_env(env, 'CM_GIT_REPO{}_CHECKOUT_FOLDER', env_key, env['CM_GIT_CHECKOUT_FOLDER']) + update_env(env, 'CM_GIT_REPO{}_PATCH', env_key, env['CM_GIT_PATCH']) + update_env(env, 'CM_GIT_REPO{}_RECURSE_SUBMODULES', env_key, env['CM_GIT_RECURSE_SUBMODULES']) + + if (env.get('CM_GIT_CHECKOUT_PATH_ENV_NAME','') != ''): + env[env['CM_GIT_CHECKOUT_PATH_ENV_NAME']] = git_checkout_path + + env['CM_GET_DEPENDENT_CACHED_PATH'] = git_checkout_path + + if os.path.exists("tmp-cm-git-hash.out"): + with open("tmp-cm-git-hash.out", "r") as f: + git_hash = f.readline().strip() + env['CM_GIT_REPO_CURRENT_HASH'] = git_hash + + return {'return':0} + +def get_env_key(env): + + env_key = env.get('CM_GIT_ENV_KEY','') + + if env_key!='' and not env_key.startswith('_'): + env_key = '_' + env_key + + return env_key + +def update_env(env, key, env_key, var): + + env[key.format('')] = var + + if env_key!='': + env[key.format(env_key)] = var + + return diff --git 
a/script/get-git-repo/run.bat b/script/get-git-repo/run.bat new file mode 100644 index 0000000000..583d324ee2 --- /dev/null +++ b/script/get-git-repo/run.bat @@ -0,0 +1,67 @@ +@echo off + +rem echo ****************************************************** +rem echo Cloning MLCommons from %CM_GIT_URL% with branch %CM_GIT_CHECKOUT% %CM_GIT_DEPTH% %CM_GIT_RECURSE_SUBMODULES% ... + +rem git clone %CM_GIT_RECURSE_SUBMODULES% %CM_GIT_URL% %CM_GIT_DEPTH% inference +rem cd inference +rem git checkout -b "%CM_GIT_CHECKOUT%" +rem + +rem Next line allows ERRORLEVEL inside if statements! +setlocal enabledelayedexpansion + +set CUR_DIR=%cd% +set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% + +set folder=%CM_GIT_CHECKOUT_FOLDER% + +if not exist "%CM_TMP_GIT_PATH%" ( + + if exist %folder% ( + deltree %folder% + ) + echo ****************************************************** + echo Current directory: %CUR_DIR% + echo. + echo Cloning %CM_GIT_REPO_NAME% from %CM_GIT_URL% + echo. + echo "%CM_GIT_CLONE_CMD%" + echo. + %CM_GIT_CLONE_CMD% + IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL! + cd %folder% + if not "%CM_GIT_SHA%" == "" ( + echo. + echo. + git checkout "%CM_GIT_CHECKOUT%" + IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL! + ) + +) else ( + + cd %folder% + +) + +if not "%CM_GIT_SUBMODULES%" == "" ( + for /F %%s in ("%CM_GIT_SUBMODULES%") do ( + echo. + echo Initializing submodule %%s + git submodule update --init %%s + IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL! + ) +) + +if "%CM_GIT_PATCH%" == "yes" ( + for %%x in (%CM_GIT_PATCH_FILEPATHS%) do ( + echo. + echo Applying patch %%x ... + git apply %%x + IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL! + ) +) + +cd %CUR_DIR% + +exit /b 0 diff --git a/script/get-git-repo/run.sh b/script/get-git-repo/run.sh new file mode 100644 index 0000000000..06f27b1c78 --- /dev/null +++ b/script/get-git-repo/run.sh @@ -0,0 +1,74 @@ +#!/bin/bash + +CUR_DIR=$PWD +SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} + +folder=${CM_GIT_CHECKOUT_FOLDER} +if [ ! -d "${CM_TMP_GIT_PATH}" ]; then + rm -rf ${folder} + echo "******************************************************" + echo "Current directory: ${CUR_DIR}" + echo "" + echo "Cloning ${CM_GIT_REPO_NAME} from ${CM_GIT_URL}" + echo "" + echo "${CM_GIT_CLONE_CMD}"; + echo "" + + ${CM_GIT_CLONE_CMD} + if [ "${?}" != "0" ]; then exit $?; fi + + cd ${folder} + + if [ ! -z ${CM_GIT_SHA} ]; then + + echo "" + cmd="git checkout -b ${CM_GIT_SHA} ${CM_GIT_SHA}" + echo "$cmd" + eval "$cmd" + if [ "${?}" != "0" ]; then exit $?; fi + + elif [ ! 
-z ${CM_GIT_CHECKOUT_TAG} ]; then + + echo "" + cmd="git fetch --all --tags" + echo "$cmd" + eval "$cmd" + cmd="git checkout tags/${CM_GIT_CHECKOUT_TAG} -b ${CM_GIT_CHECKOUT_TAG}" + echo "$cmd" + eval "$cmd" + if [ "${?}" != "0" ]; then exit $?; fi + + else + cmd="git rev-parse HEAD >> ../tmp-cm-git-hash.out" + echo "$cmd" + eval "$cmd" + if [ "${?}" != "0" ]; then exit $?; fi + fi + +else + cd ${folder} +fi + + +IFS=',' read -r -a submodules <<< "${CM_GIT_SUBMODULES}" + +for submodule in "${submodules[@]}" +do + echo "" + echo "Initializing submodule ${submodule}" + git submodule update --init "${submodule}" + if [ "${?}" != "0" ]; then exit $?; fi +done + +if [ ${CM_GIT_PATCH} == "yes" ]; then + IFS=', ' read -r -a patch_files <<< ${CM_GIT_PATCH_FILEPATHS} + for patch_file in "${patch_files[@]}" + do + echo "" + echo "Applying patch $patch_file" + git apply "$patch_file" + if [ "${?}" != "0" ]; then exit $?; fi + done +fi + +cd "$CUR_DIR" diff --git a/script/get-github-cli/README.md b/script/get-github-cli/README.md new file mode 100644 index 0000000000..2ed8ebd7f7 --- /dev/null +++ b/script/get-github-cli/README.md @@ -0,0 +1,122 @@ +Automatically generated README for this automation recipe: **get-github-cli** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-github-cli,1417029c6ff44f21) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-github-cli)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,gh,gh-cli,github,cli,github-cli* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get gh gh-cli github cli github-cli" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,gh,gh-cli,github,cli,github-cli` + +`cm run script --tags=get,gh,gh-cli,github,cli,github-cli ` + +*or* + +`cmr "get gh gh-cli github cli github-cli"` + +`cmr "get gh gh-cli github cli github-cli " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,gh,gh-cli,github,cli,github-cli',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
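+`customize.py` (later in this patch) extracts the version from the output of
+`gh --version`, which `run.sh`/`run.bat` redirect to `tmp-ver.out`. A
+standalone sketch of the same regular expression, with an illustrative output
+line:
+
+```python
+import re
+
+output = 'gh version 2.32.1 (2023-07-24)'   # illustrative `gh --version` output
+
+m = re.search(r'gh\s*version\s*([\d.]+)', output)
+if m:
+    print('Detected version: {}'.format(m.group(1)))   # -> 2.32.1
+```
+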
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,gh,gh-cli,github,cli,github-cli"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,gh,gh-cli,github,cli,github-cli) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get gh gh-cli github cli github-cli" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-github-cli/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-github-cli/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-github-cli/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-github-cli/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-github-cli/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-github-cli/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-github-cli/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-github-cli/_cm.json) + +___ +### Script output +`cmr "get gh gh-cli github cli github-cli " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/get-github-cli/_cm.json b/script/get-github-cli/_cm.json new file mode 100644 index 0000000000..07f40c672d --- /dev/null +++ b/script/get-github-cli/_cm.json @@ -0,0 +1,22 @@ +{ + "alias": "get-github-cli", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "DevOps automation", + "clean_files": [], + "deps": [ + { + "tags": "detect,os" + } + ], + "tags": [ + "get", + "gh", + "gh-cli", + "github", + "cli", + "github-cli" + ], + "uid": "1417029c6ff44f21" +} diff --git a/script/get-github-cli/customize.py b/script/get-github-cli/customize.py new file mode 100644 index 0000000000..8c64641189 --- /dev/null +++ b/script/get-github-cli/customize.py @@ -0,0 +1,54 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'gh.exe' if os_info['platform'] == 'windows' else 'gh' + + # Will check env['CM_TMP_PATH'] if comes from installation script + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': 'PATH', + 'detect_version':True, + 'env_path_key':'CM_GITHUBCLI_BIN_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + if r['return'] >0 : + if r['return'] == 16: + if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': + return r + + print (recursion_spaces+' # {}'.format(r['error'])) + + # Attempt to run installer + r = {'return':0, 'skip':True, 'script':{'tags':'install,github-cli'}} + + return r + + found_path = r['found_path'] + + return {'return':0} + + +def postprocess(i): + env = i['env'] + + r = i['automation'].parse_version({'match_text': r'gh\s*version\s*([\d.]+)', + 'group_number': 1, + 'env_key':'CM_GITHUBCLI_VERSION', + 'which_env':i['env']}) + if r['return'] >0: return r + + version = r['version'] + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + + return {'return':0, 'version':version} diff --git a/script/get-github-cli/run.bat b/script/get-github-cli/run.bat new file mode 100644 index 
0000000000..5d06678cf3 --- /dev/null +++ b/script/get-github-cli/run.bat @@ -0,0 +1 @@ +gh.exe --version > tmp-ver.out diff --git a/script/get-github-cli/run.sh b/script/get-github-cli/run.sh new file mode 100644 index 0000000000..6ac03d3ca5 --- /dev/null +++ b/script/get-github-cli/run.sh @@ -0,0 +1,2 @@ +#!/bin/bash +gh --version > tmp-ver.out diff --git a/script/get-go/README-extra.md b/script/get-go/README-extra.md new file mode 100644 index 0000000000..d1c4f9caa8 --- /dev/null +++ b/script/get-go/README-extra.md @@ -0,0 +1,10 @@ +# Get GO Tool +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed GO tool on the system. + +## Exported Variables +* `CM_GO_BIN_WITH_PATH` +* `+PATH` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/script/get-go/README.md b/script/get-go/README.md new file mode 100644 index 0000000000..a0faecd0a6 --- /dev/null +++ b/script/get-go/README.md @@ -0,0 +1,127 @@ +Automatically generated README for this automation recipe: **get-go** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-go,ab42647a96724a25) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-go)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,tool,go,get-go* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get tool go get-go" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,tool,go,get-go` + +`cm run script --tags=get,tool,go,get-go ` + +*or* + +`cmr "get tool go get-go"` + +`cmr "get tool go get-go " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,tool,go,get-go',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
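+If the `go` binary is not found on `PATH`, `customize.py` (later in this
+patch) sets `CM_REQUIRE_INSTALL=yes`, which enables the `install,go` prehook
+dependency. A condensed sketch of this detect-or-install pattern (the real
+code uses the CM `find_artifact` helper rather than `shutil.which`):
+
+```python
+import shutil
+
+env = {}
+
+go_path = shutil.which('go')
+if go_path is None:
+    env['CM_REQUIRE_INSTALL'] = 'yes'    # enables the install,go prehook dep
+else:
+    env['CM_GO_BIN_WITH_PATH'] = go_path
+
+print(env)
+```
+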
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,tool,go,get-go"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,tool,go,get-go) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get tool go get-go" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-go/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-go/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-go/_cm.json)*** + * install,go + * `if (CM_REQUIRE_INSTALL == yes)` + - *Warning: no scripts found* + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-go/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-go/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-go/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-go/_cm.json) + +___ +### Script output +`cmr "get tool go get-go " -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_GO_*` +#### New environment keys auto-detected from customize + +* `CM_GO_CACHE_TAGS` +* `CM_GO_INSTALLED_PATH` \ No newline at end of file diff --git a/script/get-go/_cm.json b/script/get-go/_cm.json new file mode 100644 index 0000000000..f21b9abb1e --- /dev/null +++ b/script/get-go/_cm.json @@ -0,0 +1,33 @@ +{ + "alias": "get-go", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Compiler automation", + "clean_files": [], + "new_env_keys": [ + "CM_GO_*", + "+PATH" + ], + "env": { + "CM_REQUIRE_INSTALL": "no" + }, + "prehook_deps": [ + { + "enable_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "reuse_version": true, + "tags": "install,go" + } + ], + "tags": [ + "get", + "tool", + "go", + "get-go" + ], + "uid": "ab42647a96724a25" +} diff --git a/script/get-go/customize.py b/script/get-go/customize.py new file mode 100644 index 0000000000..d65126585b --- /dev/null +++ b/script/get-go/customize.py @@ -0,0 +1,59 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'go.exe' if os_info['platform'] == 'windows' else 'go' + env['FILE_NAME'] = file_name + if 'CM_GO_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': 'PATH', + 'detect_version':True, + 'env_path_key':'CM_GO_BIN_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + if r['return'] >0 : + if r['return'] == 16: + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return':0} + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'\s+go([\d.]+)', + 'group_number': 1, + 'env_key':'CM_GO_VERSION', + 'which_env':i['env']}) + if r['return'] >0: return r + + version = r['version'] + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return':0, 'version':version} + +def postprocess(i): + env = i['env'] + + r = detect_version(i) + + if r['return'] >0: return r + + version = r['version'] + found_file_path = env['CM_GO_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + env['CM_GO_INSTALLED_PATH'] = found_path + + env['CM_GO_CACHE_TAGS'] = 'version-'+version + + return {'return':0, 
'version': version} diff --git a/script/get-go/run.sh b/script/get-go/run.sh new file mode 100644 index 0000000000..51faa937d5 --- /dev/null +++ b/script/get-go/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash +go version > tmp-ver.out +test $? -eq 0 || exit 1 diff --git a/script/get-google-saxml/README.md b/script/get-google-saxml/README.md new file mode 100644 index 0000000000..5c85d56970 --- /dev/null +++ b/script/get-google-saxml/README.md @@ -0,0 +1,135 @@ +Automatically generated README for this automation recipe: **get-google-saxml** + +Category: **AI/ML frameworks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-google-saxml,5d7b17d84b5a48fb) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-saxml)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *get,google,saxml* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get google saxml" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,google,saxml` + +`cm run script --tags=get,google,saxml ` + +*or* + +`cmr "get google saxml"` + +`cmr "get google saxml " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,google,saxml',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
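+The `get,git,_repo.https://github.com/google/saxml` dependency (see `_cm.yaml`
+below) sets `CM_GIT_CHECKOUT_PATH_ENV_NAME` to `CM_GOOGLE_SAXML_SRC`, and
+`get-git-repo` exports the checkout path under that name. A condensed sketch
+of the indirection, taken from the `get-git-repo` postprocess logic earlier in
+this patch:
+
+```python
+import os
+
+env = {'CM_GIT_CHECKOUT_PATH_ENV_NAME': 'CM_GOOGLE_SAXML_SRC',
+       'CM_GIT_CHECKOUT_PATH': os.path.join(os.getcwd(), 'repo')}
+
+if env.get('CM_GIT_CHECKOUT_PATH_ENV_NAME', '') != '':
+    env[env['CM_GIT_CHECKOUT_PATH_ENV_NAME']] = env['CM_GIT_CHECKOUT_PATH']
+
+print(env['CM_GOOGLE_SAXML_SRC'])
+```
+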
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,google,saxml"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,google,saxml) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get google saxml" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+
+ +#### Versions +Default version: `master` + +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-saxml/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,python3 + * CM names: `--adr.['python3', 'python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,git,_repo.https://github.com/google/saxml + * CM names: `--adr.['google-saxml-git-src']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + * get,bazel + * CM names: `--adr.['bazel']...` + - CM script: [get-bazel](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-bazel) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-saxml/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-saxml/_cm.yaml) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-saxml/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-saxml/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-saxml/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-saxml/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-saxml/_cm.yaml) + +___ +### Script output +`cmr "get google saxml " -j` +#### New environment keys (filter) + +* `CM_GOOGLE_SAXML*` +#### New environment keys auto-detected from customize diff --git a/script/get-google-saxml/_cm.yaml b/script/get-google-saxml/_cm.yaml new file mode 100644 index 0000000000..2e2db0f886 --- /dev/null +++ b/script/get-google-saxml/_cm.yaml @@ -0,0 +1,45 @@ +alias: get-google-saxml +uid: 5d7b17d84b5a48fb + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: AI/ML frameworks + +default_version: master + +deps: + + - tags: detect,os + + - names: + - python3 + - python + tags: get,python3 + + - env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_GOOGLE_SAXML_SRC + extra_cache_tags: google,saxsml,src + force_env_keys: + - CM_GIT_CHECKOUT + names: + - google-saxml-git-src + tags: get,git,_repo.https://github.com/google/saxml + + - tags: get,bazel + names: + - bazel + +extra_cache_tags_from_env: + - env: CM_PYTHON_CACHE_TAGS + prefix: python- + +new_env_keys: +- CM_GOOGLE_SAXML* + +tags: +- get +- google +- saxml diff --git a/script/get-google-saxml/customize.py b/script/get-google-saxml/customize.py new file mode 100644 index 0000000000..d38c8c2ca5 --- /dev/null +++ b/script/get-google-saxml/customize.py @@ -0,0 +1,22 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + # TBD + + return {'return':0} + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + + # TBD + cur_dir = os.getcwd() + + + + return {'return':0} diff --git a/script/get-google-saxml/run.bat b/script/get-google-saxml/run.bat new file mode 100644 index 0000000000..ceaa88fea4 --- /dev/null +++ b/script/get-google-saxml/run.bat @@ -0,0 +1,3 @@ +@echo off + +echo TBD diff --git 
a/script/get-google-saxml/run.sh b/script/get-google-saxml/run.sh new file mode 100644 index 0000000000..bbb9d5222b --- /dev/null +++ b/script/get-google-saxml/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +echo "TBD" diff --git a/script/get-google-test/README.md b/script/get-google-test/README.md new file mode 100644 index 0000000000..dd85ed19f9 --- /dev/null +++ b/script/get-google-test/README.md @@ -0,0 +1,139 @@ +Automatically generated README for this automation recipe: **get-google-test** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-google-test,02945138a5614253) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-test)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,google-test,googletest,gtest,test,google* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get google-test googletest gtest test google" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,google-test,googletest,gtest,test,google` + +`cm run script --tags=get,google-test,googletest,gtest,test,google ` + +*or* + +`cmr "get google-test googletest gtest test google"` + +`cmr "get google-test googletest gtest test google " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,google-test,googletest,gtest,test,google',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
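+`customize.py` (later in this patch) maps the requested version to a Git tag
+(`v` + `CM_VERSION`, e.g. `v1.14.0` for the default) and exports the include
+and library paths of the local install directory. A condensed sketch:
+
+```python
+import os
+
+# Condensed from get-google-test/customize.py
+env = {'CM_VERSION': '1.14.0'}
+
+env['CM_GIT_CHECKOUT'] = 'v' + env['CM_VERSION']        # checkout tag v1.14.0
+
+install_path = os.path.join(os.getcwd(), 'install')
+env.setdefault('+C_INCLUDE_PATH', []).append(os.path.join(install_path, 'include'))
+env.setdefault('+LD_LIBRARY_PATH', []).append(os.path.join(install_path, 'lib'))
+
+print(env)
+```
+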
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,google-test,googletest,gtest,test,google"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,google-test,googletest,gtest,test,google) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get google-test googletest gtest test google" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+
+ +#### Versions +Default version: `1.14.0` + +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-test/_cm.json)*** + * get,cmake + * CM names: `--adr.['cmake']...` + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * get,compiler + * CM names: `--adr.['compiler']...` + - CM script: [get-cl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cl) + - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc) + - CM script: [get-llvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-llvm) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-test/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-test/_cm.json)*** + * get,git,repo,_repo.https://github.com/google/googletest.git + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-test/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-test/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-test/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-google-test/_cm.json) + +___ +### Script output +`cmr "get google-test googletest gtest test google " -j` +#### New environment keys (filter) + +* `+C_INCLUDE_PATH` +* `+LD_LIBRARY_PATH` +* `CM_GOOGLE_TEST_INSTALL_PATH` +* `CM_GOOGLE_TEST_SRC_PATH` +#### New environment keys auto-detected from customize + +* `CM_GOOGLE_TEST_INSTALL_PATH` +* `CM_GOOGLE_TEST_SRC_PATH` \ No newline at end of file diff --git a/script/get-google-test/_cm.json b/script/get-google-test/_cm.json new file mode 100644 index 0000000000..ce17db8aa9 --- /dev/null +++ b/script/get-google-test/_cm.json @@ -0,0 +1,52 @@ +{ + "alias": "get-google-test", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "deps": [ + { + "tags": "get,cmake", + "names": [ + "cmake" + ] + }, + { + "tags": "get,compiler", + "names": [ + "compiler" + ] + } + ], + "input_description": {}, + "default_version": "1.14.0", + "input_mapping": {}, + "new_env_keys": [ + "CM_GOOGLE_TEST_SRC_PATH", + "CM_GOOGLE_TEST_INSTALL_PATH", + "+C_INCLUDE_PATH", + "+LD_LIBRARY_PATH" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [ + { + "force_env_keys": [ + "CM_GIT_*" + ], + "tags": "get,git,repo,_repo.https://github.com/google/googletest.git", + "extra_cache_tags": "google-test,gtest" + } + ], + "tags": [ + "get", + "google-test", + "googletest", + "gtest", + "test", + "google" + ], + "uid": "02945138a5614253", + "versions": {} +} diff --git a/script/get-google-test/customize.py b/script/get-google-test/customize.py new file mode 100644 index 0000000000..299778e3f7 --- /dev/null +++ b/script/get-google-test/customize.py @@ -0,0 +1,33 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + 
meta = i['meta'] + + automation = i['automation'] + + env['CM_GIT_CHECKOUT'] = "v"+env['CM_VERSION'] + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + if '+C_INCLUDE_PATH' not in env: + env['+C_INCLUDE_PATH'] = [] + if '+LD_LIBRARY_PATH' not in env: + env['+LD_LIBRARY_PATH'] = [] + + gtest_install_path = os.path.join(os.getcwd(), "install") + env['CM_GOOGLE_TEST_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] + env['CM_GOOGLE_TEST_INSTALL_PATH'] = gtest_install_path + env['+C_INCLUDE_PATH'].append(os.path.join(gtest_install_path, "include")) + env['+LD_LIBRARY_PATH'].append(os.path.join(gtest_install_path, "lib")) + + return {'return':0} diff --git a/script/get-google-test/run.sh b/script/get-google-test/run.sh new file mode 100644 index 0000000000..c8a9a44253 --- /dev/null +++ b/script/get-google-test/run.sh @@ -0,0 +1,23 @@ +#!/bin/bash +function cmake() { +${CM_CMAKE_BIN_WITH_PATH} $@ +} + +export CC=${CM_C_COMPILER_WITH_PATH} +export CXX=${CM_CXX_COMPILER_WITH_PATH} + +CUR=$PWD +mkdir -p install +INSTALL_DIR=$CUR/install +cd ${CM_GIT_REPO_CHECKOUT_PATH} + +mkdir build +cd build +export MAKEFLAGS=-j${CM_MAKE_CORES} +cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} .. +test $? -eq 0 || exit $? + +CMD="make install" +echo ${CMD} +eval $CMD +test $? -eq 0 || exit $? diff --git a/script/get-ipol-src/README-extra.md b/script/get-ipol-src/README-extra.md new file mode 100644 index 0000000000..1618d0ed0b --- /dev/null +++ b/script/get-ipol-src/README-extra.md @@ -0,0 +1 @@ +20240127: Grigori added patch to support latest PIL diff --git a/script/get-ipol-src/README.md b/script/get-ipol-src/README.md new file mode 100644 index 0000000000..7bf5029dff --- /dev/null +++ b/script/get-ipol-src/README.md @@ -0,0 +1,148 @@ +Automatically generated README for this automation recipe: **get-ipol-src** + +Category: **Reproducibility and artifact evaluation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ipol-src,b6fd8213d03d4aa4) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ipol-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,ipol,journal,src,ipol-src* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get ipol journal src ipol-src" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,ipol,journal,src,ipol-src`
+
+`cm run script --tags=get,ipol,journal,src,ipol-src [--input_flags]`
+
+*or*
+
+`cmr "get ipol journal src ipol-src"`
+
+`cmr "get ipol journal src ipol-src" [--input_flags]`
+
+
+
+#### Input Flags
+
+* --**number**=IPOL publication number
+* --**year**=IPOL publication year
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "number":...})
+```
+#### Run this script from Python
+
+<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ipol,journal,src,ipol-src',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,ipol,journal,src,ipol-src"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ipol,journal,src,ipol-src) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get ipol journal src ipol-src" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--number=value` → `CM_IPOL_NUMBER=value`
+* `--year=value` → `CM_IPOL_YEAR=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "number":...})
+```
+
+</details>
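+
+A complete call with both input flags filled in might look as follows (a sketch, assuming the `cmind` package is installed; the values shown are this script's defaults from `_cm.json`):
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,ipol,journal,src,ipol-src',
+                  'out': 'con',
+                  'number': '439',
+                  'year': '2022'})
+if r['return'] > 0:
+    print(r['error'])
+```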
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ipol-src/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ipol-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ipol-src/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ipol-src/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ipol-src/_cm.json) + +___ +### Script output +`cmr "get ipol journal src ipol-src " [--input_flags] -j` +#### New environment keys (filter) + +* `CM_IPOL_*` +#### New environment keys auto-detected from customize + +* `CM_IPOL_PATH` \ No newline at end of file diff --git a/script/get-ipol-src/_cm.json b/script/get-ipol-src/_cm.json new file mode 100644 index 0000000000..b5faf01d8b --- /dev/null +++ b/script/get-ipol-src/_cm.json @@ -0,0 +1,41 @@ +{ + "alias": "get-ipol-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Reproducibility and artifact evaluation", + "env": { + "CM_IPOL_NUMBER": "439", + "CM_IPOL_SRC_URL": "http://www.ipol.im/pub/art/{{CM_IPOL_YEAR}}/{{CM_IPOL_NUMBER}}/{{CM_IPOL_NUMBER}}-master.zip", + "CM_IPOL_YEAR": "2022" + }, + "input_description": { + "number": "IPOL publication number", + "year": "IPOL publication year" + }, + "input_mapping": { + "number": "CM_IPOL_NUMBER", + "year": "CM_IPOL_YEAR" + }, + "extra_cache_tags_from_env": [ + { + "env": "CM_IPOL_NUMBER", + "prefix": "number-" + }, + { + "env": "CM_IPOL_YEAR", + "prefix": "year-" + } + ], + "new_env_keys": [ + "CM_IPOL_*" + ], + "tags": [ + "get", + "ipol", + "journal", + "src", + "ipol-src" + ], + "uid": "b6fd8213d03d4aa4" +} diff --git a/script/get-ipol-src/customize.py b/script/get-ipol-src/customize.py new file mode 100644 index 0000000000..f2b5dd1fca --- /dev/null +++ b/script/get-ipol-src/customize.py @@ -0,0 +1,58 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + script_path = i['artifact'].path + + automation = i['automation'] + + cm = automation.cmind + + path = os.getcwd() + + url = env['CM_IPOL_SRC_URL'] + + year = env.get('CM_IPOL_YEAR', '') + number = env.get('CM_IPOL_NUMBER', '') + + url = url.replace('{{CM_IPOL_YEAR}}', year).replace('{{CM_IPOL_NUMBER}}', number) + + print ('Downloading from {}'.format(url)) + + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', + 'url':url}) + if r['return']>0: return r + + filename = r['filename'] + + print ('Unzipping file {}'.format(filename)) + + r = cm.access({'action':'unzip_file', + 'automation':'utils,dc2743f8450541e3', + 'filename':filename}) + if r['return']>0: return r + + if os.path.isfile(filename): + print ('Removing file {}'.format(filename)) + os.remove(filename) + + # Get sub-directory from filename + ff = os.path.splitext(filename) + + subdir = ff[0] + + env['CM_IPOL_PATH']=os.path.join(path, subdir) + + # Applying patch + cmd = 'patch -p0 < {}'.format(os.path.join(script_path, 'patch', '20240127.patch')) + + print ('Patching code: {}'.format(cmd)) + os.system(cmd) + + return {'return':0} diff --git 
a/script/get-ipol-src/patch/20240127.patch b/script/get-ipol-src/patch/20240127.patch new file mode 100644 index 0000000000..6610d0ceea --- /dev/null +++ b/script/get-ipol-src/patch/20240127.patch @@ -0,0 +1,10 @@ +diff -Naur 439-master/main.py 439-master.new/main.py +--- 439-master/main.py Sat Jan 27 22:11:55 2024 ++++ 439-master.new/main.py Sat Jan 27 22:06:51 2024 +@@ -135,5 +135,5 @@ + args = parser.parse_args() + #print('before plume detection', os.path.dirname(os.path.realpath('__file__')), file=sys.stderr) + p = compute_map(args.input_0,args.input_1) +- imageio.imsave("cm.png", ((255*p[0,:,:])).numpy()) ++ imageio.imsave("cm.png", np.array((255*p[0,:,:]).numpy(), np.uint8)) + diff --git a/script/get-java/README-extra.md b/script/get-java/README-extra.md new file mode 100644 index 0000000000..232fbe6e06 --- /dev/null +++ b/script/get-java/README-extra.md @@ -0,0 +1,6 @@ +# Windows + +## Misc + +* https://jdk.java.net/java-se-ri/11 +* https://learn.microsoft.com/fr-fr/java/openjdk/download diff --git a/script/get-java/README.md b/script/get-java/README.md new file mode 100644 index 0000000000..602cf185fa --- /dev/null +++ b/script/get-java/README.md @@ -0,0 +1,167 @@ +Automatically generated README for this automation recipe: **get-java** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-java,9399d0e785704f8c) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-java)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,java* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get java" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,java` + +`cm run script --tags=get,java[,variations] [--input_flags]` + +*or* + +`cmr "get java"` + +`cmr "get java [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,java',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
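+
+Note the error-handling convention visible throughout CM: every call returns a dictionary whose `return` key is `0` on success and non-zero on failure (with the message under `error`); internally, the customize scripts below also treat the code `16` from `find_artifact` as a soft "not found" that triggers installation instead of aborting.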
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,java"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,java) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get java[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_install` + - Environment variables: + - *CM_JAVA_PREBUILT_INSTALL*: `on` + - Workflow: + +
+ + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--install=value` → `CM_JAVA_PREBUILT_INSTALL=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "install":...})
+```
+
+</details>
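+
+For example, forcing installation of the prebuilt JDK instead of detecting one already on the `PATH` can be requested either through the `_install` variation above or through this mapped flag (a sketch; both routes set `CM_JAVA_PREBUILT_INSTALL=on`):
+
+```bash
+cmr "get java _install"
+# or, equivalently
+cmr "get java" --install=on
+```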
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_JAVA_PREBUILT_VERSION: `19` +* CM_JAVA_PREBUILT_BUILD: `36` +* CM_JAVA_PREBUILT_URL: `https://download.java.net/openjdk/jdk${CM_JAVA_PREBUILT_VERSION}/ri/` +* CM_JAVA_PREBUILT_FILENAME: `openjdk-${CM_JAVA_PREBUILT_VERSION}+${CM_JAVA_PREBUILT_BUILD}_${CM_JAVA_PREBUILT_HOST_OS}-x64_bin` + +
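+
+To make the templating above concrete, here is a minimal sketch of the substitution performed by `customize.py` on these defaults (assuming Linux, where `CM_JAVA_PREBUILT_HOST_OS` is set to `linux` and `install-prebuilt.sh` appends `.tar.gz`):
+
+```python
+env = {'CM_JAVA_PREBUILT_VERSION': '19',
+       'CM_JAVA_PREBUILT_BUILD': '36',
+       'CM_JAVA_PREBUILT_HOST_OS': 'linux'}
+
+url = 'https://download.java.net/openjdk/jdk${CM_JAVA_PREBUILT_VERSION}/ri/'
+filename = 'openjdk-${CM_JAVA_PREBUILT_VERSION}+${CM_JAVA_PREBUILT_BUILD}_${CM_JAVA_PREBUILT_HOST_OS}-x64_bin'
+
+# Same replace loop as in customize.py
+for key, value in env.items():
+    url = url.replace('${' + key + '}', value)
+    filename = filename.replace('${' + key + '}', value)
+
+# Final download URL fetched by install-prebuilt.sh:
+print(url + filename + '.tar.gz')
+# https://download.java.net/openjdk/jdk19/ri/openjdk-19+36_linux-x64_bin.tar.gz
+```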
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-java/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-java/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-java/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-java/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-java/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-java/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-java/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-java/_cm.json) + +___ +### Script output +`cmr "get java [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_JAVA_*` +* `JAVA_HOME` +#### New environment keys auto-detected from customize + +* `CM_JAVA_BIN` +* `CM_JAVA_CACHE_TAGS` +* `CM_JAVA_PREBUILT_EXT` +* `CM_JAVA_PREBUILT_FILENAME` +* `CM_JAVA_PREBUILT_HOST_OS` +* `CM_JAVA_PREBUILT_URL` \ No newline at end of file diff --git a/script/get-java/_cm.json b/script/get-java/_cm.json new file mode 100644 index 0000000000..a962b3ab88 --- /dev/null +++ b/script/get-java/_cm.json @@ -0,0 +1,38 @@ +{ + "alias": "get-java", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "input_mapping": { + "install": "CM_JAVA_PREBUILT_INSTALL" + }, + "default_env": { + "CM_JAVA_PREBUILT_VERSION": "19", + "CM_JAVA_PREBUILT_BUILD": "36", + "CM_JAVA_PREBUILT_URL": "https://download.java.net/openjdk/jdk${CM_JAVA_PREBUILT_VERSION}/ri/", + "CM_JAVA_PREBUILT_FILENAME": "openjdk-${CM_JAVA_PREBUILT_VERSION}+${CM_JAVA_PREBUILT_BUILD}_${CM_JAVA_PREBUILT_HOST_OS}-x64_bin" + }, + "deps": [ + { + "tags": "detect,os" + } + ], + "new_env_keys": [ + "CM_JAVA_*", + "JAVA_HOME", + "+PATH" + ], + "tags": [ + "get", + "java" + ], + "variations": { + "install": { + "env": { + "CM_JAVA_PREBUILT_INSTALL":"on" + } + } + }, + "uid": "9399d0e785704f8c" +} diff --git a/script/get-java/customize.py b/script/get-java/customize.py new file mode 100644 index 0000000000..8cfc211bcf --- /dev/null +++ b/script/get-java/customize.py @@ -0,0 +1,137 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + run_script_input = i['run_script_input'] + + file_name = 'java.exe' if os_info['platform'] == 'windows' else 'java' + + cur_dir = os.getcwd() + + meta = i['meta'] + + found = False + install = env.get('CM_JAVA_PREBUILT_INSTALL','') in ['on', 'True', True] + + env_path_key = 'CM_JAVA_BIN_WITH_PATH' + + # If not force install, search for artifact + if not install: + rr = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': 'PATH', + 'detect_version':True, + 'env_path_key':env_path_key, + 
'run_script_input':i['run_script_input'], + 'hook': skip_path, + 'recursion_spaces':recursion_spaces}) + if rr['return'] == 0 : + found = True + elif rr['return'] != 16: + return rr + + # If not found or force install + if not found or install: + + if os_info['platform'] == 'windows': + env['CM_JAVA_PREBUILT_HOST_OS']='windows' + env['CM_JAVA_PREBUILT_EXT']='.zip' + else: + env['CM_JAVA_PREBUILT_HOST_OS']='linux' + env['CM_JAVA_PREBUILT_EXT']='.tar.gz' + + url = env['CM_JAVA_PREBUILT_URL'] + filename = env['CM_JAVA_PREBUILT_FILENAME'] + + java_prebuilt_version = env['CM_JAVA_PREBUILT_VERSION'] + java_prebuilt_build = env['CM_JAVA_PREBUILT_BUILD'] + + for key in ['CM_JAVA_PREBUILT_VERSION', + 'CM_JAVA_PREBUILT_BUILD', + 'CM_JAVA_PREBUILT_HOST_OS', + 'CM_JAVA_PREBUILT_EXT']: + url = url.replace('${'+key+'}', env[key]) + filename = filename.replace('${'+key+'}', env[key]) + + env['CM_JAVA_PREBUILT_URL'] = url + env['CM_JAVA_PREBUILT_FILENAME'] = filename + + print ('') + print (recursion_spaces + ' Downloading and installing prebuilt Java from {} ...'.format(url+filename)) + + rr = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install-prebuilt'}) + if rr['return']>0: return rr + + target_path = os.path.join(cur_dir, 'jdk-'+java_prebuilt_version, 'bin') + target_file = os.path.join(target_path, file_name) + + if not os.path.isfile(target_file): + return {'return':1, 'error':'can\'t find target file {}'.format(target_file)} + + print ('') + print (recursion_spaces + ' Registering file {} ...'.format(target_file)) + + env[env_path_key] = target_file + + if '+PATH' not in env: env['+PATH'] = [] + env['+PATH'].append(target_path) + + return {'return':0} + +def skip_path(i): + + # Avoid not complete path on Windows + skip = False + + path = i['file'] + + if 'javapath' in path: + skip = True + + return {'return':0, 'skip':skip} + +def detect_version(i): + + r = i['automation'].parse_version({'match_text': r'\s*"(.*?)"', + 'group_number': 1, + 'env_key':'CM_JAVA_VERSION', + 'which_env':i['env'], + 'debug':True}) + if r['return'] >0: return r + + version = r['version'] + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return':0, 'version':version} + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] >0: return r + + version = env['CM_JAVA_VERSION'] + env['CM_JAVA_CACHE_TAGS'] = 'version-'+version + + found_file_path = env['CM_JAVA_BIN_WITH_PATH'] + file_name = os.path.basename(found_file_path) + + env['CM_JAVA_BIN'] = file_name + + found_path = os.path.dirname(found_file_path) + java_home_path = os.path.dirname(found_path) + + env['JAVA_HOME'] = java_home_path + + return {'return':0, 'version': version} diff --git a/script/get-java/install-prebuilt.bat b/script/get-java/install-prebuilt.bat new file mode 100644 index 0000000000..17b00e5abc --- /dev/null +++ b/script/get-java/install-prebuilt.bat @@ -0,0 +1,9 @@ +del /Q %CM_JAVA_PREBUILT_FILENAME%.zip + +wget --no-check-certificate %CM_JAVA_PREBUILT_URL%%CM_JAVA_PREBUILT_FILENAME%.zip +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +unzip %CM_JAVA_PREBUILT_FILENAME%.zip +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +del /Q %CM_JAVA_PREBUILT_FILENAME%.zip diff --git a/script/get-java/install-prebuilt.sh b/script/get-java/install-prebuilt.sh new file mode 100644 index 0000000000..575d0467ee --- /dev/null +++ b/script/get-java/install-prebuilt.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +rm -f ${CM_JAVA_PREBUILT_FILENAME}.tar.gz +rm -f 
${CM_JAVA_PREBUILT_FILENAME}.tar + +wget --no-check-certificate ${CM_JAVA_PREBUILT_URL}${CM_JAVA_PREBUILT_FILENAME}.tar.gz +test $? -eq 0 || exit 1 + +gzip -d ${CM_JAVA_PREBUILT_FILENAME}.tar.gz +test $? -eq 0 || exit 1 + +tar xvf ${CM_JAVA_PREBUILT_FILENAME}.tar +test $? -eq 0 || exit 1 + +rm -f ${CM_JAVA_PREBUILT_FILENAME}.tar diff --git a/script/get-java/run.bat b/script/get-java/run.bat new file mode 100644 index 0000000000..0a80aa34c7 --- /dev/null +++ b/script/get-java/run.bat @@ -0,0 +1,3 @@ +"%CM_JAVA_BIN_WITH_PATH%" -version > tmp-ver.out 2>&1 +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + diff --git a/script/get-java/run.sh b/script/get-java/run.sh new file mode 100644 index 0000000000..566a2b5697 --- /dev/null +++ b/script/get-java/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash +${CM_JAVA_BIN_WITH_PATH} -version &> tmp-ver.out +test $? -eq 0 || exit 1 diff --git a/script/get-javac/README-extra.md b/script/get-javac/README-extra.md new file mode 100644 index 0000000000..232fbe6e06 --- /dev/null +++ b/script/get-javac/README-extra.md @@ -0,0 +1,6 @@ +# Windows + +## Misc + +* https://jdk.java.net/java-se-ri/11 +* https://learn.microsoft.com/fr-fr/java/openjdk/download diff --git a/script/get-javac/README.md b/script/get-javac/README.md new file mode 100644 index 0000000000..127bb27bae --- /dev/null +++ b/script/get-javac/README.md @@ -0,0 +1,170 @@ +Automatically generated README for this automation recipe: **get-javac** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-javac,509280c497b24226) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-javac)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,javac* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get javac" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,javac` + +`cm run script --tags=get,javac[,variations] [--input_flags]` + +*or* + +`cmr "get javac"` + +`cmr "get javac [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,javac',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,javac"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,javac) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get javac[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_install` + - Environment variables: + - *CM_JAVAC_PREBUILT_INSTALL*: `on` + - Workflow: + +
+ + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--install=value` → `CM_JAVAC_PREBUILT_INSTALL=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "install":...})
+```
+
+</details>
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_JAVAC_PREBUILT_VERSION: `19` +* CM_JAVAC_PREBUILT_BUILD: `36` +* CM_JAVAC_PREBUILT_URL: `https://download.java.net/openjdk/jdk${CM_JAVAC_PREBUILT_VERSION}/ri/` +* CM_JAVAC_PREBUILT_FILENAME: `openjdk-${CM_JAVAC_PREBUILT_VERSION}+${CM_JAVAC_PREBUILT_BUILD}_${CM_JAVAC_PREBUILT_HOST_OS}-x64_bin` + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-javac/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-javac/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-javac/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-javac/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-javac/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-javac/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-javac/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-javac/_cm.json) + +___ +### Script output +`cmr "get javac [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_JAVAC_*` +* `CM_JAVA_*` +* `JAVA_HOME` +#### New environment keys auto-detected from customize + +* `CM_JAVAC_BIN` +* `CM_JAVAC_CACHE_TAGS` +* `CM_JAVAC_PREBUILT_EXT` +* `CM_JAVAC_PREBUILT_FILENAME` +* `CM_JAVAC_PREBUILT_HOST_OS` +* `CM_JAVAC_PREBUILT_URL` +* `CM_JAVA_BIN` +* `CM_JAVA_BIN_WITH_PATH` \ No newline at end of file diff --git a/script/get-javac/_cm.json b/script/get-javac/_cm.json new file mode 100644 index 0000000000..995902e5a7 --- /dev/null +++ b/script/get-javac/_cm.json @@ -0,0 +1,39 @@ +{ + "alias": "get-javac", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "input_mapping": { + "install": "CM_JAVAC_PREBUILT_INSTALL" + }, + "default_env": { + "CM_JAVAC_PREBUILT_VERSION": "19", + "CM_JAVAC_PREBUILT_BUILD": "36", + "CM_JAVAC_PREBUILT_URL": "https://download.java.net/openjdk/jdk${CM_JAVAC_PREBUILT_VERSION}/ri/", + "CM_JAVAC_PREBUILT_FILENAME": "openjdk-${CM_JAVAC_PREBUILT_VERSION}+${CM_JAVAC_PREBUILT_BUILD}_${CM_JAVAC_PREBUILT_HOST_OS}-x64_bin" + }, + "deps": [ + { + "tags": "detect,os" + } + ], + "new_env_keys": [ + "CM_JAVAC_*", + "CM_JAVA_*", + "JAVA_HOME", + "+PATH" + ], + "tags": [ + "get", + "javac" + ], + "variations": { + "install": { + "env": { + "CM_JAVAC_PREBUILT_INSTALL":"on" + } + } + }, + "uid": "509280c497b24226" +} diff --git a/script/get-javac/customize.py b/script/get-javac/customize.py new file mode 100644 index 0000000000..f7e076bd93 --- /dev/null +++ b/script/get-javac/customize.py @@ -0,0 +1,148 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + run_script_input = i['run_script_input'] + + file_name = 'javac.exe' if os_info['platform'] == 'windows' else 'javac' + + cur_dir = os.getcwd() + + meta = i['meta'] + + found = False + install = env.get('CM_JAVAC_PREBUILT_INSTALL','') in ['on', 'True', True] + + env_path_key = 'CM_JAVAC_BIN_WITH_PATH' + + # If not force install, search for artifact + if not install: + rr = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info':os_info, + 
                                            'default_path_env_key': 'PATH',
+                                            'detect_version':True,
+                                            'env_path_key':env_path_key,
+                                            'run_script_input':i['run_script_input'],
+                                            'hook': skip_path,
+                                            'recursion_spaces':recursion_spaces})
+        if rr['return'] == 0:
+            found = True
+        elif rr['return'] != 16:
+            return rr
+
+    # If not found or force install
+    if not found or install:
+
+        if os_info['platform'] == 'windows':
+            env['CM_JAVAC_PREBUILT_HOST_OS']='windows'
+            env['CM_JAVAC_PREBUILT_EXT']='.zip'
+        else:
+            env['CM_JAVAC_PREBUILT_HOST_OS']='linux'
+            env['CM_JAVAC_PREBUILT_EXT']='.tar.gz'
+
+        url = env['CM_JAVAC_PREBUILT_URL']
+        filename = env['CM_JAVAC_PREBUILT_FILENAME']
+
+        javac_prebuilt_version = env['CM_JAVAC_PREBUILT_VERSION']
+        javac_prebuilt_build = env['CM_JAVAC_PREBUILT_BUILD']
+
+        for key in ['CM_JAVAC_PREBUILT_VERSION',
+                    'CM_JAVAC_PREBUILT_BUILD',
+                    'CM_JAVAC_PREBUILT_HOST_OS',
+                    'CM_JAVAC_PREBUILT_EXT']:
+            url = url.replace('${'+key+'}', env[key])
+            filename = filename.replace('${'+key+'}', env[key])
+
+        env['CM_JAVAC_PREBUILT_URL'] = url
+        env['CM_JAVAC_PREBUILT_FILENAME'] = filename
+
+        print ('')
+        print (recursion_spaces + '    Downloading and installing prebuilt Java from {} ...'.format(url+filename))
+
+        rr = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install-prebuilt'})
+        if rr['return']>0: return rr
+
+        target_path = os.path.join(cur_dir, 'jdk-'+javac_prebuilt_version, 'bin')
+        target_file = os.path.join(target_path, file_name)
+
+        if not os.path.isfile(target_file):
+            return {'return':1, 'error':'can\'t find target file {}'.format(target_file)}
+
+        print ('')
+        print (recursion_spaces + '    Registering file {} ...'.format(target_file))
+
+        env[env_path_key] = target_file
+
+        if '+PATH' not in env: env['+PATH'] = []
+        env['+PATH'].append(target_path)
+
+    return {'return':0}
+
+def skip_path(i):
+
+    # Avoid incomplete paths on Windows
+    skip = False
+
+    path = i['file']
+
+    if 'javapath' in path:
+        skip = True
+
+    return {'return':0, 'skip':skip}
+
+def detect_version(i):
+
+    r = i['automation'].parse_version({'match_text': r'javac\s*([\d.]+)',
+                                       'group_number': 1,
+                                       'env_key':'CM_JAVAC_VERSION',
+                                       'which_env':i['env'],
+                                       'debug':True})
+    if r['return'] >0: return r
+
+    version = r['version']
+
+    print (i['recursion_spaces'] + '    Detected version: {}'.format(version))
+
+    return {'return':0, 'version':version}
+
+def postprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+    r = detect_version(i)
+    if r['return'] >0: return r
+
+    version = env['CM_JAVAC_VERSION']
+    env['CM_JAVAC_CACHE_TAGS'] = 'version-'+version
+
+    found_file_path = env['CM_JAVAC_BIN_WITH_PATH']
+    file_name = os.path.basename(found_file_path)
+    file_path = os.path.dirname(found_file_path)
+
+    env['CM_JAVAC_BIN'] = file_name
+
+    if os_info['platform'] == 'windows':
+        env['CM_JAVA_BIN'] = 'java.exe'
+    else:
+        env['CM_JAVA_BIN'] = 'java'
+
+    env['CM_JAVA_BIN_WITH_PATH'] = os.path.join(file_path, env['CM_JAVA_BIN'])
+
+    found_path = os.path.dirname(found_file_path)
+    javac_home_path = os.path.dirname(found_path)
+
+    env['JAVA_HOME'] = javac_home_path
+
+    return {'return':0, 'version': version}
diff --git a/script/get-javac/install-prebuilt.bat b/script/get-javac/install-prebuilt.bat
new file mode 100644
index 0000000000..74b1c48129
--- /dev/null
+++ b/script/get-javac/install-prebuilt.bat
@@ -0,0 +1,9 @@
+del /Q %CM_JAVAC_PREBUILT_FILENAME%.zip
+
+wget --no-check-certificate %CM_JAVAC_PREBUILT_URL%%CM_JAVAC_PREBUILT_FILENAME%.zip
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+unzip 
%CM_JAVAC_PREBUILT_FILENAME%.zip +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +del /Q %CM_JAVAC_PREBUILT_FILENAME%.zip diff --git a/script/get-javac/install-prebuilt.sh b/script/get-javac/install-prebuilt.sh new file mode 100644 index 0000000000..eed1b8b013 --- /dev/null +++ b/script/get-javac/install-prebuilt.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +rm -f ${CM_JAVAC_PREBUILT_FILENAME}.tar.gz +rm -f ${CM_JAVAC_PREBUILT_FILENAME}.tar + +wget --no-check-certificate ${CM_JAVAC_PREBUILT_URL}${CM_JAVAC_PREBUILT_FILENAME}.tar.gz +test $? -eq 0 || exit 1 + +gzip -d ${CM_JAVAC_PREBUILT_FILENAME}.tar.gz +test $? -eq 0 || exit 1 + +tar xvf ${CM_JAVAC_PREBUILT_FILENAME}.tar +test $? -eq 0 || exit 1 + +rm -f ${CM_JAVAC_PREBUILT_FILENAME}.tar diff --git a/script/get-javac/run.bat b/script/get-javac/run.bat new file mode 100644 index 0000000000..1919f559c6 --- /dev/null +++ b/script/get-javac/run.bat @@ -0,0 +1,3 @@ +"%CM_JAVAC_BIN_WITH_PATH%" -version > tmp-ver.out 2>&1 +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + diff --git a/script/get-javac/run.sh b/script/get-javac/run.sh new file mode 100644 index 0000000000..40f97218d3 --- /dev/null +++ b/script/get-javac/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash +${CM_JAVAC_BIN_WITH_PATH} -version &> tmp-ver.out +test $? -eq 0 || exit 1 diff --git a/script/get-lib-armnn/README.md b/script/get-lib-armnn/README.md new file mode 100644 index 0000000000..1a684dc406 --- /dev/null +++ b/script/get-lib-armnn/README.md @@ -0,0 +1,134 @@ +Automatically generated README for this automation recipe: **get-lib-armnn** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-lib-armnn,9603a2e90fd44587) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-armnn)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,lib-armnn,lib,armnn* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get lib-armnn lib armnn" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,lib-armnn,lib,armnn` + +`cm run script --tags=get,lib-armnn,lib,armnn ` + +*or* + +`cmr "get lib-armnn lib armnn"` + +`cmr "get lib-armnn lib armnn " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,lib-armnn,lib,armnn',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,lib-armnn,lib,armnn"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,lib-armnn,lib,armnn) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get lib-armnn lib armnn" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +#### Versions +Default version: `23.11` + +* `22.11` +* `23.05` +* `23.11` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-armnn/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-armnn/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-armnn/_cm.json)*** + * get,git,repo,_repo.https://github.com/ARM-software/armnn + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-armnn/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-armnn/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-armnn/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-armnn/_cm.json) + +___ +### Script output +`cmr "get lib-armnn lib armnn " -j` +#### New environment keys (filter) + +* `+CPLUS_INCLUDE_PATH` +* `+C_INCLUDE_PATH` +* `+LD_LIBRARY_PATH` +* `CM_LIB_ARMNN_VERSION` +* `CM_LIB_DNNL_*` +#### New environment keys auto-detected from customize diff --git a/script/get-lib-armnn/_cm.json b/script/get-lib-armnn/_cm.json new file mode 100644 index 0000000000..3fa7dce5bb --- /dev/null +++ b/script/get-lib-armnn/_cm.json @@ -0,0 +1,58 @@ +{ + "alias": "get-lib-armnn", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "default_version": "23.11", + "deps": [ + { + "tags": "detect,os" + } + ], + "env": { + "CM_GIT_URL": "https://github.com/ARM-software/armnn" + }, + "prehook_deps": [ + { + "force_env_keys": [ + "CM_GIT_*" + ], + "tags": "get,git,repo,_repo.https://github.com/ARM-software/armnn" + } + ], + "new_env_keys": [ + "CM_LIB_ARMNN_VERSION", + "CM_LIB_DNNL_*", + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH" + ], + "tags": [ + "get", + "lib-armnn", + "lib", + "armnn" + ], + "uid": "9603a2e90fd44587", + "versions": { + "23.11": { + "env": { + "CM_LIB_ARMNN_VERSION": "v23.11", + "CM_TMP_GIT_BRANCH_NAME": "branches/armnn_23_11" + } + }, + "23.05": { + "env": { + "CM_LIB_ARMNN_VERSION": "v23.05", + "CM_TMP_GIT_BRANCH_NAME": "branches/armnn_23_05" + } + }, + "22.11": { + "env": { + "CM_LIB_ARMNN_VERSION": "v22.11", + "CM_TMP_GIT_BRANCH_NAME": "branches/armnn_22_11" + } + } + } +} diff --git a/script/get-lib-armnn/customize.py b/script/get-lib-armnn/customize.py new file mode 100644 index 0000000000..6476cc7894 --- /dev/null +++ b/script/get-lib-armnn/customize.py @@ -0,0 +1,51 @@ +from cmind import utils +import os + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + version = env['CM_LIB_ARMNN_VERSION'] + if env.get('CM_HOST_PLATFORM_FLAVOR','') == 'x86_64': + url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-x86_64.tar.gz" + elif 
env.get('CM_HOST_PLATFORM_FLAVOR','') == 'aarch64': + url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-aarch64.tar.gz" + + env['CM_LIB_ARMNN_PREBUILT_BINARY_URL'] = url + env['CM_LIB_ARMNN_EXTRACT_FILENAME'] = os.path.basename(url) + + env['CM_GIT_CHECKOUT'] = env['CM_TMP_GIT_BRANCH_NAME'] + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + paths = [ + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ] + + for key in paths: + env[key] = [] + + include_paths = [] + armnn_src_path = env['CM_GIT_CHECKOUT_PATH'] + include_paths.append(os.path.join(os.getcwd(), 'include')) + include_paths.append(os.path.join(armnn_src_path, 'include')) + include_paths.append(os.path.join(armnn_src_path, 'profiling')) + + for inc_path in include_paths: + env['+C_INCLUDE_PATH'].append(inc_path) + env['+CPLUS_INCLUDE_PATH'].append(inc_path) + + lib_path = os.path.join(os.getcwd()) + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + return {'return':0} diff --git a/script/get-lib-armnn/run.sh b/script/get-lib-armnn/run.sh new file mode 100644 index 0000000000..4bb5d182a3 --- /dev/null +++ b/script/get-lib-armnn/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +CUR_DIR=${PWD:-tmp} + +wget -nc ${CM_LIB_ARMNN_PREBUILT_BINARY_URL} +tar -xvzf ${CM_LIB_ARMNN_EXTRACT_FILENAME} + +echo "******************************************************" +echo "ArmNN prebuilt binary downloaded to ${CUR_DIR} ..." diff --git a/script/get-lib-dnnl/README.md b/script/get-lib-dnnl/README.md new file mode 100644 index 0000000000..34296bad6d --- /dev/null +++ b/script/get-lib-dnnl/README.md @@ -0,0 +1,134 @@ +Automatically generated README for this automation recipe: **get-lib-dnnl** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-lib-dnnl,1cd35a6a3b0b4530) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-dnnl)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,lib-dnnl,lib,dnnl* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get lib-dnnl lib dnnl" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,lib-dnnl,lib,dnnl` + +`cm run script --tags=get,lib-dnnl,lib,dnnl ` + +*or* + +`cmr "get lib-dnnl lib dnnl"` + +`cmr "get lib-dnnl lib dnnl " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,lib-dnnl,lib,dnnl',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,lib-dnnl,lib,dnnl"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,lib-dnnl,lib,dnnl) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get lib-dnnl lib dnnl" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
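+
+No default keys are listed here, but the build configuration comes from the `env` block of this script's `_cm.json` (shown below) and can be overridden the same way; for example, a hypothetical switch of the oneDNN CPU runtime from the default OpenMP to the sequential runtime documented by oneDNN:
+
+```bash
+cmr "get lib-dnnl lib dnnl" --env.DNNL_CPU_RUNTIME=SEQ
+```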
+ +#### Versions +Default version: `dev` + +* `2.2.4` +* `dev` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-dnnl/_cm.json)*** + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * cmake,get-cmake + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-dnnl/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-dnnl/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-dnnl/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-dnnl/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-dnnl/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-dnnl/_cm.json) + +___ +### Script output +`cmr "get lib-dnnl lib dnnl " -j` +#### New environment keys (filter) + +* `+CPLUS_INCLUDE_PATH` +* `+C_INCLUDE_PATH` +* `+LD_LIBRARY_PATH` +* `CM_LIB_DNNL_*` +#### New environment keys auto-detected from customize + +* `CM_LIB_DNNL_INSTALL_DIR` \ No newline at end of file diff --git a/script/get-lib-dnnl/_cm.json b/script/get-lib-dnnl/_cm.json new file mode 100644 index 0000000000..9612fc31ea --- /dev/null +++ b/script/get-lib-dnnl/_cm.json @@ -0,0 +1,48 @@ +{ + "alias": "get-lib-dnnl", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Detection or installation of tools and artifacts", + "cache": true, + "default_version": "dev", + "deps": [ + { + "tags": "detect,cpu" + }, + { + "tags": "cmake,get-cmake" + } + ], + "env": { + "CM_DNNL_CLEAN_BUILD": "yes", + "CM_GIT_URL": "https://github.com/oneapi-src/oneDNN", + "DNNL_BUILD_EXAMPLES": "OFF", + "DNNL_BUILD_TESTS": "OFF", + "DNNL_CPU_RUNTIME": "OMP" + }, + "new_env_keys": [ + "CM_LIB_DNNL_*", + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH" + ], + "tags": [ + "get", + "lib-dnnl", + "lib", + "dnnl" + ], + "uid": "1cd35a6a3b0b4530", + "versions": { + "2.2.4": { + "env": { + "CM_GIT_CHECKOUT": "v2.2.4" + } + }, + "dev": { + "env": { + "CM_GIT_CHECKOUT": "master" + } + } + } +} diff --git a/script/get-lib-dnnl/customize.py b/script/get-lib-dnnl/customize.py new file mode 100644 index 0000000000..0d03fd4de0 --- /dev/null +++ b/script/get-lib-dnnl/customize.py @@ -0,0 +1,28 @@ +from cmind import utils +import os + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + return {'return':0} + +def postprocess(i): + + env = i['env'] + env['CM_LIB_DNNL_INSTALL_DIR'] = os.getcwd() + + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: +# 20221024: we save and restore env in the main script and can clean env here for determinism +# if key not in env: + env[key] = [] + + env['+C_INCLUDE_PATH'].append(os.path.join(os.getcwd(), 'install', 'include')) + env['+CPLUS_INCLUDE_PATH'].append(os.path.join(os.getcwd(), 'install', 'include')) + + 
lib_path = os.path.join(os.getcwd(), 'install', 'lib') + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + return {'return':0} diff --git a/script/get-lib-dnnl/run.sh b/script/get-lib-dnnl/run.sh new file mode 100644 index 0000000000..ca47ee3b9b --- /dev/null +++ b/script/get-lib-dnnl/run.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +CUR_DIR=${PWD:-tmp} + +git clone --recursive -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} src + +test $? -eq 0 || exit 1 + +INSTALL_DIR="${CUR_DIR}" +rm -rf ${INSTALL_DIR}/install + +cd ${INSTALL_DIR} +mkdir build +mkdir install + +echo "******************************************************" +cd build +cmake .. \ + -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}/install" \ + -DCMAKE_BUILD_TYPE=Release \ + -DDNNL_BUILD_TESTS=${DNNL_BUILD_TESTS} \ + -DDNNL_BUILD_EXAMPLES=${DNNL_BUILD_EXAMPLES} \ + -DDNNL_CPU_RUNTIME=${DNNL_CPU_RUNTIME} \ + ../src/ +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" +cmake --build . -j${CM_CPUINFO_CPUs} +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" +cmake --install . +if [ "${?}" != "0" ]; then exit 1; fi + + +# Clean build directory (too large) +cd ${INSTALL_DIR} +if [ "${CM_DNNL_CLEAN_BUILD}" != "no" ]; then + rm -rf build +fi + +echo "******************************************************" +echo "DNNL was built and installed to ${INSTALL_DIR}/install ..." diff --git a/script/get-lib-protobuf/README.md b/script/get-lib-protobuf/README.md new file mode 100644 index 0000000000..a679ead8b8 --- /dev/null +++ b/script/get-lib-protobuf/README.md @@ -0,0 +1,156 @@ +Automatically generated README for this automation recipe: **get-lib-protobuf** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-lib-protobuf,db45f1eb73934f91) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-protobuf)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,google-protobuf,protobuf,lib,lib-protobuf,google* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get google-protobuf protobuf lib lib-protobuf google" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,google-protobuf,protobuf,lib,lib-protobuf,google` + +`cm run script --tags=get,google-protobuf,protobuf,lib,lib-protobuf,google[,variations] ` + +*or* + +`cmr "get google-protobuf protobuf lib lib-protobuf google"` + +`cmr "get google-protobuf protobuf lib lib-protobuf google [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,google-protobuf,protobuf,lib,lib-protobuf,google',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,google-protobuf,protobuf,lib,lib-protobuf,google"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,google-protobuf,protobuf,lib,lib-protobuf,google) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get google-protobuf protobuf lib lib-protobuf google[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_branch.#` + - Environment variables: + - *CM_TMP_GIT_CHECKOUT*: `#` + - Workflow: + * `_tag.#` + - Environment variables: + - *CM_GIT_CHECKOUT_TAG*: `#` + - Workflow: + +
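+
+The `#` in these variations is a placeholder substituted into the git checkout performed by the `get-git-repo` dependency; for example, pinning the source to a tag could look like this (a sketch with a hypothetical tag value; by default the script checks out `v` + `CM_VERSION` instead):
+
+```bash
+cmr "get google-protobuf protobuf lib lib-protobuf google _tag.v1.13.0"
+```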
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +#### Versions +Default version: `1.13.0` + +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-protobuf/_cm.json)*** + * get,cmake + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * get,gcc + - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-protobuf/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-protobuf/_cm.json)*** + * get,git,repo,_repo.https://github.com/google/protobuf.git + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-protobuf/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-protobuf/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-protobuf/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-protobuf/_cm.json) + +___ +### Script output +`cmr "get google-protobuf protobuf lib lib-protobuf google [,variations]" -j` +#### New environment keys (filter) + +* `+CPLUS_INCLUDE_PATH` +* `+C_INCLUDE_PATH` +* `+LD_LIBRARY_PATH` +* `CM_GOOGLE_PROTOBUF_INSTALL_PATH` +* `CM_GOOGLE_PROTOBUF_SRC_PATH` +#### New environment keys auto-detected from customize + +* `CM_GOOGLE_PROTOBUF_INSTALL_PATH` +* `CM_GOOGLE_PROTOBUF_SRC_PATH` \ No newline at end of file diff --git a/script/get-lib-protobuf/_cm.json b/script/get-lib-protobuf/_cm.json new file mode 100644 index 0000000000..b98b0d0e35 --- /dev/null +++ b/script/get-lib-protobuf/_cm.json @@ -0,0 +1,64 @@ +{ + "alias": "get-lib-protobuf", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "default_version": "1.13.0", + "deps": [ + { + "tags": "get,cmake" + }, + { + "tags": "get,gcc" + } + ], + "input_description": {}, + "input_mapping": {}, + "new_env_keys": [ + "CM_GOOGLE_PROTOBUF_SRC_PATH", + "CM_GOOGLE_PROTOBUF_INSTALL_PATH", + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [ + { + "force_env_keys": [ + "CM_GIT_*" + ], + "update_tags_from_env_with_prefix": { + "_repo.": [ "CM_TMP_GIT_URL" ], + "_branch.": [ "CM_TMP_GIT_CHECKOUT" ], + "_tag.": [ "CM_GIT_CHECKOUT_TAG" ] + }, + "tags": "get,git,repo,_repo.https://github.com/google/protobuf.git", + "extra_cache_tags": "lib,protobuf,src" + } + ], + "tags": [ + "get", + "google-protobuf", + "protobuf", + "lib", + "lib-protobuf", + "google" + ], + "uid": "db45f1eb73934f91", + "variations": { + "branch.#": { + "env": { + "CM_TMP_GIT_CHECKOUT": "#" + } + }, + "tag.#": { + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + } + } + }, + "versions": {} +} diff --git a/script/get-lib-protobuf/customize.py b/script/get-lib-protobuf/customize.py new file mode 100644 index 0000000000..c9e641eb44 --- /dev/null +++ b/script/get-lib-protobuf/customize.py @@ -0,0 +1,39 @@ +from cmind import 
utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + env['CM_GIT_CHECKOUT'] = "v"+env['CM_VERSION'] + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + env['+C_INCLUDE_PATH'] = [] + env['+CPLUS_INCLUDE_PATH'] = [] + env['+LD_LIBRARY_PATH'] = [] + + protobuf_install_path = os.path.join(os.getcwd(), "install") + env['CM_GOOGLE_PROTOBUF_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] + env['CM_GOOGLE_PROTOBUF_INSTALL_PATH'] = protobuf_install_path + env['+C_INCLUDE_PATH'].append(os.path.join(protobuf_install_path, "include")) + env['+CPLUS_INCLUDE_PATH'].append(os.path.join(protobuf_install_path, "include")) + + if os.path.exists(os.path.join(protobuf_install_path, "lib")): + env['+LD_LIBRARY_PATH'].append(os.path.join(protobuf_install_path, "lib")) + elif os.path.exists(os.path.join(protobuf_install_path, "lib64")): + env['+LD_LIBRARY_PATH'].append(os.path.join(protobuf_install_path, "lib64")) + else: + return {'return':1, 'error': f'Protobuf library path not found in {protobuf_install_path}'} + + return {'return':0} diff --git a/script/get-lib-protobuf/run.sh b/script/get-lib-protobuf/run.sh new file mode 100644 index 0000000000..29c0267d1b --- /dev/null +++ b/script/get-lib-protobuf/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash +CUR=$PWD +mkdir -p install +INSTALL_DIR=$CUR/install +cd ${CM_GIT_REPO_CHECKOUT_PATH} +rm -rf build +mkdir build +cd build +export MAKEFLAGS=-j${CM_MAKE_CORES} +cmake -Dprotobuf_BUILD_TESTS=OFF -DBUILD_SHARED_LIBS=ON -DCMAKE_CXX_STANDARD=14 -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} ../cmake +test $? -eq 0 || exit $? +CMD="make install" +echo ${CMD} +eval $CMD +test $? -eq 0 || exit $? diff --git a/script/get-lib-qaic-api/README.md b/script/get-lib-qaic-api/README.md new file mode 100644 index 0000000000..4929d95ba9 --- /dev/null +++ b/script/get-lib-qaic-api/README.md @@ -0,0 +1,133 @@ +Automatically generated README for this automation recipe: **get-lib-qaic-api** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-lib-qaic-api,1e253ae184e44f23) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-qaic-api)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,api,lib-qaic-api,lib,qaic* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get api lib-qaic-api lib qaic" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,api,lib-qaic-api,lib,qaic` + +`cm run script --tags=get,api,lib-qaic-api,lib,qaic ` + +*or* + +`cmr "get api lib-qaic-api lib qaic"` + +`cmr "get api lib-qaic-api lib qaic " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,api,lib-qaic-api,lib,qaic',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+   print (r['error'])
+
+```
+
+</details>
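+
+The schematic above elides script-specific input keys. For reference, a complete, self-contained call could look as follows; this is a hedged sketch (it assumes the repository with this recipe was pulled via `cm pull repo mlcommons@ck`, and reading the exported environment from the `new_env` result key is an assumption, not documented API):
+
+```python
+import cmind
+
+# Run the recipe and print what it exports (e.g. CM_QAIC_API_SRC_FILE;
+# see the "Script output" section below). 'new_env' is assumed here.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,api,lib-qaic-api,lib,qaic',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+else:
+    print(r.get('new_env', {}))
+```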
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,api,lib-qaic-api,lib,qaic"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,api,lib-qaic-api,lib,qaic) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get api lib-qaic-api lib qaic" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
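+
+For illustration, such overrides can also be passed programmatically through the `env` dictionary mentioned above; a hedged sketch (`CM_QUIET` is a key used elsewhere in this repository and serves only as an example here):
+
+```python
+import cmind
+
+# Equivalent of `--env.CM_QUIET=yes` on the command line.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,api,lib-qaic-api,lib,qaic',
+                  'env': {'CM_QUIET': 'yes'},
+                  'out': 'con'})
+assert r['return'] == 0, r.get('error')
+```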
+ +#### Versions +Default version: `master` + +* `master` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-qaic-api/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-qaic-api/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-qaic-api/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-qaic-api/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-qaic-api/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-qaic-api/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-lib-qaic-api/_cm.json) + +___ +### Script output +`cmr "get api lib-qaic-api lib qaic " -j` +#### New environment keys (filter) + +* `+CPLUS_INCLUDE_PATH` +* `+C_INCLUDE_PATH` +* `+LD_LIBRARY_PATH` +* `CM_LIB_QAIC_*` +* `CM_QAIC_API_*` +#### New environment keys auto-detected from customize + +* `CM_QAIC_API_INC_FILE` +* `CM_QAIC_API_SRC_FILE` \ No newline at end of file diff --git a/script/get-lib-qaic-api/_cm.json b/script/get-lib-qaic-api/_cm.json new file mode 100644 index 0000000000..f2d56d7b1f --- /dev/null +++ b/script/get-lib-qaic-api/_cm.json @@ -0,0 +1,39 @@ +{ + "alias": "get-lib-qaic-api", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "default_version": "master", + "deps": [ + { + "tags": "detect,os" + } + ], + "env": { + }, + "new_env_keys": [ + "CM_LIB_QAIC_*", + "CM_QAIC_API_*", + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH" + ], + "prehook_deps": [ + ], + "tags": [ + "get", + "api", + "lib-qaic-api", + "lib", + "qaic" + ], + "uid": "1e253ae184e44f23", + "versions": { + "master": { + "env": { + "CM_LIB_QAIC_VERSION": "master" + } + } + } +} diff --git a/script/get-lib-qaic-api/customize.py b/script/get-lib-qaic-api/customize.py new file mode 100644 index 0000000000..1c95c558cd --- /dev/null +++ b/script/get-lib-qaic-api/customize.py @@ -0,0 +1,39 @@ +from cmind import utils +import os + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + #env['CM_GIT_CHECKOUT'] = env['CM_TMP_GIT_BRANCH_NAME'] + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + paths = [ + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ] + + for key in paths: + env[key] = [] + + include_paths = [ env['CM_TMP_CURRENT_SCRIPT_PATH'] ] + + for inc_path in include_paths: + env['+C_INCLUDE_PATH'].append(inc_path) + env['+CPLUS_INCLUDE_PATH'].append(inc_path) + + version = "master" + env['CM_QAIC_API_SRC_FILE'] = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.cpp") + env['CM_QAIC_API_INC_FILE'] = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.h") + + return {'return':0} diff --git 
a/script/get-lib-qaic-api/master/QAicInfApi.cpp b/script/get-lib-qaic-api/master/QAicInfApi.cpp new file mode 100644 index 0000000000..c2b41a683b --- /dev/null +++ b/script/get-lib-qaic-api/master/QAicInfApi.cpp @@ -0,0 +1,750 @@ +// Copyright (c) 2021 Qualcomm Innovation Center, Inc. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted (subject to the limitations in the +// disclaimer below) provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// * Neither the name Qualcomm Innovation Center nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +// GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +// HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +// IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +// IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +#include "QAicInfApi.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace qaic_api { + +const uint32_t setSizeDefault = 10; +const uint32_t numActivationsDefault = 1; +const uint32_t numInferencesDefault = 40; +const uint32_t numThreadsPerQueueDefault = 4; +const uint32_t qidDefault = 0; + + +class ActivationSet { + + public: + ActivationSet( + QData ioDescQData, + QAicContext* context, + QAicProgram* program, QAicQueue* queue, QID dev, uint32_t numBuffers, + QAicExecObjProperties_t& execObjProperties_t, + uint32_t activationId, + QAicEventCallback callback = nullptr); + virtual ~ActivationSet(); + + // protected: + // Program is expected to be activated before calling init + QStatus init(uint32_t setSize = setSizeDefault); + QBuffer* getDmaBuffers(uint32_t execOjbIndex); + QStatus reset(); + QStatus setData(std::vector>& buffers); + QStatus setDataSingle(int set_idx, std::vector& buffers); + QStatus run(uint32_t numInferences, void* payload); + QStatus deinit(); + void setOutBufIndex(uint32_t outBufIndex) { outBufIndex_ = outBufIndex;} + std::string filename; + uint32_t getNumBuffers() { return numBuffers_; } +private: + std::vector eventExecSet_; + std::vector execObjSet_; + std::vector qbuffersSet_; + uint32_t setSize_; + QAicEvent *activationEvent_; + QAicContext *context_; + QAicProgram *program_; + QAicQueue *queue_; + QID dev_; + uint32_t numBuffers_; + QBuffer *userBuffers_; + QAicExecObjProperties_t execObjProperties_; + uint32_t activationId_; + QAicEventCallback callback_; + QData ioDescQData_; + uint32_t outBufIndex_; +}; + +//-------------------------------------------------------------------- +// ActivationSet class Implementation +//-------------------------------------------------------------------- + +ActivationSet::ActivationSet( + QData ioDescQData, + QAicContext *context, + QAicProgram *program, QAicQueue *queue, QID dev, uint32_t numBuffers, + QAicExecObjProperties_t &execObjProperties, + uint32_t activationId, + QAicEventCallback callback) + : context_(context), program_(program), queue_(queue), + dev_(dev), numBuffers_(numBuffers), userBuffers_(nullptr), + execObjProperties_(execObjProperties), + activationId_(activationId), callback_(callback), ioDescQData_(ioDescQData) {} + +ActivationSet::~ActivationSet() {} + +QStatus ActivationSet::deinit() { + QStatus status = QS_SUCCESS; + + for (auto &e : execObjSet_) { + + status = qaicReleaseExecObj(e); + if (status != QS_SUCCESS) { + std::cerr << "Failed to release Exec obj" << std::endl; + return status; + } + } + for (auto &ev : eventExecSet_) { + status = qaicReleaseEvent(ev); + if (status != QS_SUCCESS) { + std::cerr << "Failed to release Event obj" << std::endl; + return status; + } + } + return status; +} + +QBuffer* ActivationSet::getDmaBuffers(uint32_t execObjIndex) { + return qbuffersSet_[execObjIndex]; +} + +QStatus ActivationSet::init(uint32_t setSize) { + QStatus status = QS_SUCCESS; + + setSize_ = setSize; + + qbuffersSet_.resize(setSize_); + + if (!strcmp(std::getenv("QAIC_BYPASS_PPP"),"enable")) { + // std::cout << "Zero Copy enabled\n"; + execObjProperties_ |= QAIC_EXECOBJ_PROPERTIES_ZERO_COPY_BUFFERS; + } + + for (uint32_t i = 0; i < setSize_; i++) { + QAicExecObj *execObj = nullptr; + qbuffersSet_[i] = nullptr; + // nullptr is passed as the ioDesc indicating we will use the default + // ioDescriptor. 
+ status = qaicCreateExecObj( + context_, &execObj, &execObjProperties_, program_, + (ioDescQData_.data)?(&ioDescQData_):nullptr, + nullptr, nullptr); + if ((status != QS_SUCCESS) || (execObj == nullptr)) { + std::cerr << "Failed to create Exec obj" << std::endl; + return status; + } + execObjSet_.push_back(execObj); + if (!strcmp(std::getenv("QAIC_BYPASS_PPP"),"enable")) { + const QAicApiFunctionTable *aicApi_ = qaicGetFunctionTable(); + status = aicApi_ -> qaicExecObjGetIoBuffers( execObj, &numBuffers_, &qbuffersSet_[i]); + if ((status != QS_SUCCESS)) { + std::cerr << "Failed to get IO buffers" << std::endl; + return status; + } + } + + QAicEvent *event = nullptr; + status = qaicCreateEvent(context_, &event, + QAIC_EVENT_DEVICE_COMPLETE); + if ((status != QS_SUCCESS) || (event == nullptr)) { + std::cerr << "Failed to create Event" << std::endl; + return status; + } + eventExecSet_.push_back(event); + } + return QS_SUCCESS; +} + +QStatus ActivationSet::setData(std::vector> &buffers) { + QStatus status = QS_SUCCESS; + int i = 0; + if (!strcmp(std::getenv("QAIC_BYPASS_PPP"),"enable")) { + // no setdata is required when using dma buf path + + std::cerr << "no setdata is required when using dma buf path" << std::endl; + return status; + } + for (auto &e : execObjSet_) { + status = qaicExecObjSetData(e, buffers[i].size(), buffers[i].data()); + if (status != QS_SUCCESS) { + return status; + } + ++i; + } + //userBuffers_ = userBuffers; + return status; +} + +QStatus ActivationSet::setDataSingle(int set_idx, std::vector &buffers) { + QStatus status = QS_SUCCESS; + + status = qaicExecObjSetData(execObjSet_[set_idx], buffers.size(), buffers.data()); + if (status != QS_SUCCESS) { + std::cout << "tried to set " << set_idx << " " << buffers.data() << " " << buffers.size() << std::endl; + return status; + } + + return status; +} + + +QStatus ActivationSet::run(uint32_t index, void* payload) { + QStatus status; + + //std::cout << "clearing event for " << index << " " << payload << std::endl; + status = qaicEventClear(eventExecSet_.at(index)); + if (status != QS_SUCCESS) { + return status; + } + + qaicEventRemoveCallback(eventExecSet_.at(index), callback_); + + status = qaicEventAddCallback(eventExecSet_.at(index), callback_, payload); + if (status != QS_SUCCESS) { + return status; + } + + //std::cout << "Enqueuing work " << index << " " << payload << std::endl; + status = qaicEnqueueExecObj(queue_, execObjSet_.at(index), + eventExecSet_.at(index)); + if (status != QS_SUCCESS) { + return status; + } + + //std::cout << "Creating callback " << index << " " << payload << std::endl; + return QS_SUCCESS; +} + + + +//------------------------------------------------------------------ +// QAIC Runner Example Class Implementation +//------------------------------------------------------------------ +QAicInfApi::QAicInfApi() + : + context_(nullptr), + constants_(nullptr), contextProperties_(QAIC_CONTEXT_DEFAULT), + execObjProperties_(QAIC_EXECOBJ_PROPERTIES_DEFAULT), + queueProperties_{QAIC_QUEUE_PROPERTIES_ENABLE_MULTI_THREADED_QUEUES, + numThreadsPerQueueDefault}, + dev_(0), + numActivations_(numActivationsDefault), + numInferences_(numInferencesDefault), + numThreadsPerQueue_(numThreadsPerQueueDefault), setSize_(setSizeDefault), + activated_(false), entryPoint_("default") { +} + +QAicInfApi::~QAicInfApi() { + QStatus status; + + for (uint32_t i = 0; i < programs_.size(); i++) { + status = qaicReleaseProgram(programs_[i]); + if (status != QS_SUCCESS) { + std::cerr << "Failed to release program" << std::endl; + } 
+ } + + for (uint32_t i = 0; i < queues_.size(); i++) { + status = qaicReleaseQueue(queues_[i]); + if (status != QS_SUCCESS) { + std::cerr << "Failed to release queue" << std::endl; + } + queues_[i] = nullptr; + } + + if (constants_ != nullptr) { + status = qaicReleaseConstants(constants_); + if (status != QS_SUCCESS) { + std::cerr << "Failed to release constants" << std::endl; + } + } + + shActivationSets_.clear(); + + if (context_ != nullptr) { + status = qaicReleaseContext(context_); + if (status != QS_SUCCESS) { + std::cerr << "Failed to release context" << std::endl; + } + context_ = nullptr; + } + + inferenceBufferVector_.clear(); +} + +void QAicInfApi::setSkipStage(std::string qaic_skip_stage) { + if (!qaic_skip_stage.empty()) { + entryPoint_ = qaic_skip_stage; + } +} + +QStatus QAicInfApi::loadFileType( + const std::string &filePath, size_t &sizeLoaded, uint8_t *&dataPtr, + std::vector> &vector) { + uint64_t fileSize; + std::ifstream infile; + infile.open(filePath, std::ios::binary | std::ios::in); + if (!infile.is_open()) { + std::cerr << "Failed to open file: " << filePath << std::endl; + return QS_ERROR; + } + + infile.seekg(0, infile.end); + fileSize = infile.tellg(); + infile.seekg(0, infile.beg); + std::unique_ptr uniqueBuffer = + std::unique_ptr(new (std::nothrow) uint8_t[fileSize]); + if (uniqueBuffer == nullptr) { + std::cerr << "Failed to allocate buffer for file " << filePath + << " of size " << fileSize << std::endl; + return QS_ERROR; + } + infile.read((char *)uniqueBuffer.get(), fileSize); + if (!infile) { + std::cerr << "Failed to read all data from file " << filePath << std::endl; + return QS_ERROR; + } + dataPtr = uniqueBuffer.get(); + vector.emplace_back(std::move(uniqueBuffer)); + sizeLoaded = fileSize; + return QS_SUCCESS; +} + + +QStatus QAicInfApi::init(QID qid, QAicEventCallback callback) { + QStatus status = QS_SUCCESS; + + callback_ = callback; + //std::cout << "callback - " << (void*)callback_ << std::endl; + + dev_ = qid; + + // validate if device is available + QDevInfo devInfo; + status = qaicGetDeviceInfo(dev_, &devInfo); + if (status == QS_SUCCESS) { + if (devInfo.devStatus != QDS_READY) { + std::cerr << "Device:" << dev_ << " not in ready state" << std::endl; + exit(1); + } + } else { + std::cerr << "Invalid device:" << std::to_string(dev_) << std::endl; + exit(1); + } + + // Check Library Compatibility + { + uint16_t major; + uint16_t minor; + const char *patch; + const char *variant; + status = qaicGetAicVersion(&major, &minor, &patch, &variant); + + if (status != QS_SUCCESS) { + std::cerr << "Unable to retrieve AicVersion" << std::endl; + exit(1); + } + if ((major != LRT_LIB_MAJOR_VERSION) || (minor < LRT_LIB_MINOR_VERSION)) { + std::cerr << "AicApi Header is not compatible with Library, lib:" << major + << "." << minor << " header:" << LRT_LIB_MAJOR_VERSION << "." 
+ << LRT_LIB_MINOR_VERSION << std::endl; + exit(1); + } + } + + status = qaicCreateContext(&context_, &contextProperties_, 1, &dev_, + logCallback, errorHandler, nullptr); + if ((context_ == nullptr) || (status != QS_SUCCESS)) { + std::cerr << "Failed to Create Context" << std::endl; + return status; + } + + for (uint32_t i = 0; i < modelBasePaths_.size() ; i++) { + + QBuffer programQpcBuf_; + QAicProgramProperties_t programProperties_; + std::vector> programBufferVector_; + QAicQpcObj *qpcObj_; + + std::string filePath = modelBasePaths_[i] + "/programqpc.bin"; + + + // Load file + status = loadFileType(filePath, programQpcBuf_.size, programQpcBuf_.buf, + programBufferVector_); + + + //------------------------------------------------------------------------- + // Create Programs + // It is valid to pass a null for constants, if null program will + // disregard constants + //------------------------------------------------------------------------- + // Initialize the program properties with default. + status = qaicProgramPropertiesInitDefault(&programProperties_); + if (status != QS_SUCCESS) { + std::cerr << "Failed to initialize program properties." << std::endl; + return status; + } + + status = qaicOpenQpc(&qpcObj_, programQpcBuf_.buf, programQpcBuf_.size, false); + if (status != QS_SUCCESS) { + std::cerr << "Failed to open Qpc." << std::endl; + return status; + } + + const char *name = "progName"; + QAicProgram *program = nullptr; + + status = qaicCreateProgram( + context_, &program, &programProperties_, dev_, name, qpcObj_); + + if ((program == nullptr) || (status != QS_SUCCESS)) { + std::cerr << "Failed to create program" << std::endl; + return status; + } + programs_.push_back(program); + } + + + //------------------------------------------------------------------------- + // Load Programs QAicInfApi(uint32_t dummy); + + // User may choose to explicitly load program, or let the driver load + // the program when it is needed. + // For this reason the following code is commented out, to demonstrate + // automatic loading and activation + //------------------------------------------------------------------------- + for (uint32_t i = 0; i < modelBasePaths_.size(); i++) { + QStatus status; + status = qaicLoadProgram(programs_[i]); + if (status != QS_SUCCESS) { + std::cerr << "Failed to load program" << std::endl; + return status; + } + } + //------------------------------------------------------------------------- + // Activate Programs + // User may choose to explicitly activate program, or let the driver + // activate the program when it is needed. 
+ // For this reason the following code is commented out, to demonstrate + // automatic loading and activation + //------------------------------------------------------------------------- + for (uint32_t i = 0; i < modelBasePaths_.size(); i++) { + QStatus status; + status = qaicRunActivationCmd(programs_[i], + QAIC_PROGRAM_CMD_ACTIVATE_FULL); + if (status != QS_SUCCESS) { + std::cerr << "Failed to enqueue Activation command" << std::endl; + return status; + } + } + + //------------------------------------------------------------------------- + // Create Queues for Execution + //------------------------------------------------------------------------- + for (uint32_t i = 0; i < modelBasePaths_.size(); i++) { + + QAicQueue *queue = nullptr; + status = + qaicCreateQueue(context_, &queue, &queueProperties_, dev_); + if ((queue == nullptr) || (status != QS_SUCCESS)) { + std::cerr << "Failed to create queue" << std::endl; + return status; + } + queues_.push_back(queue); + } + + for (uint32_t i = 0; i < modelBasePaths_.size(); i++) { + + QData ioDescQData; + ioDescQData.data = nullptr; + ioDescQData.size = 0; + aicapi::IoDesc ioDescProto; + status = qaicProgramGetIoDescriptor(programs_[i], &ioDescQData); + if (ioDescQData.data == nullptr) { + std::cerr << "Failed to get iodesc" << std::endl; + return QS_ERROR; + } + ioDescProto.ParseFromArray(ioDescQData.data, ioDescQData.size); + if (!entryPoint_.empty() && entryPoint_.compare("default") != 0) { + for (auto &io_set : ioDescProto.io_sets()) { + if (io_set.name().compare(entryPoint_) == 0) { + ioDescProto.clear_selected_set(); + ioDescProto.mutable_selected_set()->CopyFrom(io_set); + break; + } + } + if(ioDescProto.selected_set().name().compare(entryPoint_) != 0) { + std::cerr << "Failed to match name in iodesc" << std::endl; + return QS_ERROR; + } + + try { + customizedIoDescProtoBuffer_.resize(ioDescProto.ByteSizeLong()); + } catch (const std::bad_alloc &e) { + std::cerr << "vector resize failed for protocol Buffer -"<< e.what()< shActivation = + std::make_shared(ioDescQData, + context_, programs_[i], queues_[i], dev_, + numBuffers, + execObjProperties_, i, callback_); + if (shActivation != nullptr) { + shActivation->init(setSize_); + shActivationSets_.emplace_back(shActivation); + } + + // Create IO buffers + status = createBuffers(i, ioDescProto, shActivation); + if (status != QS_SUCCESS) { + std::cerr << "Failed to create IO buffers." 
<< std::endl; + return status; + } + } + + if (!(!strcmp(std::getenv("QAIC_BYPASS_PPP"),"enable"))) { + setData(); + } + + return QS_SUCCESS; +} + +QStatus QAicInfApi::createBuffers(int idx, aicapi::IoDesc& ioDescProto, std::shared_ptr shActivation) { + + inferenceBuffersList_.resize(inferenceBuffersList_.size() + 1); + + inferenceBuffersList_[idx].resize(setSize_); + if (!strcmp(std::getenv("QAIC_BYPASS_PPP"),"enable")) { + for (uint32_t y = 0; y < setSize_; y++) { + + QBuffer* dmaBuffVect = shActivation->getDmaBuffers(y); + + for (uint32_t i = 0; i < shActivation->getNumBuffers(); i++) { + inferenceBuffersList_[idx][y].push_back(dmaBuffVect[i]); + } + } + return QS_SUCCESS; + } + + for (uint32_t y = 0; y < setSize_; y++) { + + for (uint32_t i = 0; i < ioDescProto.selected_set().bindings().size(); i++) { + if (ioDescProto.selected_set().bindings(i).dir() == aicapi::BUFFER_IO_TYPE_OUTPUT) { + QBuffer buf; + uint32_t outputBufferSize = ioDescProto.selected_set().bindings(i).size(); + std::unique_ptr uniqueBuffer = std::unique_ptr( + // over allocate to allow for buffer alignment + new(std::nothrow) uint8_t[outputBufferSize + 32]); + if (uniqueBuffer == nullptr) { + std::cerr << "Failed to allocate buffer for output, size " + << outputBufferSize << std::endl; + return QS_ERROR; + } + buf.buf = uniqueBuffer.get(); + + //align the buffer to 32 byte boundary + uint64_t mask = 31; + mask = ~mask; + buf.buf = (uint8_t*)((uint64_t)(buf.buf + 32) & mask); + + buf.size = outputBufferSize; + inferenceBufferVector_.push_back(std::move(uniqueBuffer)); + inferenceBuffersList_[idx][y].push_back(std::move(buf)); + } else if (ioDescProto.selected_set().bindings(i).dir() == aicapi::BUFFER_IO_TYPE_INPUT) { + QBuffer buf = QBuffer(); + uint32_t inputBufferSize = ioDescProto.selected_set().bindings(i).size(); + + std::unique_ptr uniqueBuffer = std::unique_ptr( + // over allocate to allow for buffer alignment + new(std::nothrow) uint8_t[inputBufferSize + 32]); + if (uniqueBuffer == nullptr) { + std::cerr << "Failed to allocate input buffer" << std::endl; + return QS_ERROR; + } + buf.buf = uniqueBuffer.get(); + + //align the buffer to 32 byte boundary + uint64_t mask = 31; + mask = ~mask; + buf.buf = (uint8_t*)((uint64_t)(buf.buf + 32) & mask); + + buf.size = inputBufferSize; + inferenceBufferVector_.push_back(std::move(uniqueBuffer)); + inferenceBuffersList_[idx][y].push_back(std::move(buf)); + } + } + } + + return QS_SUCCESS; +} + +QStatus QAicInfApi::setData() { + + //-------------------------------------- + // Set data in buffers + //-------------------------------------- + int x = 0; + for (auto &a : shActivationSets_) { + if (a != nullptr) { + a->setData(inferenceBuffersList_[x]); + } + ++x; + } + + return QS_SUCCESS; +} + + +//---------------------------------------------------------------- +// Run Inferences +//---------------------------------------------------------------- +QStatus QAicInfApi::run(uint32_t activation, + uint32_t execobj, + void* payload) { + QStatus status = QS_SUCCESS; + //setData(); + + shActivationSets_[activation]->run(execobj, payload); + + return status; +} +/*QStatus qaicExecObjGetIoBuffers(const QAicExecObj *execObj, + uint32_t *numBuffers, QBuffer **buffers) { + if ((execObj == nullptr) || (execObj->shExecObj == nullptr) || + (numBuffers == nullptr) || (buffers == nullptr)) { + // LogErrorG("Invalid null pointer"); + return QS_INVAL; + } + return execObj->shExecObj->getIoBuffers(*numBuffers, *buffers); +}*/ + +QStatus QAicInfApi::deinit() { + QStatus status; + + for (auto &a : 
shActivationSets_) { + if (a != nullptr) { + status = a->deinit(); + if (status != QS_SUCCESS) { + return status; + } + } + } + + if (activated_ == false) { + return QS_SUCCESS; + } + + for (uint32_t i = 0; i < modelBasePaths_.size(); i++) { + qaicRunActivationCmd(programs_.at(i), + QAIC_PROGRAM_CMD_DEACTIVATE_FULL); + } + for (uint32_t i = 0; i < modelBasePaths_.size(); i++) { + status = qaicUnloadProgram(programs_[i]); + if (status != QS_SUCCESS) { + std::cerr << "Failed to unload program" << std::endl; + return status; + } + } + + return QS_SUCCESS; +} + +// Kept to keep backwards compatibility for resnets 50 and 34. +void QAicInfApi::setNumActivations(uint32_t num) { + + for(int i=0 ; i(ptr); + + QStatus status = QS_SUCCESS; + + status = shActivationSets_[act_idx]->setDataSingle(set_idx, inferenceBuffersList_[act_idx][set_idx]); + if (status != QS_SUCCESS) { + std::cerr << "Failed to set data." << std::endl; + return status; + } + + return status; +} + + + +} + diff --git a/script/get-lib-qaic-api/master/QAicInfApi.h b/script/get-lib-qaic-api/master/QAicInfApi.h new file mode 100644 index 0000000000..3af6f1f335 --- /dev/null +++ b/script/get-lib-qaic-api/master/QAicInfApi.h @@ -0,0 +1,146 @@ +// Copyright (c) 2021 Qualcomm Innovation Center, Inc. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted (subject to the limitations in the +// disclaimer below) provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// * Neither the name Qualcomm Innovation Center nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +// GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +// HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +// IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +// IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef QAIC_DEVICE_H_ +#define QAIC_DEVICE_H_ + +#include "QAicApi.h" +#include "QAicApi.pb.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace qaic_api { + +extern const uint32_t setSizeDefault; +extern const uint32_t numActivationsDefault; +extern const uint32_t numInferencesDefault; +extern const uint32_t numThreadsPerQueueDefault; +extern const uint32_t qidDefault; + +class ActivationSet; + +class QAicInfApi { +public: + QAicInfApi(); + + virtual ~QAicInfApi(); + static void logCallback(QLogLevel logLevel, const char *str) { + std::cout << str; + } + + static void errorHandler(QAicContextID id, const char *errInfo, + QAicErrorType errType, const void *errData, + size_t errDataSize, void *userData) { + std::cout << "Received Error Handler CB: id " << id << "msg: " << errInfo + << std::endl; + } + + void setModelBasePath(std::string modelBasePath); + void setNumActivations(uint32_t num); + void setNumThreadsPerQueue(uint32_t num); + void setSetSize(uint32_t num); + void setLibPath(std::string &aicLibPath); + void setSkipStage(std::string qaic_skip_stage); + + // Initialize Driver, Run, De-Init, get Results + + QStatus init(QID qid, QAicEventCallback callback); + QStatus loadDataset(); + QStatus setData(); + QStatus createBuffers(int idx, aicapi::IoDesc& ioDescProto, std::shared_ptr); + + QStatus run(uint32_t activation, uint32_t execobj, void* payload); + + QStatus deinit(); + uint64_t getInfCompletedCount(); + bool isBatchMode(); + + void* getBufferPtr(uint32_t act_idx,uint32_t exec_idx, uint32_t buf_idx) { + return inferenceBuffersList_[act_idx][exec_idx][buf_idx].buf; + } + + QStatus setBufferPtr(uint32_t act_idx, uint32_t set_idx, uint32_t buf_idx, void* ptr); + +private: + QStatus loadFileType(const std::string &filePath, size_t &sizeLoaded, + uint8_t *&dataPtr, + std::vector> &vector); + QAicContext *context_; + QAicConstants *constants_; + std::vector programs_; + // Properties + QAicContextProperties_t contextProperties_; + QAicConstantsProperties_t constantsProperties_; + QAicExecObjProperties_t execObjProperties_; + QAicQueueProperties queueProperties_; + + std::vector>> inferenceBuffersList_; + + // Per Activation Resources + std::vector queues_; + std::vector perQueueFinishEvents_; + std::vector> shActivationSets_; + QBuffer constDescBuf_; + QBuffer constBuf_; + QBuffer networkDescBuf_; + QBuffer progBuf_; + QID dev_; + std::vector modelBasePaths_; + std::vector> inferenceBufferVector_; + uint32_t numActivations_; + uint32_t numInferences_; + uint32_t numThreadsPerQueue_; + uint32_t setSize_; + bool activated_; + std::vector infDataSet; + + // Callback + QAicEventCallback callback_; + std::string entryPoint_; + std::vector customizedIoDescProtoBuffer_; +}; // QAicInfApi + +} // namespace qaic_device + +#endif diff --git a/script/get-lib-qaic-api/run.sh b/script/get-lib-qaic-api/run.sh new file mode 100644 index 0000000000..c880c1f3f0 --- /dev/null +++ b/script/get-lib-qaic-api/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +CUR_DIR=${PWD:-tmp} + diff --git a/script/get-llvm/README-extra.md b/script/get-llvm/README-extra.md new file mode 100644 index 0000000000..8020e09ba0 --- /dev/null +++ b/script/get-llvm/README-extra.md @@ -0,0 +1,96 @@ +# Get LLVM +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed llvm on the system and if not found calls the [install script for llvm](../script/install-llvm-prebuilt). 
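+
+The same detect-or-install flow can be driven from Python; a minimal sketch, assuming the `cmind` package is installed and that the run result exposes the exported variables under `new_env` (an assumption, not documented API):
+
+```python
+import cmind
+
+# Detect (or install) LLVM, then read back the exported clang location.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,llvm',
+                  'out': 'con'})
+if r['return'] > 0:
+    raise RuntimeError(r['error'])
+print(r.get('new_env', {}).get('CM_LLVM_CLANG_BIN_WITH_PATH'))
+```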
+ +## Exported Variables +* `CM_LLVM_CLANG_BIN` +* `CM_LLVM_CLANG_BIN_WITH_PATH` +* `CM_C_COMPILER_BIN` +* `CM_C_COMPILER_WITH_PATH` +* `CM_CXX_COMPILER_BIN` +* `CM_CXX_COMPILER_WITH_PATH` +* `CM_COMPILER_*` +* `CM_LINKER_*` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 + +# CLI + +## Default +```bash +cm run script "get llvm" +``` +or +```bash +cm run script --tags=get,llvm +``` + +## Version + +```bash +cm run script "get llvm" --version=14.0.0 +``` + +## Version min +```bash +cm run script "get llvm" --version_min=12.0.0 +``` + +## Version max +```bash +cm run script "get llvm" --version_max=13.999.999 --version_max_usable=13.0.0 +``` + +## Detect llvm3 in non-standard path +```bash +cm run script "get llvm" --path={directory with llvm} +``` + +### Detect llvm with non-standard name +```bash +cm run script "get llvm" --input={full path to clang} +``` + +## Force new detection even if llvm is already found and cached +```bash +cm run script "get llvm" --new +``` + +## Test + +```bash +cm run script "app image corner-detection" +``` + +## Reproducibility matrix + +*Test detection and installation on different platforms* + +* Windows, Linux, MacOS + +### RHEL 9 + +#### v14.0.0: ✓ + +```bash +cm rm cache -f +cm run script "get llvm" --version=14.0.0 +cm run script "app image corner-detection" +``` + +#### v13.0.0: Need special command + +```bash +cm rm cache -f +cm run script "get llvm" --version=13.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-13.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz +cm run script "app image corner-detection" +``` + +#### v12.0.0: Need special command + +```bash +cm rm cache -f +cm run script "get llvm" --version=12.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz +cm run script "app image corner-detection" +``` diff --git a/script/get-llvm/README.md b/script/get-llvm/README.md new file mode 100644 index 0000000000..f7b9ae99bd --- /dev/null +++ b/script/get-llvm/README.md @@ -0,0 +1,176 @@ +Automatically generated README for this automation recipe: **get-llvm** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-llvm,99832a103ed04eb8) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-llvm)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,llvm,compiler,c-compiler,cpp-compiler,get-llvm* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get llvm compiler c-compiler cpp-compiler get-llvm" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,llvm,compiler,c-compiler,cpp-compiler,get-llvm`
+
+`cm run script --tags=get,llvm,compiler,c-compiler,cpp-compiler,get-llvm[,variations] `
+
+*or*
+
+`cmr "get llvm compiler c-compiler cpp-compiler get-llvm"`
+
+`cmr "get llvm compiler c-compiler cpp-compiler get-llvm [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,llvm,compiler,c-compiler,cpp-compiler,get-llvm',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+   print (r['error'])
+
+```
+
+</details>
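+
+CLI flags such as `--version` (see the notes in README-extra.md) map to input keys of the same name in the Python API; a hedged sketch of pinning the LLVM version this way (the flag-to-key mapping follows the pattern documented for other scripts in this repository):
+
+```python
+import cmind
+
+# Mirrors: cm run script "get llvm" --version=14.0.0
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,llvm,compiler,c-compiler,cpp-compiler,get-llvm',
+                  'version': '14.0.0',
+                  'out': 'con'})
+print(r['return'])
+```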
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,llvm,compiler,c-compiler,cpp-compiler,get-llvm"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,llvm,compiler,c-compiler,cpp-compiler,get-llvm) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get llvm compiler c-compiler cpp-compiler get-llvm[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_from-prebuilt` + - Workflow: + * `_from-src` + - Workflow: + +
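+
+Variations are appended to the tag list after a comma, as shown in the `[,variations]` forms above. For example, forcing installation from prebuilt binaries rather than relying on detection could look like this (a sketch, with the same caveats as the Python example above):
+
+```python
+import cmind
+
+# `_from-prebuilt` selects the corresponding variation listed above.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,llvm,_from-prebuilt',
+                  'out': 'con'})
+```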
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
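+
+Outside of CM, the exported keys listed under "Script output" below can be consumed directly. A hedged sketch (again assuming the `new_env` result key) that invokes the detected compiler:
+
+```python
+import cmind
+import subprocess
+
+r = cmind.access({'action': 'run', 'automation': 'script',
+                  'tags': 'get,llvm', 'out': 'con'})
+clang = r.get('new_env', {}).get('CM_C_COMPILER_WITH_PATH')
+if clang:
+    # Print the compiler version, much like this recipe's run.sh does.
+    subprocess.run([clang, '--version'], check=True)
+```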
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-llvm/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-llvm/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-llvm/_cm.json)*** + * install,llvm + * `if (CM_REQUIRE_INSTALL == yes)` + * CM names: `--adr.llvm-install...` + - CM script: [install-llvm-prebuilt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-llvm-prebuilt) + - CM script: [install-llvm-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-llvm-src) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-llvm/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-llvm/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-llvm/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-llvm/customize.py)*** + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-llvm/_cm.json)*** + * get,compiler-flags + - CM script: [get-compiler-flags](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-compiler-flags) + +___ +### Script output +`cmr "get llvm compiler c-compiler cpp-compiler get-llvm [,variations]" -j` +#### New environment keys (filter) + +* `+ CFLAGS` +* `+ CXXFLAGS` +* `+ FFLAGS` +* `+ LDFLAGS` +* `+CM_HOST_OS_DEFAULT_INCLUDE_PATH` +* `+PATH` +* `CM_COMPILER_*` +* `CM_CXX_COMPILER_*` +* `CM_C_COMPILER_*` +* `CM_LINKER_*` +* `CM_LLVM_*` +#### New environment keys auto-detected from customize + +* `CM_COMPILER_CACHE_TAGS` +* `CM_COMPILER_FAMILY` +* `CM_COMPILER_FLAGS_DEBUG` +* `CM_COMPILER_FLAGS_DEFAULT` +* `CM_COMPILER_FLAGS_FAST` +* `CM_COMPILER_VERSION` +* `CM_CXX_COMPILER_BIN` +* `CM_CXX_COMPILER_FLAG_INCLUDE` +* `CM_CXX_COMPILER_FLAG_OUTPUT` +* `CM_CXX_COMPILER_FLAG_VERSION` +* `CM_CXX_COMPILER_WITH_PATH` +* `CM_C_COMPILER_BIN` +* `CM_C_COMPILER_FLAG_INCLUDE` +* `CM_C_COMPILER_FLAG_OUTPUT` +* `CM_C_COMPILER_FLAG_VERSION` +* `CM_C_COMPILER_WITH_PATH` +* `CM_LINKER_FLAGS_DEBUG` +* `CM_LINKER_FLAGS_DEFAULT` +* `CM_LINKER_FLAGS_FAST` +* `CM_LLVM_CLANG_BIN` +* `CM_LLVM_CLANG_CACHE_TAGS` \ No newline at end of file diff --git a/script/get-llvm/_cm.json b/script/get-llvm/_cm.json new file mode 100644 index 0000000000..cd998ce5a3 --- /dev/null +++ b/script/get-llvm/_cm.json @@ -0,0 +1,68 @@ +{ + "alias": "get-llvm", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Compiler automation", + "cache": true, + "clean_files": [], + "env": { + "CM_REQUIRE_INSTALL": "no" + }, + "name": "Detect or install LLVM compiler", + "new_env_keys": [ + "CM_LLVM_*", + "CM_C_COMPILER_*", + "CM_CXX_COMPILER_*", + "CM_COMPILER_*", + "CM_LINKER_*", + "+ CFLAGS", + "+ CXXFLAGS", + "+ FFLAGS", + "+ LDFLAGS", + "+CM_HOST_OS_DEFAULT_INCLUDE_PATH", + "+PATH" + ], + "post_deps": [ + { + "tags": "get,compiler-flags" + } + ], + "prehook_deps": [ + { + "enable_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "reuse_version": true, + "tags": "install,llvm", + "names": "llvm-install" + } + ], + "sort": 100, + "tags": [ + "get", + "llvm", + "compiler", + "c-compiler", + 
"cpp-compiler", + "get-llvm" + ], + "uid": "99832a103ed04eb8", + "variations": { + "from-prebuilt": { + "ad": { + "llvm-install": { + "tags": "prebuilt" + } + } + }, + "from-src": { + "ad": { + "llvm-install": { + "tags": "src,_clang" + } + } + } + } +} diff --git a/script/get-llvm/customize.py b/script/get-llvm/customize.py new file mode 100644 index 0000000000..c9d872a23f --- /dev/null +++ b/script/get-llvm/customize.py @@ -0,0 +1,91 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name_c = 'clang.exe' if os_info['platform'] == 'windows' else 'clang' + + env['FILE_NAME_C'] = file_name_c + + if 'CM_LLVM_CLANG_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name_c, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': 'PATH', + 'detect_version':True, + 'env_path_key':'CM_LLVM_CLANG_BIN_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + if r['return'] >0 : + if r['return'] == 16: + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return':0} + +def detect_version(i): + + r = i['automation'].parse_version({'match_text': r'clang version\s*([\d.]+)', + 'group_number': 1, + 'env_key':'CM_LLVM_CLANG_VERSION', + 'which_env':i['env']}) + if r['return'] >0: return r + + version = r['version'] + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return':0, 'version':version} + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] >0: return r + + version = env['CM_LLVM_CLANG_VERSION'] + env['CM_LLVM_CLANG_CACHE_TAGS'] = 'version-'+version + env['CM_COMPILER_CACHE_TAGS'] = 'version-'+version+',family-llvm' + env['CM_COMPILER_FAMILY'] = 'LLVM' + env['CM_COMPILER_VERSION'] = env['CM_LLVM_CLANG_VERSION'] + + found_file_path = env['CM_LLVM_CLANG_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + + file_name_c = os.path.basename(found_file_path) + file_name_cpp = file_name_c.replace("clang", "clang++") + + env['CM_LLVM_CLANG_BIN']=file_name_c + + # General compiler for general program compilation + env['CM_C_COMPILER_BIN']=file_name_c + env['CM_C_COMPILER_WITH_PATH']=found_file_path + env['CM_C_COMPILER_FLAG_OUTPUT']='-o ' + env['CM_C_COMPILER_FLAG_VERSION']='--version' + env['CM_C_COMPILER_FLAG_INCLUDE']='-I' + + env['CM_CXX_COMPILER_BIN']=file_name_cpp + env['CM_CXX_COMPILER_WITH_PATH']=os.path.join(found_path, file_name_cpp) + env['CM_CXX_COMPILER_FLAG_OUTPUT']='-o ' + env['CM_CXX_COMPILER_FLAG_VERSION']='--version' + env['CM_CXX_COMPILER_FLAG_INCLUDE']='-I' + + env['CM_COMPILER_FLAGS_FAST'] = "-O4" + env['CM_LINKER_FLAGS_FAST'] = "-O4" # "-flto" - this flag is not always available (requires LLVMgold.so) + env['CM_COMPILER_FLAGS_DEBUG'] = "-O0" + env['CM_LINKER_FLAGS_DEBUG'] = "-O0" + env['CM_COMPILER_FLAGS_DEFAULT'] = "-O2" + env['CM_LINKER_FLAGS_DEFAULT'] = "-O2" + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_LLVM_CLANG_BIN_WITH_PATH'] + + return {'return':0, 'version': version} diff --git a/script/get-llvm/run.bat b/script/get-llvm/run.bat new file mode 100644 index 0000000000..632b201da7 --- /dev/null +++ b/script/get-llvm/run.bat @@ -0,0 +1,3 @@ +%CM_LLVM_CLANG_BIN_WITH_PATH% --version > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + diff --git a/script/get-llvm/run.sh b/script/get-llvm/run.sh new file mode 100644 index 0000000000..c24cbb1adb --- /dev/null +++ 
b/script/get-llvm/run.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+clang_bin=${CM_LLVM_CLANG_BIN_WITH_PATH}
+${clang_bin} --version > tmp-ver.out
+test $? -eq 0 || exit 1
diff --git a/script/get-microtvm/README-extra.md b/script/get-microtvm/README-extra.md
new file mode 100644
index 0000000000..5e88765194
--- /dev/null
+++ b/script/get-microtvm/README-extra.md
@@ -0,0 +1,5 @@
+# GET-MICROTVM
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) clones the git repository of [Microtvm](https://github.com/octoml/microtvm) and caches it in CM for reuse across other CM scripts.
+
+## Exported Variables
+1. [CM_MICROTVM_SOURCE](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-microtvm/customize.py#L24): Location in the CM cache where the microtvm git repository is cloned.
diff --git a/script/get-microtvm/README.md b/script/get-microtvm/README.md
new file mode 100644
index 0000000000..be63d376b7
--- /dev/null
+++ b/script/get-microtvm/README.md
@@ -0,0 +1,164 @@
+Automatically generated README for this automation recipe: **get-microtvm**
+
+Category: **TinyML automation**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-microtvm,a9cad70972a140b9) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-microtvm)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,src,source,microtvm,tiny*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get src source microtvm tiny" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,src,source,microtvm,tiny`
+
+`cm run script --tags=get,src,source,microtvm,tiny[,variations] [--input_flags]`
+
+*or*
+
+`cmr "get src source microtvm tiny"`
+
+`cmr "get src source microtvm tiny [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,src,source,microtvm,tiny',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+   print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,src,source,microtvm,tiny"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,src,source,microtvm,tiny) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get src source microtvm tiny[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_full-history` + - Environment variables: + - *CM_GIT_DEPTH*: `--depth 10` + - Workflow: + * `_short-history` + - Environment variables: + - *CM_GIT_DEPTH*: `--depth 10` + - Workflow: + +
+ + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--ssh=value`  →  `CM_GIT_SSH=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "ssh":...})
+```
+
+</details>
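+
+A filled-in version of the snippet above could look like this (a sketch; `'yes'` as the value is an assumed convention, while the `ssh` key itself comes from the table above):
+
+```python
+import cmind
+
+# `--ssh=yes` on the CLI becomes the `ssh` input key, which this
+# script maps to CM_GIT_SSH according to the table above.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,src,source,microtvm,tiny',
+                  'ssh': 'yes',
+                  'out': 'con'})
+```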
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +#### Versions +Default version: `main` + +* `custom` +* `main` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-microtvm/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-microtvm/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-microtvm/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-microtvm/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-microtvm/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-microtvm/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-microtvm/_cm.json) + +___ +### Script output +`cmr "get src source microtvm tiny [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_MICROTVM_*` +#### New environment keys auto-detected from customize + +* `CM_MICROTVM_SOURCE` \ No newline at end of file diff --git a/script/get-microtvm/_cm.json b/script/get-microtvm/_cm.json new file mode 100644 index 0000000000..994a2ed416 --- /dev/null +++ b/script/get-microtvm/_cm.json @@ -0,0 +1,56 @@ +{ + "alias": "get-microtvm", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category":"TinyML automation", + "default_version": "main", + "deps": [ + { + "tags": "detect,os" + } + ], + "env": { + "CM_GIT_AUTH": "yes", + "CM_GIT_DEPTH": "", + "CM_GIT_PATCH": "no", + "CM_GIT_URL": "https://github.com/mlcommons/tiny_results_v1.0" + }, + "input_mapping": { + "ssh": "CM_GIT_SSH" + }, + "local_env_keys": [ + "CM_GIT_*" + ], + "new_env_keys": [ + "CM_MICROTVM_*" + ], + "tags": [ + "get", + "src", + "source", + "microtvm", + "tiny" + ], + "uid": "a9cad70972a140b9", + "variations": { + "full-history": { + "env": { + "CM_GIT_DEPTH": "--depth 10" + } + }, + "short-history": { + "env": { + "CM_GIT_DEPTH": "--depth 10" + } + } + }, + "versions": { + "custom": {}, + "main": { + "env": { + "CM_GIT_CHECKOUT": "main" + } + } + } +} diff --git a/script/get-microtvm/customize.py b/script/get-microtvm/customize.py new file mode 100644 index 0000000000..327760590a --- /dev/null +++ b/script/get-microtvm/customize.py @@ -0,0 +1,26 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + env = i['env'] + if 'CM_GIT_DEPTH' not in env: + env['CM_GIT_DEPTH'] = '' + if 'CM_GIT_RECURSE_SUBMODULES' not in env: + env['CM_GIT_RECURSE_SUBMODULES'] = '' + + return {'return':0} + +def postprocess(i): + + env = i['env'] + state = i['state'] + + env['CM_MICROTVM_SOURCE'] = os.path.join(os.getcwd(), 'microtvm') + + return {'return':0} diff --git a/script/get-microtvm/run.sh b/script/get-microtvm/run.sh new file mode 100644 index 0000000000..2bffb48d80 --- /dev/null +++ b/script/get-microtvm/run.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=$PWD +SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} + +echo 
"******************************************************" +echo "Cloning microtvm from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..." + +if [ ! -d "microtvm" ]; then + git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} microtvm + if [ "${?}" != "0" ]; then exit 1; fi +fi diff --git a/script/get-ml-model-3d-unet-kits19/README.md b/script/get-ml-model-3d-unet-kits19/README.md new file mode 100644 index 0000000000..b13933fd25 --- /dev/null +++ b/script/get-ml-model-3d-unet-kits19/README.md @@ -0,0 +1,202 @@ +Automatically generated README for this automation recipe: **get-ml-model-3d-unet-kits19** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-3d-unet-kits19,fb7e31419c0f4226) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-3d-unet-kits19)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,ml-model,raw,3d-unet,kits19,medical-imaging* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get ml-model raw 3d-unet kits19 medical-imaging" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,ml-model,raw,3d-unet,kits19,medical-imaging` + +`cm run script --tags=get,ml-model,raw,3d-unet,kits19,medical-imaging[,variations] ` + +*or* + +`cmr "get ml-model raw 3d-unet kits19 medical-imaging"` + +`cmr "get ml-model raw 3d-unet kits19 medical-imaging [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,raw,3d-unet,kits19,medical-imaging',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+   print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,ml-model,raw,3d-unet,kits19,medical-imaging"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ml-model,raw,3d-unet,kits19,medical-imaging) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get ml-model raw 3d-unet kits19 medical-imaging[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_onnx,fp32` + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `0.86170` + - *CM_PACKAGE_URL*: `https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128_dynbatch.onnx?download=1` + - Workflow: + * `_pytorch,fp32` + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `0.86170` + - *CM_PACKAGE_URL*: `https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch.ptc?download=1` + - Workflow: + * `_pytorch,fp32,weights` + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `0.86170` + - *CM_ML_MODEL_FILE*: `retinanet_model_10.pth` + - *CM_PACKAGE_URL*: `https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch_checkpoint.pth?download=1` + - *CM_UNZIP*: `yes` + - Workflow: + * `_tf,fp32` + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `0.86170` + - *CM_ML_MODEL_FILE*: `3dunet_kits19_128x128x128.tf` + - *CM_PACKAGE_URL*: `https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.tf.zip?download=1` + - *CM_UNZIP*: `yes` + - Workflow: + * `_weights` + - Environment variables: + - *CM_MODEL_WEIGHTS_FILE*: `yes` + - Workflow: + +
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * **`_onnx`** (default) + - Environment variables: + - *CM_ML_MODEL_FRAMEWORK*: `onnx` + - Workflow: + * `_pytorch` + - Environment variables: + - *CM_ML_MODEL_FRAMEWORK*: `pytorch` + - Workflow: + * `_tf` + - Aliases: `_tensorflow` + - Environment variables: + - *CM_ML_MODEL_FRAMEWORK*: `tensorflow` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `fp32` + - *CM_ML_MODEL_PRECISION*: `fp32` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `fp32` + - Workflow: + +
+ + +#### Default variations + +`_fp32,_onnx` +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via an `env` dictionary in `@input.json`, or via script flags.
+
+
+
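+
+The sketch below is a hand-written illustration (not part of the auto-generated recipe) of selecting the variations documented above from Python by appending them to the script tags. The `new_env` result key is an assumption and may differ across CM versions:
+
+```python
+import cmind
+
+# Select the `_onnx` framework and `_fp32` precision variations.
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,raw,3d-unet,kits19,medical-imaging,_onnx,_fp32',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+else:
+    # Assumed result key; the path to the downloaded model should appear here.
+    print (r.get('new_env', {}).get('CM_ML_MODEL_FILE_WITH_PATH', ''))
+```
+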
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-3d-unet-kits19/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-3d-unet-kits19/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-3d-unet-kits19/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-3d-unet-kits19/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-3d-unet-kits19/_cm.json) + +___ +### Script output +`cmr "get ml-model raw 3d-unet kits19 medical-imaging [,variations]" -j` +#### New environment keys (filter) + +* `CM_ML_MODEL_*` +#### New environment keys auto-detected from customize + +* `CM_ML_MODEL_FILE` +* `CM_ML_MODEL_FILE_WITH_PATH` +* `CM_ML_MODEL_PATH` \ No newline at end of file diff --git a/script/get-ml-model-3d-unet-kits19/_cm.json b/script/get-ml-model-3d-unet-kits19/_cm.json new file mode 100644 index 0000000000..076d18c5f6 --- /dev/null +++ b/script/get-ml-model-3d-unet-kits19/_cm.json @@ -0,0 +1,94 @@ +{ + "alias": "get-ml-model-3d-unet-kits19", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML models", + "env": { + "CM_ML_MODEL": "3d-unet-kits19", + "CM_ML_MODEL_DATASET": "kits19", + "CM_ML_MODEL_RETRAINING": "no", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "no" + }, + "new_env_keys": [ + "CM_ML_MODEL_*" + ], + "tags": [ + "get", + "ml-model", + "raw", + "3d-unet", + "kits19", + "medical-imaging" + ], + "uid": "fb7e31419c0f4226", + "variations": { + "fp32": { + "default": true, + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "fp32", + "CM_ML_MODEL_PRECISION": "fp32", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "fp32" + }, + "group": "precision" + }, + "onnx": { + "default": true, + "env": { + "CM_ML_MODEL_FRAMEWORK": "onnx" + }, + "group": "framework" + }, + "onnx,fp32": { + "env": { + "CM_ML_MODEL_ACCURACY": "0.86170", + "CM_PACKAGE_URL": "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128_dynbatch.onnx?download=1" + } + }, + "pytorch": { + "env": { + "CM_ML_MODEL_FRAMEWORK": "pytorch" + }, + "group": "framework" + }, + "pytorch,fp32": { + "env": { + "CM_ML_MODEL_ACCURACY": "0.86170", + "CM_PACKAGE_URL": "https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch.ptc?download=1" + } + }, + "pytorch,fp32,weights": { + "env": { + "CM_ML_MODEL_ACCURACY": "0.86170", + "CM_ML_MODEL_FILE": "retinanet_model_10.pth", + "CM_PACKAGE_URL": "https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch_checkpoint.pth?download=1", + "CM_UNZIP": "yes" + } + }, + "weights": { + "env": { + "CM_MODEL_WEIGHTS_FILE": "yes" + } + }, + "tf": { + "env": { + "CM_ML_MODEL_FRAMEWORK": "tensorflow" + }, + "group": "framework" + }, + "tensorflow": { + "alias": "tf" + }, + "tf,fp32": { + "env": { + "CM_ML_MODEL_ACCURACY": "0.86170", + "CM_ML_MODEL_FILE": "3dunet_kits19_128x128x128.tf", + "CM_PACKAGE_URL": "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.tf.zip?download=1", + "CM_UNZIP": "yes" + } + } + }, + "print_env_at_the_end" : { + "CM_ML_MODEL_FILE_WITH_PATH": "Path to the ML model" + } +} diff --git 
a/script/get-ml-model-3d-unet-kits19/customize.py b/script/get-ml-model-3d-unet-kits19/customize.py
new file mode 100644
index 0000000000..65961f1565
--- /dev/null
+++ b/script/get-ml-model-3d-unet-kits19/customize.py
@@ -0,0 +1,38 @@
+from cmind import utils
+import os
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    automation = i['automation']
+
+    cm = automation.cmind
+
+    path = os.getcwd()
+
+    url = env['CM_PACKAGE_URL']
+
+    print ('Downloading from {}'.format(url))
+
+    r = cm.access({'action':'download_file',
+                   'automation':'utils,dc2743f8450541e3',
+                   'url':url})
+    if r['return']>0: return r
+
+    filename = r['filename']
+
+    if env.get('CM_UNZIP') == "yes":
+        os.system("unzip "+filename)
+        filename = env['CM_ML_MODEL_FILE']
+        env['CM_ML_MODEL_FILE_WITH_PATH']=os.path.join(path, filename)
+    else:
+        # Use the downloaded file as-is (no extraction needed)
+        env['CM_ML_MODEL_FILE']=filename
+        env['CM_ML_MODEL_FILE_WITH_PATH']=r['path']
+
+    env['CM_ML_MODEL_PATH']=path
+
+    return {'return':0}
diff --git a/script/get-ml-model-bert-base-squad/README.md b/script/get-ml-model-bert-base-squad/README.md
new file mode 100644
index 0000000000..4cae18625a
--- /dev/null
+++ b/script/get-ml-model-bert-base-squad/README.md
@@ -0,0 +1,184 @@
+Automatically generated README for this automation recipe: **get-ml-model-bert-base-squad**
+
+Category: **AI/ML models**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-bert-base-squad,b3b10b452ce24c5f) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-bert-base-squad)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see the above meta description): *get,ml-model,raw,bert,bert-base,bert-squad,language,language-processing*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get ml-model raw bert bert-base bert-squad language language-processing" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,ml-model,raw,bert,bert-base,bert-squad,language,language-processing`
+
+`cm run script --tags=get,ml-model,raw,bert,bert-base,bert-squad,language,language-processing[,variations] `
+
+*or*
+
+`cmr "get ml-model raw bert bert-base bert-squad language language-processing"`
+
+`cmr "get ml-model raw bert bert-base bert-squad language language-processing [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,raw,bert,bert-base,bert-squad,language,language-processing',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,ml-model,raw,bert,bert-base,bert-squad,language,language-processing"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ml-model,raw,bert,bert-base,bert-squad,language,language-processing) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get ml-model raw bert bert-base bert-squad language language-processing[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_deepsparse,int8` + - Environment variables: + - *CM_ML_MODEL_F1*: `87.89` + - *CM_ML_MODEL_FILE*: `model.onnx` + - *CM_PRUNING_PERCENTAGE*: `95` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,ml-model,zoo,deepsparse,_pruned95_obs_quant-none + * CM names: `--adr.['neural-magic-zoo-downloader']...` + - *Warning: no scripts found* + +
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * `_deepsparse` + - Environment variables: + - *CM_ML_MODEL_FRAMEWORK*: `deepsparse` + - *CM_ML_MODEL_INPUT_IDS_NAME*: `input_ids` + - *CM_ML_MODEL_INPUT_MASK_NAME*: `input_mask` + - *CM_ML_MODEL_INPUT_SEGMENTS_NAME*: `segment_ids` + - *CM_ML_MODEL_OUTPUT_END_LOGITS_NAME*: `output_end_logits` + - *CM_ML_MODEL_OUTPUT_START_LOGITS_NAME*: `output_start_logits` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Environment variables: + - *CM_ML_MODEL_PRECISION*: `fp32` + - Workflow: + * `_int8` + - Environment variables: + - *CM_ML_MODEL_PRECISION*: `int8` + - *CM_ML_MODEL_QUANTIZED*: `yes` + - Workflow: + +
+ + +#### Default variations + +`_fp32` +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via an `env` dictionary in `@input.json`, or via script flags.
+
+
+
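+
+As a hand-written illustration (not part of the auto-generated recipe), such keys can also be overridden from Python via the `env` dictionary; `CM_TMP_ML_MODEL_REQUIRE_DOWNLOAD` is taken from this script's meta and gates the download dependency listed below:
+
+```python
+import cmind
+
+# Equivalent to passing --env.CM_TMP_ML_MODEL_REQUIRE_DOWNLOAD=yes on the CLI.
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,raw,bert,bert-base,bert-squad,language,language-processing',
+                  'env':{'CM_TMP_ML_MODEL_REQUIRE_DOWNLOAD':'yes'},
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```
+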
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-bert-base-squad/_cm.json) + 1. Run "preprocess" function from customize.py + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-bert-base-squad/_cm.json)*** + * download-and-extract + * `if (CM_TMP_ML_MODEL_REQUIRE_DOWNLOAD in yes)` + - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-bert-base-squad/_cm.json) + 1. Run "postrocess" function from customize.py + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-bert-base-squad/_cm.json)*** + * get,bert,squad,vocab + - CM script: [get-bert-squad-vocab](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-bert-squad-vocab) + +___ +### Script output +`cmr "get ml-model raw bert bert-base bert-squad language language-processing [,variations]" -j` +#### New environment keys (filter) + +* `CM_ML_MODEL*` +#### New environment keys auto-detected from customize diff --git a/script/get-ml-model-bert-base-squad/_cm.json b/script/get-ml-model-bert-base-squad/_cm.json new file mode 100644 index 0000000000..b893590b88 --- /dev/null +++ b/script/get-ml-model-bert-base-squad/_cm.json @@ -0,0 +1,92 @@ +{ + "alias": "get-ml-model-bert-base-squad", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML models", + "env": { + "CM_ML_MODEL": "BERT", + "CM_ML_MODEL_DATASET": "squad-1.1", + "CM_ML_MODEL_MAX_SEQ_LENGTH": "384", + "CM_ML_MODEL_NAME": "MLPERF BERT Base on SQuAD v1.1", + "CM_TMP_ML_MODEL_REQUIRE_DOWNLOAD": "no" + }, + "new_env_keys": [ + "CM_ML_MODEL*" + ], + "tags": [ + "get", + "ml-model", + "raw", + "bert", + "bert-base", + "bert-squad", + "language", + "language-processing" + ], + "uid": "b3b10b452ce24c5f", + "prehook_deps": [ + { + "tags": "download-and-extract", + "env": { + "CM_EXTRACT_EXTRACTED_FILENAME": "<<>>", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH", + "CM_EXTRACT_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH" + }, + "update_tags_from_env_with_prefix": { + "_url.": [ "CM_PACKAGE_URL" ] + }, + "enable_if_env": { + "CM_TMP_ML_MODEL_REQUIRE_DOWNLOAD": "yes" + } + } + ], + "post_deps": [ + { + "tags": "get,bert,squad,vocab" + } + ], + "variations": { + "deepsparse": { + "env": { + "CM_ML_MODEL_FRAMEWORK": "deepsparse", + "CM_ML_MODEL_INPUT_IDS_NAME": "input_ids", + "CM_ML_MODEL_INPUT_MASK_NAME": "input_mask", + "CM_ML_MODEL_INPUT_SEGMENTS_NAME": "segment_ids", + "CM_ML_MODEL_OUTPUT_END_LOGITS_NAME": "output_end_logits", + "CM_ML_MODEL_OUTPUT_START_LOGITS_NAME": "output_start_logits" + }, + "group": "framework" + }, + "deepsparse,int8": { + "deps": [ + { + "names": [ "neural-magic-zoo-downloader" ], + "tags": "get,ml-model,zoo,deepsparse,_pruned95_obs_quant-none" + } + ], + "env": { + "CM_ML_MODEL_F1": "87.89", + "CM_ML_MODEL_FILE": "model.onnx", + "CM_PRUNING_PERCENTAGE": "95" + } + }, + "fp32": { + "default": true, + "env": { + "CM_ML_MODEL_PRECISION": "fp32" + }, + "group": "precision" + }, + "int8": { + "env": { + "CM_ML_MODEL_PRECISION": "int8", + "CM_ML_MODEL_QUANTIZED": "yes" + }, + "group": "precision" + } + }, 
+ "print_env_at_the_end" : { + "CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH": "Path to the BERT vocab file" + } +} diff --git a/script/get-ml-model-bert-large-squad/README.md b/script/get-ml-model-bert-large-squad/README.md new file mode 100644 index 0000000000..04d25650f5 --- /dev/null +++ b/script/get-ml-model-bert-large-squad/README.md @@ -0,0 +1,358 @@ +Automatically generated README for this automation recipe: **get-ml-model-bert-large-squad** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-bert-large-squad,5e865dbdc65949d2) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-bert-large-squad)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,ml-model,raw,bert,bert-large,bert-squad,language,language-processing* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get ml-model raw bert bert-large bert-squad language language-processing" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,ml-model,raw,bert,bert-large,bert-squad,language,language-processing` + +`cm run script --tags=get,ml-model,raw,bert,bert-large,bert-squad,language,language-processing[,variations] ` + +*or* + +`cmr "get ml-model raw bert bert-large bert-squad language language-processing"` + +`cmr "get ml-model raw bert bert-large bert-squad language language-processing [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,raw,bert,bert-large,bert-squad,language,language-processing',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,ml-model,raw,bert,bert-large,bert-squad,language,language-processing"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ml-model,raw,bert,bert-large,bert-squad,language,language-processing) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get ml-model raw bert bert-large bert-squad language language-processing[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_deepsparse,int8` + - Environment variables: + - *CM_ML_MODEL_F1*: `90.21282641816266` + - *CM_ML_MODEL_FILE*: `oBERT-Large_95sparse_block4_qat.onnx` + - *CM_DAE_EXTRACT_DOWNLOADED*: `yes` + - Workflow: + * `_deepsparse,int8,github` + - Environment variables: + - *CM_PACKAGE_URL*: `https://github.com/mlcommons/inference_results_v2.1/raw/master/open/NeuralMagic/code/bert/deepsparse/models/oBERT-Large_95sparse_block4_qat.onnx.tar.xz` + - Workflow: + * `_onnx,fp32` + - Environment variables: + - *CM_ML_MODEL_F1*: `90.874` + - Workflow: + * `_onnx,fp32,armi` + - Environment variables: + - *CM_PACKAGE_URL*: `https://armi.in/files/model.onnx` + - *CM_PACKAGE_URL1*: `https://zenodo.org/record/3733910/files/model.onnx` + - Workflow: + * `_onnx,fp32,zenodo` + - Environment variables: + - *CM_PACKAGE_URL*: `https://zenodo.org/record/3733910/files/model.onnx` + - Workflow: + * `_onnx,int8` + - Environment variables: + - *CM_ML_MODEL_F1*: `90.067` + - *CM_PACKAGE_URL*: `https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx` + - Workflow: + * `_onnx,int8,amazon-s3` + - Environment variables: + - *CM_PACKAGE_URL*: `https://mlperf-public.s3.us-west-2.amazonaws.com/bert_large_v1_1_fake_quant.onnx` + - Workflow: + * `_onnx,int8,zenodo` + - Environment variables: + - *CM_PACKAGE_URL*: `https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx` + - Workflow: + * `_onnxruntime` + - Workflow: + * `_pytorch,fp32` + - Environment variables: + - *CM_ML_MODEL_F1*: `90.874` + - *CM_DOWNLOAD_CHECKSUM*: `00fbcbfaebfa20d87ac9885120a6e9b4` + - Workflow: + * `_pytorch,fp32,armi` + - Environment variables: + - *CM_PACKAGE_URL*: `https://armi.in/files/fp32/model.pytorch` + - *CM_PACKAGE_URL1*: `https://zenodo.org/record/3733896/files/model.pytorch` + - Workflow: + * `_pytorch,fp32,zenodo` + - Environment variables: + - *CM_PACKAGE_URL*: `https://zenodo.org/record/3733896/files/model.pytorch` + - Workflow: + * `_pytorch,int8` + - Environment variables: + - *CM_ML_MODEL_F1*: `90.633` + - Workflow: + * `_pytorch,int8,armi` + - Environment variables: + - *CM_PACKAGE_URL*: `https://armi.in/files/int8/pytorch_model.bin` + - *CM_PACKAGE_URL1*: `https://zenodo.org/record/4792496/files/pytorch_model.bin` + - Workflow: + * `_pytorch,int8,zenodo` + - Environment variables: + - *CM_PACKAGE_URL*: `https://zenodo.org/record/4792496/files/pytorch_model.bin` + - Workflow: + * `_tensorflow` + - Workflow: + * `_tf,fp32` + - Environment variables: + - *CM_ML_MODEL_F1*: `90.874` + - Workflow: + * `_tf,fp32,zenodo` + - Environment variables: + - *CM_PACKAGE_URL*: `https://zenodo.org/record/3939747/files/model.pb` + - Workflow: + +
+ + + * Group "**download-source**" +
+ Click here to expand this section. + + * `_amazon-s3` + - Workflow: + * `_armi` + - Workflow: + * `_custom-url.#` + - Environment variables: + - *CM_PACKAGE_URL*: `#` + - Workflow: + * `_github` + - Workflow: + * `_zenodo` + - Workflow: + +
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * `_deepsparse` + - Environment variables: + - *CM_ML_MODEL_FRAMEWORK*: `deepsparse` + - *CM_ML_MODEL_INPUT_IDS_NAME*: `input_ids` + - *CM_ML_MODEL_INPUT_MASK_NAME*: `input_mask` + - *CM_ML_MODEL_INPUT_SEGMENTS_NAME*: `segment_ids` + - *CM_ML_MODEL_OUTPUT_END_LOGITS_NAME*: `output_end_logits` + - *CM_ML_MODEL_OUTPUT_START_LOGITS_NAME*: `output_start_logits` + - Workflow: + * **`_onnx`** (default) + - Environment variables: + - *CM_ML_MODEL_FRAMEWORK*: `onnx` + - *CM_ML_MODEL_INPUT_IDS_NAME*: `input_ids` + - *CM_ML_MODEL_INPUT_MASK_NAME*: `input_mask` + - *CM_ML_MODEL_INPUT_SEGMENTS_NAME*: `segment_ids` + - *CM_ML_MODEL_OUTPUT_END_LOGITS_NAME*: `output_end_logits` + - *CM_ML_MODEL_OUTPUT_START_LOGITS_NAME*: `output_start_logits` + - Workflow: + * `_pytorch` + - Environment variables: + - *CM_ML_MODEL_FRAMEWORK*: `pytorch` + - *CM_ML_MODEL_INPUT_IDS_NAME*: `input_ids` + - *CM_ML_MODEL_INPUT_MASK_NAME*: `input_mask` + - *CM_ML_MODEL_INPUT_SEGMENTS_NAME*: `segment_ids` + - *CM_ML_MODEL_OUTPUT_END_LOGITS_NAME*: `output_end_logits` + - *CM_ML_MODEL_OUTPUT_START_LOGITS_NAME*: `output_start_logits` + - Workflow: + * `_tf` + - Environment variables: + - *CM_ML_MODEL_FRAMEWORK*: `tf` + - *CM_ML_MODEL_INPUT_IDS_NAME*: `input_ids` + - *CM_ML_MODEL_INPUT_MASK_NAME*: `input_mask` + - *CM_ML_MODEL_INPUT_SEGMENTS_NAME*: `segment_ids` + - *CM_ML_MODEL_OUTPUT_END_LOGITS_NAME*: `output_end_logits` + - *CM_ML_MODEL_OUTPUT_START_LOGITS_NAME*: `output_start_logits` + - Workflow: + +
+ + + * Group "**packing**" +
+ Click here to expand this section. + + * `_packed` + - Environment variables: + - *CM_ML_MODEL_BERT_PACKED*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_torch + * CM names: `--adr.['torch', 'pytorch']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.tensorflow + * CM names: `--adr.['tensorflow']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.transformers + * CM names: `--adr.['transformers']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.protobuf + * CM names: `--adr.['protobuf']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.onnx + * CM names: `--adr.['onnx']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnx-graphsurgeon + * CM names: `--adr.['onnx-graphsurgeon']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_numpy + * CM names: `--adr.['numpy']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,mlperf,inference,src + * CM names: `--adr.['inference-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + 1. ***Read "prehook_deps" on other CM scripts*** + * download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.data-00000-of-00001 + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + * download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.index + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + * download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.meta + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + * download,file,_wget,_url.https://zenodo.org/record/3733868/files/vocab.txt + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + * download,file,_wget,_url.https://raw.githubusercontent.com/krai/axs2kilt/main/model_onnx_bert_large_packed_recipe/convert_model.py + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + * **`_unpacked`** (default) + - Environment variables: + - *CM_ML_MODEL_BERT_PACKED*: `no` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Environment variables: + - *CM_ML_MODEL_PRECISION*: `fp32` + - Workflow: + * `_int8` + - Environment variables: + - *CM_ML_MODEL_PRECISION*: `int8` + - *CM_ML_MODEL_QUANTIZED*: `yes` + - Workflow: + +
+ + +#### Default variations + +`_fp32,_onnx,_unpacked` +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via an `env` dictionary in `@input.json`, or via script flags.
+
+
+
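+
+The framework, precision and download-source groups above can be combined in a single call. A hand-written sketch (not part of the auto-generated recipe), using the documented `_onnx,int8,zenodo` combination:
+
+```python
+import cmind
+
+# Pick the int8 ONNX model and force the Zenodo download source.
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,raw,bert,bert-large,bert-squad,language,language-processing,_onnx,_int8,_zenodo',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```
+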
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-bert-large-squad/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-bert-large-squad/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-bert-large-squad/_cm.json)*** + * download-and-extract + * `if (CM_ML_MODEL_BERT_PACKED != yes)` + - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract) + 1. ***Run native script if exists*** + * [run-packed.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-bert-large-squad/run-packed.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-bert-large-squad/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-bert-large-squad/customize.py)*** + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-bert-large-squad/_cm.json)*** + * get,dataset-aux,squad-vocab + - CM script: [get-dataset-squad-vocab](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-squad-vocab) + +___ +### Script output +`cmr "get ml-model raw bert bert-large bert-squad language language-processing [,variations]" -j` +#### New environment keys (filter) + +* `CM_ML_MODEL*` +#### New environment keys auto-detected from customize + +* `CM_ML_MODEL_BERT_LARGE_FP32_PATH` +* `CM_ML_MODEL_BERT_LARGE_INT8_PATH` +* `CM_ML_MODEL_BERT_PACKED_PATH` +* `CM_ML_MODEL_FILE` +* `CM_ML_MODEL_FILE_WITH_PATH` \ No newline at end of file diff --git a/script/get-ml-model-bert-large-squad/_cm.json b/script/get-ml-model-bert-large-squad/_cm.json new file mode 100644 index 0000000000..8616af705b --- /dev/null +++ b/script/get-ml-model-bert-large-squad/_cm.json @@ -0,0 +1,362 @@ +{ + "alias": "get-ml-model-bert-large-squad", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML models", + "env": { + "CM_ML_MODEL": "BERT", + "CM_ML_MODEL_DATASET": "squad-1.1", + "CM_ML_MODEL_MAX_SEQ_LENGTH": "384", + "CM_ML_MODEL_NAME": "MLPERF BERT Large on SQuAD v1.1", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "<<>>" + }, + "new_env_keys": [ + "CM_ML_MODEL*" + ], + "tags": [ + "get", + "ml-model", + "raw", + "bert", + "bert-large", + "bert-squad", + "language", + "language-processing" + ], + "uid": "5e865dbdc65949d2", + "prehook_deps": [ + { + "tags": "download-and-extract", + "env": { + "CM_EXTRACT_EXTRACTED_FILENAME": "<<>>", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH", + "CM_EXTRACT_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH", + "CM_DOWNLOAD_URL1": "<<>>" + }, + "update_tags_from_env_with_prefix": { + "_url.": [ "CM_PACKAGE_URL" ] + }, + "force_cache": true, + "extra_cache_tags": "bert-large,ml-model", + "skip_if_env": { + "CM_ML_MODEL_BERT_PACKED": [ "yes" ] + } + } + ], + "post_deps": [ + { + "tags": "get,dataset-aux,squad-vocab" + } + ], + "variations": { + "fp32": { + "group": "precision", + "default": true, + "env": { + "CM_ML_MODEL_PRECISION": "fp32" + } + }, + "int8": { + "group": "precision", + "env": { + "CM_ML_MODEL_PRECISION": "int8", + "CM_ML_MODEL_QUANTIZED": "yes" + } + 
}, + "zenodo": { + "group": "download-source" + }, + "amazon-s3": { + "group": "download-source" + }, + "github": { + "group": "download-source" + }, + "armi": { + "group": "download-source" + }, + "custom-url.#": { + "group": "download-source", + "env": { + "CM_PACKAGE_URL": "#" + } + }, + "onnx,fp32": { + "env": { + "CM_ML_MODEL_F1": "90.874" + } + }, + "onnx,fp32,zenodo": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/3733910/files/model.onnx" + } + }, + "onnx,fp32,armi": { + "env": { + "CM_PACKAGE_URL": "https://armi.in/files/model.onnx", + "CM_PACKAGE_URL1": "https://zenodo.org/record/3733910/files/model.onnx" + } + }, + "onnx,int8": { + "env": { + "CM_ML_MODEL_F1": "90.067", + "CM_PACKAGE_URL": "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx" + } + }, + "onnx,int8,zenodo": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx" + } + }, + "onnx,int8,amazon-s3": { + "env": { + "CM_PACKAGE_URL": "https://mlperf-public.s3.us-west-2.amazonaws.com/bert_large_v1_1_fake_quant.onnx" + } + }, + "onnx": { + "group": "framework", + "default": true, + "default_variations": { + "download-source": "armi" + }, + "env": { + "CM_ML_MODEL_FRAMEWORK": "onnx", + "CM_ML_MODEL_INPUT_IDS_NAME": "input_ids", + "CM_ML_MODEL_INPUT_MASK_NAME": "input_mask", + "CM_ML_MODEL_INPUT_SEGMENTS_NAME": "segment_ids", + "CM_ML_MODEL_OUTPUT_END_LOGITS_NAME": "output_end_logits", + "CM_ML_MODEL_OUTPUT_START_LOGITS_NAME": "output_start_logits" + } + }, + "tf": { + "group": "framework", + "default_variations": { + "download-source": "zenodo" + }, + "env": { + "CM_ML_MODEL_FRAMEWORK": "tf", + "CM_ML_MODEL_INPUT_IDS_NAME": "input_ids", + "CM_ML_MODEL_INPUT_MASK_NAME": "input_mask", + "CM_ML_MODEL_INPUT_SEGMENTS_NAME": "segment_ids", + "CM_ML_MODEL_OUTPUT_END_LOGITS_NAME": "output_end_logits", + "CM_ML_MODEL_OUTPUT_START_LOGITS_NAME": "output_start_logits" + } + }, + "tf,fp32": { + "env": { + "CM_ML_MODEL_F1": "90.874" + } + }, + "tf,fp32,zenodo": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/3939747/files/model.pb" + } + }, + "pytorch": { + "group": "framework", + "default_variations": { + "download-source": "armi" + }, + "env": { + "CM_ML_MODEL_FRAMEWORK": "pytorch", + "CM_ML_MODEL_INPUT_IDS_NAME": "input_ids", + "CM_ML_MODEL_INPUT_MASK_NAME": "input_mask", + "CM_ML_MODEL_INPUT_SEGMENTS_NAME": "segment_ids", + "CM_ML_MODEL_OUTPUT_END_LOGITS_NAME": "output_end_logits", + "CM_ML_MODEL_OUTPUT_START_LOGITS_NAME": "output_start_logits" + } + }, + "pytorch,fp32": { + "env": { + "CM_ML_MODEL_F1": "90.874", + "CM_DOWNLOAD_CHECKSUM": "00fbcbfaebfa20d87ac9885120a6e9b4" + } + }, + "pytorch,fp32,zenodo": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/3733896/files/model.pytorch" + } + }, + "pytorch,fp32,armi": { + "env": { + "CM_PACKAGE_URL": "https://armi.in/files/fp32/model.pytorch", + "CM_PACKAGE_URL1": "https://zenodo.org/record/3733896/files/model.pytorch" + } + }, + "pytorch,int8": { + "env": { + "CM_ML_MODEL_F1": "90.633" + } + }, + "pytorch,int8,zenodo": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/4792496/files/pytorch_model.bin" + } + }, + "pytorch,int8,armi": { + "env": { + "CM_PACKAGE_URL": "https://armi.in/files/int8/pytorch_model.bin", + "CM_PACKAGE_URL1": "https://zenodo.org/record/4792496/files/pytorch_model.bin" + } + }, + "onnxruntime": { + "base": [ + "onnx" + ] + }, + "tensorflow": { + "base": [ + "tf" + ] + }, + "deepsparse": { + "group": "framework", + "default_variations": { + 
"download-source": "github" + }, + "env": { + "CM_ML_MODEL_FRAMEWORK": "deepsparse", + "CM_ML_MODEL_INPUT_IDS_NAME": "input_ids", + "CM_ML_MODEL_INPUT_MASK_NAME": "input_mask", + "CM_ML_MODEL_INPUT_SEGMENTS_NAME": "segment_ids", + "CM_ML_MODEL_OUTPUT_END_LOGITS_NAME": "output_end_logits", + "CM_ML_MODEL_OUTPUT_START_LOGITS_NAME": "output_start_logits" + } + }, + "deepsparse,int8": { + "env": { + "CM_ML_MODEL_F1": "90.21282641816266", + "CM_ML_MODEL_FILE": "oBERT-Large_95sparse_block4_qat.onnx", + "CM_DAE_EXTRACT_DOWNLOADED": "yes" + } + }, + "deepsparse,int8,github": { + "env": { + "CM_PACKAGE_URL": "https://github.com/mlcommons/inference_results_v2.1/raw/master/open/NeuralMagic/code/bert/deepsparse/models/oBERT-Large_95sparse_block4_qat.onnx.tar.xz" + } + }, + "unpacked": { + "group": "packing", + "default": true, + "env": { + "CM_ML_MODEL_BERT_PACKED": "no" + } + }, + "packed": { + "group": "packing", + "env": { + "CM_ML_MODEL_BERT_PACKED": "yes" + }, + "deps": [ + { + "tags": "get,python3", + "names": [ "python", "python3" ], + "version_max": "3.8.999", + "version_max_usable": "3.8.12" + }, + { + "tags": "get,generic-python-lib,_torch", + "names": [ "torch", "pytorch" ], + "version": "1.8.1" + }, + { + "tags": "get,generic-python-lib,_package.tensorflow", + "names": [ "tensorflow" ], + "version": "2.11.0" + }, + { + "tags": "get,generic-python-lib,_package.transformers", + "names": [ "transformers" ], + "version": "2.4.0" + }, + { + "tags": "get,generic-python-lib,_package.protobuf", + "names": [ "protobuf" ], + "version": "3.20.1" + }, + { + "tags": "get,generic-python-lib,_package.onnx", + "names": [ "onnx" ], + "version": "1.12.0" + }, + { + "tags": "get,generic-python-lib,_onnx-graphsurgeon", + "names": [ "onnx-graphsurgeon" ], + "version": "0.3.26" + }, + { + "tags": "get,generic-python-lib,_numpy", + "names": [ "numpy" ], + "version": "1.23.0" + }, + { + "tags": "get,mlperf,inference,src", + "names": [ "inference-src" ] + } + ], + "prehook_deps": [ + { + "tags": "download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.data-00000-of-00001", + "env": { + "CM_DOWNLOAD_FILENAME": "model.ckpt-5474.data-00000-of-00001", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_BERT_CHECKPOINT_DATA_PATH", + "CM_DOWNLOAD_PATH": "<<>>", + "CM_DOWNLOAD_CHECKSUM": "3089b27c559906a868878741d992ade7" + }, + "force_cache": true, + "extra_cache_tags": "bert,checkpoint,weights,bert-large" + }, + { + "tags": "download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.index", + "env": { + "CM_DOWNLOAD_FILENAME": "model.ckpt-5474.index", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_BERT_CHECKPOINT_INDEX_PATH", + "CM_DOWNLOAD_PATH": "<<>>", + "CM_DOWNLOAD_CHECKSUM": "d23d61572d9404da4dac3363b5bc735b" + }, + "force_cache": true, + "extra_cache_tags": "bert,checkpoint-index,bert-large" + }, + { + "tags": "download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.meta", + "env": { + "CM_DOWNLOAD_FILENAME": "model.ckpt-5474.meta", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_BERT_CHECKPOINT_META_PATH", + "CM_DOWNLOAD_PATH": "<<>>", + "CM_DOWNLOAD_CHECKSUM": "83e11e57eea14c9e9a246af74af40d66" + }, + "force_cache": true, + "extra_cache_tags": "bert,checkpoint-meta,bert-large" + }, + { + "tags": "download,file,_wget,_url.https://zenodo.org/record/3733868/files/vocab.txt", + "env": { + "CM_DOWNLOAD_FILENAME": "vocab.txt", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_BERT_VOCAB_PATH", + "CM_DOWNLOAD_PATH": "<<>>", + "CM_DOWNLOAD_CHECKSUM": "64800d5d8528ce344256daf115d4965e" + }, + 
"force_cache": true, + "extra_cache_tags": "bert,vocab,bert-large" + }, + { + "tags": "download,file,_wget,_url.https://raw.githubusercontent.com/krai/axs2kilt/main/model_onnx_bert_large_packed_recipe/convert_model.py", + "env": { + "CM_DOWNLOAD_FILENAME": "convert_model.py", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_BERT_CONVERTER_CODE_PATH", + "CM_DOWNLOAD_CHECKSUM": "94c91ce422e8f36f9d98b4926e2ad688" + }, + "force_cache": true, + "extra_cache_tags": "bert,checkpoint,converter,code,bert-large" + } + ], + "new_env_keys": [ + "CM_BERT_" + ] + } + }, + "print_env_at_the_end" : { + "CM_ML_MODEL_FILE_WITH_PATH": "Path to the ML model" + } +} diff --git a/script/get-ml-model-bert-large-squad/customize.py b/script/get-ml-model-bert-large-squad/customize.py new file mode 100644 index 0000000000..1c8e02aa43 --- /dev/null +++ b/script/get-ml-model-bert-large-squad/customize.py @@ -0,0 +1,32 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + if env.get('CM_ML_MODEL_BERT_PACKED', '') == 'yes': + i['run_script_input']['script_name'] = "run-packed" + env['CM_BERT_CONFIG_PATH'] = os.path.join(env['CM_MLPERF_INFERENCE_BERT_PATH'], "bert_config.json") + env['CM_BERT_CHECKPOINT_DOWNLOAD_DIR'] = os.getcwd() + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(os.getcwd(), "model.onnx") + env['CM_ML_MODEL_BERT_PACKED_PATH'] = os.path.join(os.getcwd(), "model.onnx") + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env['CM_ML_MODEL_FILE'] = os.path.basename(env['CM_ML_MODEL_FILE_WITH_PATH']) + + if env.get('CM_ML_MODEL_PRECISION', '') == "fp32": + env['CM_ML_MODEL_BERT_LARGE_FP32_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + elif env.get('CM_ML_MODEL_PRECISION', '') == "int8": + env['CM_ML_MODEL_BERT_LARGE_INT8_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + + return {'return':0} + diff --git a/script/get-ml-model-bert-large-squad/run-packed.sh b/script/get-ml-model-bert-large-squad/run-packed.sh new file mode 100644 index 0000000000..4c7b016c93 --- /dev/null +++ b/script/get-ml-model-bert-large-squad/run-packed.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_BERT_CONVERTER_CODE_PATH} --src '${CM_BERT_CHECKPOINT_INDEX_PATH}/../model.ckpt-5474' --dest '$PWD/' --config_path '${CM_BERT_CONFIG_PATH}'" +echo $cmd +eval $cmd +test $? -eq 0 || exit $? 
diff --git a/script/get-ml-model-dlrm-terabyte/README.md b/script/get-ml-model-dlrm-terabyte/README.md
new file mode 100644
index 0000000000..8dfe57da50
--- /dev/null
+++ b/script/get-ml-model-dlrm-terabyte/README.md
@@ -0,0 +1,264 @@
+Automatically generated README for this automation recipe: **get-ml-model-dlrm-terabyte**
+
+Category: **AI/ML models**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-dlrm-terabyte,8fa7582c603a4db3) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-dlrm-terabyte)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see the above meta description): *get,ml-model,dlrm,raw,terabyte,criteo-terabyte,criteo,recommendation*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,ml-model,dlrm,raw,terabyte,criteo-terabyte,criteo,recommendation`
+
+`cm run script --tags=get,ml-model,dlrm,raw,terabyte,criteo-terabyte,criteo,recommendation[,variations] [--input_flags]`
+
+*or*
+
+`cmr "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation"`
+
+`cmr "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,dlrm,raw,terabyte,criteo-terabyte,criteo,recommendation',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,ml-model,dlrm,raw,terabyte,criteo-terabyte,criteo,recommendation"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ml-model,dlrm,raw,terabyte,criteo-terabyte,criteo,recommendation) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_debug` + - Environment variables: + - *CM_ML_MODEL_DEBUG*: `yes` + - Workflow: + * `_onnx,fp32` + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `0.8025` + - *CM_PACKAGE_URL*: `https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.onnx.tar` + - *CM_UNTAR*: `yes` + - *CM_ML_MODEL_FILE*: `tb00_40M.onnx` + - *CM_ML_MODEL_DLRM_MAX_INDEX_RANGE*: `40000000` + - Workflow: + * `_onnx,fp32,debug` + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `0.8107` + - *CM_PACKAGE_URL*: `https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.onnx.tar` + - *CM_ML_MODEL_DLRM_MAX_INDEX_RANGE*: `10000000` + - *CM_UNTAR*: `yes` + - *CM_ML_MODEL_FILE*: `tb0875_10M.onnx` + - Workflow: + * `_pytorch,fp32` + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `0.8025` + - *CM_PACKAGE_URL*: `https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.pt` + - *CM_ML_MODEL_DLRM_MAX_INDEX_RANGE*: `40000000` + - *CM_DOWNLOAD_CHECKSUM*: `2d49a5288cddb37c3c64860a06d79bb9` + - Workflow: + * `_pytorch,fp32,debug` + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `0.8107` + - *CM_PACKAGE_URL*: `https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.pt` + - *CM_ML_MODEL_DLRM_MAX_INDEX_RANGE*: `10000000` + - Workflow: + * `_pytorch,fp32,weight_sharded` + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `0.8025` + - *CM_ML_MODEL_DLRM_MAX_INDEX_RANGE*: `40000000` + - *CM_ML_MODEL_FILE*: `model_weights` + - *CM_TMP_MODEL_ADDITIONAL_NAME*: `` + - *CM_DOWNLOAD_CHECKSUM*: `` + - Workflow: + * `_pytorch,fp32,weight_sharded,rclone` + - Environment variables: + - *CM_RCLONE_CONFIG_CMD*: `rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com` + - *CM_PACKAGE_URL*: `mlc-inference:mlcommons-inference-wg-public/model_weights` + - Workflow: + * `_pytorch,fp32,weight_sharded,wget` + - Environment variables: + - *CM_PACKAGE_URL*: `https://cloud.mlcommons.org/index.php/s/XzfSeLgW8FYfR3S/download` + - *CM_DAE_EXTRACT_DOWNLOADED*: `yes` + - *CM_DOWNLOAD_FILENAME*: `download` + - *CM_EXTRACT_UNZIP*: `yes` + - Workflow: + +
+ + + * Group "**download-tool**" +
+ Click here to expand this section. + + * `_rclone` + - Workflow: + * `_wget` + - Workflow: + +
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * `_onnx` + - Environment variables: + - *CM_ML_MODEL_FRAMEWORK*: `onnx` + - Workflow: + * **`_pytorch`** (default) + - Environment variables: + - *CM_ML_MODEL_FRAMEWORK*: `pytorch` + - *CM_TMP_MODEL_ADDITIONAL_NAME*: `dlrm_terabyte.pytorch` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `fp32` + - *CM_ML_MODEL_PRECISION*: `fp32` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `fp32` + - Workflow: + +
+ + + * Group "**type**" +
+ Click here to expand this section. + + * **`_weight_sharded`** (default) + - Environment variables: + - *CM_DLRM_MULTIHOT_MODEL*: `yes` + - Workflow: + +
+ + +#### Default variations + +`_fp32,_pytorch,_weight_sharded` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--dir=value` → `CM_DOWNLOAD_PATH=value`
+* `--download_path=value` → `CM_DOWNLOAD_PATH=value`
+* `--to=value` → `CM_DOWNLOAD_PATH=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r = cm.access({..., "dir": ...})
+```
+
+
+ +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via an `env` dictionary in `@input.json`, or via script flags.
+
+
+
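+
+A hand-written sketch (not part of the auto-generated recipe) showing how the input flags documented above can be passed through the Python API; the download directory below is a hypothetical example:
+
+```python
+import cmind
+
+# `dir` maps to CM_DOWNLOAD_PATH (see "Script flags mapped to environment").
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,dlrm,raw,terabyte,criteo-terabyte,criteo,recommendation,_pytorch,_fp32,_weight_sharded,_rclone',
+                  'dir':'/data/models/dlrm',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```
+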
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-dlrm-terabyte/_cm.json) + 1. Run "preprocess" function from customize.py + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-dlrm-terabyte/_cm.json)*** + * download-and-extract + * CM names: `--adr.['dae']...` + - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-dlrm-terabyte/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-dlrm-terabyte/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-dlrm-terabyte/_cm.json) + +___ +### Script output +`cmr "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_ML_MODEL_*` +#### New environment keys auto-detected from customize diff --git a/script/get-ml-model-dlrm-terabyte/_cm.json b/script/get-ml-model-dlrm-terabyte/_cm.json new file mode 100644 index 0000000000..07ef945d1f --- /dev/null +++ b/script/get-ml-model-dlrm-terabyte/_cm.json @@ -0,0 +1,164 @@ +{ + "alias": "get-ml-model-dlrm-terabyte", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML models", + "env": { + "CM_ML_MODEL": "dlrm", + "CM_ML_MODEL_DATASET": "criteo-terabyte", + "CM_ML_MODEL_RETRAINING": "no", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "no", + "CM_EXTRACT_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH" + }, + "new_env_keys": [ + "CM_ML_MODEL_*" + ], + "input_mapping": { + "dir": "CM_DOWNLOAD_PATH", + "download_path": "CM_DOWNLOAD_PATH", + "to": "CM_DOWNLOAD_PATH" + }, + "tags": [ + "get", + "ml-model", + "dlrm", + "raw", + "terabyte", + "criteo-terabyte", + "criteo", + "recommendation" + ], + "prehook_deps": [ + { + "tags": "download-and-extract", + "env": { + "CM_EXTRACT_EXTRACTED_FILENAME": "<<>>", + "CM_DOWNLOAD_DOWNLOADED_FILENAME": "<<>>" + }, + "force_cache": true, + "extra_cache_tags": "ml-model,dlrm,terabyte,raw,ml-model-dlrm", + "update_tags_from_env_with_prefix": { + "_url.": [ "CM_PACKAGE_URL" ] + }, + "names": [ "dae" ] + } + ], + "uid": "8fa7582c603a4db3", + "variations": { + "fp32": { + "default": true, + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "fp32", + "CM_ML_MODEL_PRECISION": "fp32", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "fp32" + }, + "group": "precision" + }, + "onnx": { + "env": { + "CM_ML_MODEL_FRAMEWORK": "onnx" + }, + "group": "framework" + }, + "onnx,fp32": { + "env": { + "CM_ML_MODEL_ACCURACY": "0.8025", + "CM_PACKAGE_URL": "https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.onnx.tar", + "CM_UNTAR": "yes", + "CM_ML_MODEL_FILE": "tb00_40M.onnx", + "CM_ML_MODEL_DLRM_MAX_INDEX_RANGE": "40000000" + } + }, + "pytorch": { + "env": { + "CM_ML_MODEL_FRAMEWORK": "pytorch", + "CM_TMP_MODEL_ADDITIONAL_NAME": "dlrm_terabyte.pytorch" + }, + "group": "framework", + "default": true + }, + "pytorch,fp32": { + "env": { + "CM_ML_MODEL_ACCURACY": "0.8025", + "CM_PACKAGE_URL": 
"https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.pt", + "CM_ML_MODEL_DLRM_MAX_INDEX_RANGE": "40000000", + "CM_DOWNLOAD_CHECKSUM": "2d49a5288cddb37c3c64860a06d79bb9" + } + }, + "pytorch,fp32,weight_sharded": { + "default_variations": { + "download-tool": "rclone" + }, + "env": { + "CM_ML_MODEL_ACCURACY": "0.8025", + "CM_ML_MODEL_DLRM_MAX_INDEX_RANGE": "40000000", + "CM_ML_MODEL_FILE": "model_weights", + "CM_TMP_MODEL_ADDITIONAL_NAME": "", + "CM_DOWNLOAD_CHECKSUM": "" + } + }, + "pytorch,fp32,weight_sharded,rclone": { + "env": { + "CM_RCLONE_CONFIG_CMD": "rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com", + "CM_PACKAGE_URL": "mlc-inference:mlcommons-inference-wg-public/model_weights" + } + }, + "pytorch,fp32,weight_sharded,wget": { + "env": { + "CM_PACKAGE_URL": "https://cloud.mlcommons.org/index.php/s/XzfSeLgW8FYfR3S/download", + "CM_DAE_EXTRACT_DOWNLOADED": "yes", + "CM_DOWNLOAD_FILENAME": "download", + "CM_EXTRACT_UNZIP": "yes" + } + }, + "wget": { + "group": "download-tool", + "ad": { + "dae": { + "tags": "_wget" + } + } + }, + "rclone": { + "group": "download-tool", + "ad": { + "dae": { + "tags": "_rclone" + } + } + }, + "debug": { + "env": { + "CM_ML_MODEL_DEBUG": "yes" + } + }, + "pytorch,fp32,debug": { + "env": { + "CM_ML_MODEL_ACCURACY": "0.8107", + "CM_PACKAGE_URL": "https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.pt", + "CM_ML_MODEL_DLRM_MAX_INDEX_RANGE": "10000000" + } + }, + "onnx,fp32,debug": { + "env": { + "CM_ML_MODEL_ACCURACY": "0.8107", + "CM_PACKAGE_URL": "https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.onnx.tar", + "CM_ML_MODEL_DLRM_MAX_INDEX_RANGE": "10000000", + "CM_UNTAR": "yes", + "CM_ML_MODEL_FILE": "tb0875_10M.onnx" + } + }, + "weight_sharded": { + "group": "type", + "default": true, + "env": { + "CM_DLRM_MULTIHOT_MODEL": "yes" + } + } + }, + "print_env_at_the_end" : { + "CM_ML_MODEL_FILE_WITH_PATH": "Path to the ML model" + } +} diff --git a/script/get-ml-model-dlrm-terabyte/run.sh b/script/get-ml-model-dlrm-terabyte/run.sh new file mode 100644 index 0000000000..d2595b32f6 --- /dev/null +++ b/script/get-ml-model-dlrm-terabyte/run.sh @@ -0,0 +1,4 @@ +#/bin/bash +if [[ ${CM_TMP_MODEL_ADDITIONAL_NAME} ]]; then + ln -s ${CM_ML_MODEL_FILE} ${CM_TMP_MODEL_ADDITIONAL_NAME} +fi diff --git a/script/get-ml-model-efficientnet-lite/README.md b/script/get-ml-model-efficientnet-lite/README.md new file mode 100644 index 0000000000..61346f04c1 --- /dev/null +++ b/script/get-ml-model-efficientnet-lite/README.md @@ -0,0 +1,250 @@ +Automatically generated README for this automation recipe: **get-ml-model-efficientnet-lite** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-efficientnet-lite,1041f681977d4b7c) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-efficientnet-lite)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see 
the above meta description): *get,ml-model,efficientnet,raw,ml-model-efficientnet,ml-model-efficientnet-lite,lite,tflite,image-classification*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,ml-model,efficientnet,raw,ml-model-efficientnet,ml-model-efficientnet-lite,lite,tflite,image-classification`
+
+`cm run script --tags=get,ml-model,efficientnet,raw,ml-model-efficientnet,ml-model-efficientnet-lite,lite,tflite,image-classification[,variations] `
+
+*or*
+
+`cmr "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification"`
+
+`cmr "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,efficientnet,raw,ml-model-efficientnet,ml-model-efficientnet-lite,lite,tflite,image-classification',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,ml-model,efficientnet,raw,ml-model-efficientnet,ml-model-efficientnet-lite,lite,tflite,image-classification"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ml-model,efficientnet,raw,ml-model-efficientnet,ml-model-efficientnet-lite,lite,tflite,image-classification) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_tflite` + - Workflow: + +
+ + + * Group "**kind**" +
+ Click here to expand this section. + + * **`_lite0`** (default) + - Environment variables: + - *CM_ML_MODEL_EFFICIENTNET_LITE_KIND*: `lite0` + - Workflow: + * `_lite1` + - Environment variables: + - *CM_ML_MODEL_EFFICIENTNET_LITE_KIND*: `lite1` + - Workflow: + * `_lite2` + - Environment variables: + - *CM_ML_MODEL_EFFICIENTNET_LITE_KIND*: `lite2` + - Workflow: + * `_lite3` + - Environment variables: + - *CM_ML_MODEL_EFFICIENTNET_LITE_KIND*: `lite3` + - Workflow: + * `_lite4` + - Environment variables: + - *CM_ML_MODEL_EFFICIENTNET_LITE_KIND*: `lite4` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Environment variables: + - *CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION*: `fp32` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_PRECISION*: `fp32` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp32` + - Workflow: + * `_uint8` + - Aliases: `_int8` + - Environment variables: + - *CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION*: `int8` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `uint8` + - *CM_ML_MODEL_PRECISION*: `uint8` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `uint8` + - Workflow: + +
+ + + * Group "**resolution**" +
+ Click here to expand this section. + + * **`_resolution-224`** (default) + - Environment variables: + - *CM_ML_MODEL_IMAGE_HEIGHT*: `224` + - *CM_ML_MODEL_IMAGE_WIDTH*: `224` + - *CM_ML_MODEL_MOBILENET_RESOLUTION*: `224` + - *CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS*: `_resolution.224` + - Workflow: + * `_resolution-240` + - Environment variables: + - *CM_ML_MODEL_IMAGE_HEIGHT*: `240` + - *CM_ML_MODEL_IMAGE_WIDTH*: `240` + - *CM_ML_MODEL_MOBILENET_RESOLUTION*: `240` + - *CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS*: `_resolution.240` + - Workflow: + * `_resolution-260` + - Environment variables: + - *CM_ML_MODEL_IMAGE_HEIGHT*: `260` + - *CM_ML_MODEL_IMAGE_WIDTH*: `260` + - *CM_ML_MODEL_MOBILENET_RESOLUTION*: `260` + - *CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS*: `_resolution.260` + - Workflow: + * `_resolution-280` + - Environment variables: + - *CM_ML_MODEL_IMAGE_HEIGHT*: `280` + - *CM_ML_MODEL_IMAGE_WIDTH*: `280` + - *CM_ML_MODEL_MOBILENET_RESOLUTION*: `280` + - *CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS*: `_resolution.280` + - Workflow: + * `_resolution-300` + - Environment variables: + - *CM_ML_MODEL_IMAGE_HEIGHT*: `300` + - *CM_ML_MODEL_IMAGE_WIDTH*: `300` + - *CM_ML_MODEL_MOBILENET_RESOLUTION*: `300` + - *CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS*: `_resolution.300` + - Workflow: + +
+ + +#### Default variations + +`_fp32,_lite0,_resolution-224` + +#### Valid variation combinations checked by the community + + + +* `_lite0,_resolution-224` +* `_lite1,_resolution-240` +* `_lite2,_resolution-260` +* `_lite3,_resolution-280` +* `_lite4,_resolution-300` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` +* CM_ML_MODEL_PRECISION: `fp32` +* CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32` + +
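+
+*A minimal sketch of overriding one of these defaults from Python, assuming the `env` dictionary accepts the same keys as `--env.KEY=VALUE` on the CLI:*
+
+```python
+import cmind
+
+# Sketch: override an environment key for this run
+# (equivalent to passing --env.CM_ML_MODEL_RETRAINING=yes on the CLI).
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,efficientnet,raw,ml-model-efficientnet,ml-model-efficientnet-lite,lite,tflite,image-classification',
+                  'env':{'CM_ML_MODEL_RETRAINING':'yes'},
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```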
+
+___
+### Dependencies on other CM scripts
+
+
+  1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-efficientnet-lite/_cm.json)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-efficientnet-lite/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-efficientnet-lite/_cm.json)
+  1. ***Run native script if exists***
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-efficientnet-lite/_cm.json)
+  1. Run "postprocess" function from customize.py
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-efficientnet-lite/_cm.json)
+
+___
+### Script output
+`cmr "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification [,variations]" -j`
+#### New environment keys (filter)
+
+* `CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS`
+* `CM_ML_MODEL_*`
+#### New environment keys auto-detected from customize
+
+* `CM_ML_MODEL_FILE`
+* `CM_ML_MODEL_FILE_WITH_PATH`
+* `CM_ML_MODEL_PATH`
+* `CM_ML_MODEL_STARTING_WEIGHTS_FILENAME`
\ No newline at end of file
diff --git a/script/get-ml-model-efficientnet-lite/_cm.json b/script/get-ml-model-efficientnet-lite/_cm.json
new file mode 100644
index 0000000000..9e6daaa324
--- /dev/null
+++ b/script/get-ml-model-efficientnet-lite/_cm.json
@@ -0,0 +1,167 @@
+{
+  "alias": "get-ml-model-efficientnet-lite",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "cache": true,
+  "category": "AI/ML models",
+  "default_env": {
+    "CM_ML_MODEL_INPUTS_DATA_TYPE": "fp32",
+    "CM_ML_MODEL_PRECISION": "fp32",
+    "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "fp32"
+  },
+  "env": {
+    "CM_ML_MODEL": "efficientnet-lite",
+    "CM_ML_MODEL_DATASET": "imagenet2012-val",
+    "CM_ML_MODEL_MOBILENET_NAME_SUFFIX": "",
+    "CM_ML_MODEL_RETRAINING": "no",
+    "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "no",
+    "CM_ML_MODEL_DATA_LAYOUT": "NHWC",
+    "CM_ML_MODEL_INPUT_LAYER_NAME": "images",
+    "CM_ML_MODEL_INPUT_SHAPES": "\\\"input\\\": (BATCH_SIZE, 224, 224, 3)",
+    "CM_ML_MODEL_OUTPUT_LAYER_NAME": "Softmax",
+    "CM_ML_MODEL_NORMALIZE_DATA": "yes",
+    "CM_ML_MODEL_SUBTRACT_MEANS": "0",
+    "CM_ML_MODEL_GIVEN_CHANNEL_MEANS": "",
+    "CM_ML_MODEL_WEIGHTS_ARE_CHECKPOINTS": "yes",
+    "CM_PACKAGE_URL": "https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/lite/efficientnet-<<>>.tar.gz",
+    "CM_ML_MODEL_WEIGHTS_FILE": "model.ckpt.data-00000-of-00001",
+    "CM_ML_MODEL_FILE": "efficientnet-<<>>-<<>>.tflite",
+    "CM_UNTAR": "yes",
+    "CM_EXTRACT_FOLDER": "efficientnet-<<>>",
+    "CM_ML_MODEL_FULL_NAME": "efficientnet-<<>>-<<>>"
+  },
+  "new_env_keys": [
+    "CM_ML_MODEL_*",
+    "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS"
+  ],
+  "tags": [
+    "get",
+    "ml-model",
+    "efficientnet",
+    "raw",
+    "ml-model-efficientnet",
+    "ml-model-efficientnet-lite",
+    "lite",
+    "tflite",
+    "image-classification"
+  ],
+  "uid": "1041f681977d4b7c",
+  "variations": {
+    "tflite": {
+    },
+    "fp32": {
+      "group": "precision",
+      "default": true,
+      "env": {
+        "CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION": "fp32",
+        "CM_ML_MODEL_INPUTS_DATA_TYPE": "fp32",
+        "CM_ML_MODEL_PRECISION": "fp32",
+        "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "fp32"
+      }
+    },
+    "int8": {
+      "alias": "uint8"
+    },
+    "uint8": {
+      "group": "precision",
+      "env": {
+
"CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION": "int8", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "uint8", + "CM_ML_MODEL_PRECISION": "uint8", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "uint8" + } + }, + "lite0": { + "default": true, + "base": [ "resolution-224" ], + "env": { + "CM_ML_MODEL_EFFICIENTNET_LITE_KIND": "lite0" + }, + "group": "kind" + }, + "lite1": { + "base": [ "resolution-240" ], + "env": { + "CM_ML_MODEL_EFFICIENTNET_LITE_KIND": "lite1" + }, + "group": "kind" + }, + "lite2": { + "base": [ "resolution-260" ], + "env": { + "CM_ML_MODEL_EFFICIENTNET_LITE_KIND": "lite2" + }, + "group": "kind" + }, + "lite3": { + "base": [ "resolution-280" ], + "env": { + "CM_ML_MODEL_EFFICIENTNET_LITE_KIND": "lite3" + }, + "group": "kind" + }, + "lite4": { + "base": [ "resolution-300" ], + "env": { + "CM_ML_MODEL_EFFICIENTNET_LITE_KIND": "lite4" + }, + "group": "kind" + }, + "resolution-300": { + "env": { + "CM_ML_MODEL_IMAGE_HEIGHT": "300", + "CM_ML_MODEL_IMAGE_WIDTH": "300", + "CM_ML_MODEL_MOBILENET_RESOLUTION": "300", + "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS": "_resolution.300" + }, + "group": "resolution" + }, + "resolution-280": { + "env": { + "CM_ML_MODEL_IMAGE_HEIGHT": "280", + "CM_ML_MODEL_IMAGE_WIDTH": "280", + "CM_ML_MODEL_MOBILENET_RESOLUTION": "280", + "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS": "_resolution.280" + }, + "group": "resolution" + }, + "resolution-260": { + "env": { + "CM_ML_MODEL_IMAGE_HEIGHT": "260", + "CM_ML_MODEL_IMAGE_WIDTH": "260", + "CM_ML_MODEL_MOBILENET_RESOLUTION": "260", + "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS": "_resolution.260" + }, + "group": "resolution" + }, + "resolution-240": { + "env": { + "CM_ML_MODEL_IMAGE_HEIGHT": "240", + "CM_ML_MODEL_IMAGE_WIDTH": "240", + "CM_ML_MODEL_MOBILENET_RESOLUTION": "240", + "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS": "_resolution.240" + }, + "group": "resolution" + }, + "resolution-224": { + "default": true, + "env": { + "CM_ML_MODEL_IMAGE_HEIGHT": "224", + "CM_ML_MODEL_IMAGE_WIDTH": "224", + "CM_ML_MODEL_MOBILENET_RESOLUTION": "224", + "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS": "_resolution.224" + }, + "group": "resolution" + } + }, + "valid_variation_combinations": [ + [ "lite0", "resolution-224" ], + [ "lite1", "resolution-240" ], + [ "lite2", "resolution-260" ], + [ "lite3", "resolution-280" ], + [ "lite4", "resolution-300" ] + ], + "print_env_at_the_end" : { + "CM_ML_MODEL_FILE_WITH_PATH": "Path to the ML model" + } +} diff --git a/script/get-ml-model-efficientnet-lite/customize.py b/script/get-ml-model-efficientnet-lite/customize.py new file mode 100644 index 0000000000..5571383453 --- /dev/null +++ b/script/get-ml-model-efficientnet-lite/customize.py @@ -0,0 +1,52 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + cm = automation.cmind + + path = os.getcwd() + + url = env['CM_PACKAGE_URL'] + env['CM_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url + + print ('Downloading from {}'.format(url)) + + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', + 'url':url}) + if r['return']>0: return r + + filename = r['filename'] + + if env.get('CM_UNZIP') == "yes" or env.get('CM_UNTAR') == "yes": + if env.get('CM_UNZIP') == "yes": + cmd="unzip " + elif env.get('CM_UNTAR') == "yes": + cmd="tar -xvzf " + os.system(cmd+filename) + + filename = env['CM_ML_MODEL_FILE'] + + extract_folder = env.get('CM_EXTRACT_FOLDER', '') + + if extract_folder: + env['CM_ML_MODEL_FILE_WITH_PATH']=os.path.join(path, extract_folder, 
filename)
+        else:
+            env['CM_ML_MODEL_FILE_WITH_PATH']=os.path.join(path, filename)
+    else:
+        env['CM_ML_MODEL_FILE']=filename
+        env['CM_ML_MODEL_FILE_WITH_PATH']=r['path']
+
+    env['CM_ML_MODEL_PATH']=path
+
+    if not os.path.exists(env['CM_ML_MODEL_FILE_WITH_PATH']):
+        return {'return':1, 'error': f"Model file path {env['CM_ML_MODEL_FILE_WITH_PATH']} does not exist. The model name {env['CM_ML_MODEL_FILE']} in the model meta is probably wrong"}
+
+    return {'return':0}
diff --git a/script/get-ml-model-gptj/README.md b/script/get-ml-model-gptj/README.md
new file mode 100644
index 0000000000..30508eae67
--- /dev/null
+++ b/script/get-ml-model-gptj/README.md
@@ -0,0 +1,322 @@
+Automatically generated README for this automation recipe: **get-ml-model-gptj**
+
+Category: **AI/ML models**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-gptj,a41166210f294fbf) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-gptj)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,raw,ml-model,gptj,gpt-j,large-language-model*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get raw ml-model gptj gpt-j large-language-model" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,raw,ml-model,gptj,gpt-j,large-language-model`
+
+`cm run script --tags=get,raw,ml-model,gptj,gpt-j,large-language-model[,variations] [--input_flags]`
+
+*or*
+
+`cmr "get raw ml-model gptj gpt-j large-language-model"`
+
+`cmr "get raw ml-model gptj gpt-j large-language-model [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,raw,ml-model,gptj,gpt-j,large-language-model',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
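+
+*Illustrative sketch: the group defaults listed under [Default variations](#default-variations) (`_mlcommons,_pytorch,_rclone`) apply when no variations are given; appending variation tags pins a group choice explicitly:*
+
+```python
+import cmind
+
+# Sketch: explicitly select the documented _pytorch,_fp32 combination and
+# the default _rclone download tool (same effect as running with bare tags).
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,raw,ml-model,gptj,gpt-j,large-language-model,_pytorch,_fp32,_rclone',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```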
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,raw,ml-model,gptj,gpt-j,large-language-model"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,raw,ml-model,gptj,gpt-j,large-language-model) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get raw ml-model gptj gpt-j large-language-model[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_batch_size.#` + - Environment variables: + - *CM_ML_MODEL_BATCH_SIZE*: `#` + - Workflow: + * `_pytorch,fp32` + - Environment variables: + - *CM_DOWNLOAD_EXTRA_OPTIONS*: ` --output-document checkpoint.zip` + - *CM_UNZIP*: `yes` + - *CM_DOWNLOAD_CHECKSUM_NOT_USED*: `e677e28aaf03da84584bb3073b7ee315` + - *CM_PACKAGE_URL*: `https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download` + - *CM_RCLONE_CONFIG_CMD*: `rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com` + - *CM_RCLONE_URL*: `mlc-inference:mlcommons-inference-wg-public/gpt-j` + - Workflow: + * `_pytorch,fp32,wget` + - Workflow: + * `_pytorch,int4,intel` + - Workflow: + * `_pytorch,int8,intel` + - Workflow: + * `_pytorch,intel` + - Environment variables: + - *CM_GPTJ_INTEL_MODEL*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,mlperf,inference,results + - CM script: [get-mlperf-inference-results](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-results) + - CM script: [get-mlperf-inference-results-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-results-dir) + * get,ml-model,gpt-j,_fp32,_pytorch + - CM script: [get-ml-model-gptj](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-gptj) + * get,conda,_name.gptj-pt + - CM script: [get-conda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-conda) + * get,python,_conda.gptj-pt + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic,conda-package,_package.intel-openmp,_source.intel + * CM names: `--adr.['conda-package', 'intel-openmp']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.jemalloc,_source.conda-forge + * CM names: `--adr.['conda-package', 'jemalloc']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * install,ipex,from.src,_for-intel-mlperf-inference-v3.1-gptj + - CM script: [install-ipex-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-ipex-from-src) + * get,dataset,cnndm,_calibration + - CM script: [get-dataset-cnndm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-cnndm) + * `_saxml,fp32` + - Environment variables: + - *CM_TMP_MODEL_SAXML*: `fp32` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,ml-model,gptj,_pytorch,_fp32 + - CM script: [get-ml-model-gptj](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-gptj) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_package.jax[cpu] + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.paxml + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.praxis + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.transformers + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.accelerate + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_saxml,int8` + - Environment variables: + - *CM_TMP_MODEL_SAXML*: `int8` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,ml-model,gptj,_saxml,_fp32 + - CM script: [get-ml-model-gptj](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-gptj) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_package.praxis + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.apache-beam + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,git,repo,_repo.https://github.com/google/saxml + * CM names: `--adr.['saxml']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + +
+ + + * Group "**download-tool**" +
+ Click here to expand this section. + + * **`_rclone`** (default) + - Environment variables: + - *CM_DOWNLOAD_FILENAME*: `checkpoint` + - *CM_DOWNLOAD_URL*: `<<>>` + - Workflow: + * `_wget` + - Environment variables: + - *CM_DOWNLOAD_URL*: `<<>>` + - *CM_DOWNLOAD_FILENAME*: `checkpoint.zip` + - Workflow: + +
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * **`_pytorch`** (default) + - Environment variables: + - *CM_ML_MODEL_DATA_LAYOUT*: `NCHW` + - *CM_ML_MODEL_FRAMEWORK*: `pytorch` + - *CM_ML_STARTING_WEIGHTS_FILENAME*: `<<>>` + - Workflow: + * `_saxml` + - Workflow: + +
+ + + * Group "**model-provider**" +
+ Click here to expand this section. + + * `_intel` + - Workflow: + * **`_mlcommons`** (default) + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * `_fp32` + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `fp32` + - *CM_ML_MODEL_PRECISION*: `fp32` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `fp32` + - Workflow: + * `_int4` + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `int4` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `int4` + - Workflow: + * `_int8` + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `int8` + - *CM_ML_MODEL_PRECISION*: `int8` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `int8` + - Workflow: + * `_uint8` + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `uint8` + - *CM_ML_MODEL_PRECISION*: `uint8` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `uint8` + - Workflow: + +
+ + +#### Default variations + +`_mlcommons,_pytorch,_rclone` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--checkpoint=value` → `GPTJ_CHECKPOINT_PATH=value`
+* `--download_path=value` → `CM_DOWNLOAD_PATH=value`
+* `--to=value` → `CM_DOWNLOAD_PATH=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "checkpoint":...})
+```
+
+</details>
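+
+*A completed (hedged) version of the stub above; the path is hypothetical. Per [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-gptj/customize.py), when `checkpoint` points to an existing path the download step is skipped:*
+
+```python
+import cmind
+
+# Sketch: 'checkpoint' maps to GPTJ_CHECKPOINT_PATH as in the table above;
+# the path below is a hypothetical local checkpoint location.
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,raw,ml-model,gptj,gpt-j,large-language-model',
+                  'checkpoint':'/path/to/gpt-j/checkpoint',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```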
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+
+___
+### Dependencies on other CM scripts
+
+
+  1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-gptj/_cm.json)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-gptj/customize.py)***
+  1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-gptj/_cm.json)***
+     * download-and-extract
+       * `if (CM_TMP_REQUIRE_DOWNLOAD == yes)`
+       * CM names: `--adr.['dae']...`
+       - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract)
+  1. ***Run native script if exists***
+     * [run-int4-calibration.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-gptj/run-int4-calibration.sh)
+     * [run-intel.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-gptj/run-intel.sh)
+     * [run-saxml-quantized.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-gptj/run-saxml-quantized.sh)
+     * [run-saxml.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-gptj/run-saxml.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-gptj/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-gptj/customize.py)***
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-gptj/_cm.json)
+
+___
+### Script output
+`cmr "get raw ml-model gptj gpt-j large-language-model [,variations]" [--input_flags] -j`
+#### New environment keys (filter)
+
+* `CM_ML_MODEL_*`
+* `GPTJ_CHECKPOINT_PATH`
+#### New environment keys auto-detected from customize
+
+* `CM_ML_MODEL_FILE`
+* `CM_ML_MODEL_FILE_WITH_PATH`
+* `CM_ML_MODEL_WEIGHT_DATA_TYPES`
\ No newline at end of file
diff --git a/script/get-ml-model-gptj/_cm.json b/script/get-ml-model-gptj/_cm.json
new file mode 100644
index 0000000000..f3a3f64b86
--- /dev/null
+++ b/script/get-ml-model-gptj/_cm.json
@@ -0,0 +1,293 @@
+{
+  "alias": "get-ml-model-gptj",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "cache": true,
+  "category": "AI/ML models",
+  "env": {
+    "CM_ML_MODEL": "GPTJ",
+    "CM_ML_MODEL_DATASET": "cnndm",
+    "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "no"
+  },
+  "input_mapping": {
+    "checkpoint": "GPTJ_CHECKPOINT_PATH",
+    "download_path": "CM_DOWNLOAD_PATH",
+    "to": "CM_DOWNLOAD_PATH"
+  },
+  "new_env_keys": [
+    "CM_ML_MODEL_*",
+    "GPTJ_CHECKPOINT_PATH"
+  ],
+  "prehook_deps": [
+    {
+      "env": {
+        "CM_DOWNLOAD_FINAL_ENV_NAME": "GPTJ_CHECKPOINT_PATH",
+        "CM_EXTRACT_FINAL_ENV_NAME": "GPTJ_CHECKPOINT_PATH",
+        "CM_EXTRACT_TO_FOLDER": "gpt-j"
+      },
+      "tags": "download-and-extract",
+      "update_tags_from_env_with_prefix": {
+        "_url.": [
+          "CM_DOWNLOAD_URL"
+        ]
+      },
+      "enable_if_env": {
+        "CM_TMP_REQUIRE_DOWNLOAD": [ "yes" ]
+      },
+      "force_cache": true,
+      "names": [
+        "dae"
+      ],
+      "extra_cache_tags": "gptj,model"
+    }
+  ],
+  "tags": [
+    "get",
+    "raw",
+    "ml-model",
+    "gptj",
+    "gpt-j",
+    "large-language-model"
+  ],
+  "uid": "a41166210f294fbf",
+  "variations": {
+    "batch_size.#": {
+      "env": {
+        "CM_ML_MODEL_BATCH_SIZE": "#"
+      }
+    },
+    "fp32": {
+      "env": {
+        "CM_ML_MODEL_INPUT_DATA_TYPES": "fp32",
+        "CM_ML_MODEL_PRECISION": "fp32",
+        "CM_ML_MODEL_WEIGHT_DATA_TYPES": "fp32"
+      },
+      "group": "precision"
+    },
+    "int8": {
+
"env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "int8", + "CM_ML_MODEL_PRECISION": "int8", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "int8" + }, + "group": "precision" + }, + "pytorch": { + "env": { + "CM_ML_MODEL_DATA_LAYOUT": "NCHW", + "CM_ML_MODEL_FRAMEWORK": "pytorch", + "CM_ML_STARTING_WEIGHTS_FILENAME": "<<>>" + }, + "group": "framework", + "default": true + }, + "pytorch,fp32": { + "env": { + "CM_DOWNLOAD_EXTRA_OPTIONS": " --output-document checkpoint.zip", + "CM_UNZIP": "yes", + "CM_DOWNLOAD_CHECKSUM_NOT_USED": "e677e28aaf03da84584bb3073b7ee315", + "CM_PACKAGE_URL": "https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download", + "CM_RCLONE_CONFIG_CMD": "rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com", + "CM_RCLONE_URL": "mlc-inference:mlcommons-inference-wg-public/gpt-j" + }, + "required_disk_space": 22700 + }, + "pytorch,fp32,wget": { + "add_deps_recursive": { + "dae": { + "tags": "_extract" + } + } + }, + "saxml": { + "group": "framework" + }, + "saxml,fp32": { + "env": { + "CM_TMP_MODEL_SAXML": "fp32" + }, + "new_env_keys": [ + "GPTJ_SAXML_CHECKPOINT_PATH" + ], + "deps": [ + { + "tags": "get,ml-model,gptj,_pytorch,_fp32" + }, + { + "tags": "get,python3", + "names": [ + "python", + "python3" + ] + }, + { + "tags": "get,generic-python-lib,_package.jax[cpu]" + }, + { + "tags": "get,generic-python-lib,_package.paxml" + }, + { + "tags": "get,generic-python-lib,_package.praxis" + }, + { + "tags": "get,generic-python-lib,_package.transformers" + }, + { + "tags": "get,generic-python-lib,_package.accelerate" + } + ] + }, + "saxml,int8": { + "env": { + "CM_TMP_MODEL_SAXML": "int8" + }, + "deps": [ + { + "tags": "get,ml-model,gptj,_saxml,_fp32" + }, + { + "tags": "get,python3", + "names": [ + "python", + "python3" + ], + "version": "3.10.0" + }, + { + "tags": "get,generic-python-lib,_package.praxis" + }, + { + "tags": "get,generic-python-lib,_package.apache-beam" + }, + { + "tags": "get,git,repo,_repo.https://github.com/google/saxml", + "extra_cache_tags": "saxml", + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_SAXML_REPO_PATH" + }, + "names": [ + "saxml" + ] + } + ] + }, + "int4": { + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "int4", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "int4" + }, + "group": "precision" + }, + "uint8": { + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "uint8", + "CM_ML_MODEL_PRECISION": "uint8", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "uint8" + }, + "group": "precision" + }, + "mlcommons": { + "default_variations": { + "precision": "fp32" + }, + "group": "model-provider", + "default": true + }, + "intel": { + "default_variations": { + "framework": "pytorch" + }, + "group": "model-provider" + }, + "pytorch,intel": { + "default_variations": { + "precision": "int8" + }, + "adr": { + "conda-package": { + "tags": "_name.gptj-pt" + } + }, + "deps": [ + { + "tags": "get,mlperf,inference,results", + "version": "v3.1" + }, + { + "tags": "get,ml-model,gpt-j,_fp32,_pytorch", + "env": { + "CM_GPTJ_INTEL_MODEL": "" + }, + "force_new_env_keys": [ + "GPTJ_CHECKPOINT_PATH" + ] + }, + { + "tags": "get,conda,_name.gptj-pt" + }, + { + "tags": "get,python,_conda.gptj-pt" + }, + { + "names": [ + "conda-package", + "intel-openmp" + ], + "tags": "get,generic,conda-package,_package.intel-openmp,_source.intel", + "version": "2023.1.0" + }, + { + "tags": 
"get,generic,conda-package,_package.jemalloc,_source.conda-forge", + "names": [ + "conda-package", + "jemalloc" + ] + }, + { + "tags": "install,ipex,from.src,_for-intel-mlperf-inference-v3.1-gptj" + }, + { + "tags": "get,dataset,cnndm,_calibration" + } + ], + "env": { + "CM_GPTJ_INTEL_MODEL": "yes" + } + }, + "wget": { + "group": "download-tool", + "add_deps_recursive": { + "dae": { + "tags": "_wget" + } + }, + "env": { + "CM_DOWNLOAD_URL": "<<>>", + "CM_DOWNLOAD_FILENAME": "checkpoint.zip" + } + }, + "rclone": { + "group": "download-tool", + "default": true, + "add_deps_recursive": { + "dae": { + "tags": "_rclone" + } + }, + "env": { + "CM_DOWNLOAD_FILENAME": "checkpoint", + "CM_DOWNLOAD_URL": "<<>>" + } + }, + "pytorch,int8,intel": { + }, + "pytorch,int4,intel": { + } + }, + "print_env_at_the_end" : { + "CM_ML_MODEL_FILE_WITH_PATH": "Path to the ML model" + }, + "docker": { + "run": false + } +} diff --git a/script/get-ml-model-gptj/convert_gptj_ckpt.py b/script/get-ml-model-gptj/convert_gptj_ckpt.py new file mode 100644 index 0000000000..34f404932a --- /dev/null +++ b/script/get-ml-model-gptj/convert_gptj_ckpt.py @@ -0,0 +1,179 @@ +"""Convert weights from a gpt-j-6b model to a pax one. + +Usage: + +# Install the latest main branch of huggingface/transformers +pip3 install git+https://github.com/huggingface/transformers + +# Get a checkpiont from the GPTJ family +https://huggingface.co/EleutherAI/gpt-j-6b + +This points to +https://github.com/huggingface/transformers/blob/v4.30.2/src/transformers/models/gptj/modeling_flax_gptj.py +and in the default config, use_parallel_residual is true + +# Example cmd: +python3 -m convert_gptj_ckpt --base EleutherAI/gpt-j-6b --pax pax_3b +""" +import argparse +import jax +from jax.experimental import pjit +import numpy as np +from paxml import checkpoints +from paxml import train_states +from praxis import py_utils +from transformers import AutoModelForCausalLM + +# 6B example +num_layers = 28 +num_heads = 16 +dims_per_head = 256 +vocab = 50401 +num_gpus = 1 + + +def convert(base_model_path, pax_model_path): + """Convert from gpt-j-6b to pax.""" + print(f'Loading the base model from {base_model_path}') + + base = AutoModelForCausalLM.from_pretrained( + base_model_path, low_cpu_mem_usage=True + ) + for key, value in base.state_dict().items(): + print('%s %s' % (key, value.data.numpy().shape)) + + jax_weights = { + 'lm': { + 'embedding_lookup': { + 'emb_var': base.state_dict()[ + 'transformer.wte.weight' + ].data.numpy()[:vocab, :] + }, + 'softmax': { + 'logits_ffn': { + 'linear': { + 'w': ( + base.state_dict()['lm_head.weight'] + .data.numpy() + .transpose()[:, :vocab] + ), + }, + 'bias': {'b': base.state_dict()['lm_head.bias'].data.numpy()}, + } + }, + 'final_ln': { + 'scale': base.state_dict()[ + 'transformer.ln_f.weight' + ].data.numpy(), + 'bias': base.state_dict()['transformer.ln_f.bias'].data.numpy(), + }, + 'transformer': {}, + } + } + + for layer_idx in range(num_layers): + query = base.state_dict()[ + 'transformer.h.%d.attn.q_proj.weight' % layer_idx + ].data.numpy() + key = base.state_dict()[ + 'transformer.h.%d.attn.k_proj.weight' % layer_idx + ].data.numpy() + value = base.state_dict()[ + 'transformer.h.%d.attn.v_proj.weight' % layer_idx + ].data.numpy() + wc = np.stack((query, key, value)) + wc = np.reshape( + wc, [3, num_heads, dims_per_head, num_heads * dims_per_head] + ) + wc = np.transpose(wc, (0, 3, 1, 2)) + + w_post = base.state_dict()[ + 'transformer.h.%d.attn.out_proj.weight' % layer_idx + ].data.numpy() + w_post = np.reshape( + 
w_post, [num_heads * dims_per_head, num_heads, dims_per_head] + ) + layer_weight = { + 'self_attention': { + 'combined_qkv': { + 'w': wc, + }, + 'post': { + 'w': w_post, + }, + }, + 'ff_layer': { + 'ffn_layer1': { + 'linear': { + 'w': ( + base.state_dict()[ + 'transformer.h.%d.mlp.fc_in.weight' % layer_idx + ] + .data.numpy() + .transpose() + ), + }, + 'bias': { + 'b': base.state_dict()[ + 'transformer.h.%d.mlp.fc_in.bias' % layer_idx + ].data.numpy(), + }, + }, + 'ffn_layer2': { + 'linear': { + 'w': ( + base.state_dict()[ + 'transformer.h.%d.mlp.fc_out.weight' % layer_idx + ] + .data.numpy() + .transpose() + ), + }, + 'bias': { + 'b': base.state_dict()[ + 'transformer.h.%d.mlp.fc_out.bias' % layer_idx + ].data.numpy(), + }, + }, + }, + 'layer_norm': { + 'scale': base.state_dict()[ + 'transformer.h.%d.ln_1.weight' % layer_idx + ].data.numpy(), + 'bias': base.state_dict()[ + 'transformer.h.%d.ln_1.bias' % layer_idx + ].data.numpy(), + }, + } + jax_weights['lm']['transformer']['x_layers_%d' % layer_idx] = layer_weight + + print(f'Saving the pax model to {pax_model_path}') + jax_states = train_states.TrainState( + step=0, mdl_vars={'params': jax_weights}, opt_states={} + ) + device_mesh = py_utils.create_device_mesh([1, 1, num_gpus]) + global_mesh = jax.sharding.Mesh(device_mesh, ['replica', 'data_mdl2', 'mdl']) + + # Identity pjit is needed to output a GDA model_states. + def identity(x): + return x + + pjitted_identity = pjit.pjit(identity, in_shardings=None, out_shardings=None) + with global_mesh: + jax_states_gda = pjitted_identity(jax_states) + + checkpoints.save_checkpoint( + jax_states_gda, + pax_model_path, + checkpoint_type=checkpoints.CheckpointType.GDA, + ) + print('done') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--base-model-path', type=str, required=True) + parser.add_argument('--pax-model-path', type=str, required=True) + args = parser.parse_args() + + convert(args.base_model_path, args.pax_model_path) diff --git a/script/get-ml-model-gptj/customize.py b/script/get-ml-model-gptj/customize.py new file mode 100644 index 0000000000..d719f0095f --- /dev/null +++ b/script/get-ml-model-gptj/customize.py @@ -0,0 +1,65 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if env.get('CM_GPTJ_INTEL_MODEL', '') == 'yes': + i['run_script_input']['script_name'] = 'run-intel' + harness_root = os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'code', 'gptj-99', 'pytorch-cpu') + print(f"Harness Root: {harness_root}") + env['CM_HARNESS_CODE_ROOT'] = harness_root + env['CM_CALIBRATION_CODE_ROOT'] = os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'calibration') + + env['CHECKPOINT_DIR'] = env['GPTJ_CHECKPOINT_PATH'] + + env['QUANTIZED_MODEL_DIR'] = os.getcwd() + + if env['CM_ML_MODEL_WEIGHT_DATA_TYPES'] == "int8": + env['INT8_MODEL_DIR'] = os.getcwd() + else: + env['INT4_MODEL_DIR'] = os.getcwd() + else: + is_saxml = env.get('CM_TMP_MODEL_SAXML','') + if is_saxml == "fp32": + i['run_script_input']['script_name'] = 'run-saxml' + elif is_saxml == "int8": + i['run_script_input']['script_name'] = 'run-saxml-quantized' + else: + path = env.get('GPTJ_CHECKPOINT_PATH', '').strip() + + if path == '' or not os.path.exists(path): + env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + if os.path.exists(os.path.join(env['GPTJ_CHECKPOINT_PATH'], "checkpoint-final")): + env['GPTJ_CHECKPOINT_PATH'] = 
os.path.join(env['GPTJ_CHECKPOINT_PATH'], "checkpoint-final") + + is_saxml = env.get('CM_TMP_MODEL_SAXML','') + if is_saxml == "fp32": + if os.path.exists("pax_gptj_checkpoint"): + env['GPTJ_SAXML_CHECKPOINT_PATH'] = os.path.join(os.getcwd(), "pax_gptj_checkpoint") + env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_SAXML_CHECKPOINT_PATH'] + else: + return {'return': 1, 'error': 'pax_gptj_checkpoint generation failed'} + + elif is_saxml == "int8": + if os.path.exists("int8_ckpt"): + env['GPTJ_SAXML_INT8_CHECKPOINT_PATH'] = os.path.join(os.getcwd(), "int8_ckpt") + env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_SAXML_INT8_CHECKPOINT_PATH'] + else: + return {'return': 1, 'error': 'pax_gptj_checkpoint generation failed'} + else: + env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_CHECKPOINT_PATH'] + + env['CM_ML_MODEL_FILE'] = os.path.basename(env['CM_ML_MODEL_FILE_WITH_PATH']) + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + + return {'return':0} diff --git a/script/get-ml-model-gptj/run-int4-calibration.sh b/script/get-ml-model-gptj/run-int4-calibration.sh new file mode 100644 index 0000000000..45c3669e56 --- /dev/null +++ b/script/get-ml-model-gptj/run-int4-calibration.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +echo ${CM_CALIBRATION_CODE_ROOT} +cd ${CM_CALIBRATION_CODE_ROOT}/gpt-j/pytorch-cpu/INT4 +pip install -r requirements.txt +bash run_calibration_int4.sh + +test $? -eq 0 || exit $? diff --git a/script/get-ml-model-gptj/run-intel.sh b/script/get-ml-model-gptj/run-intel.sh new file mode 100644 index 0000000000..f6cb2134da --- /dev/null +++ b/script/get-ml-model-gptj/run-intel.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +export CALIBRATION_DATA_JSON=${CM_CALIBRATION_DATASET_CNNDM_PATH} + + +if [[ ${CM_ML_MODEL_WEIGHT_DATA_TYPES} == "int4" ]]; then + export INT4_CALIBRATION_DIR=${PWD}/quantized-int4-model + bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-int4-calibration.sh + cd ${CM_HARNESS_CODE_ROOT} + bash run_quantization_int4.sh +else + cd ${CM_HARNESS_CODE_ROOT} + bash run_quantization.sh +fi + +test $? -eq 0 || exit $? diff --git a/script/get-ml-model-gptj/run-saxml-quantized.sh b/script/get-ml-model-gptj/run-saxml-quantized.sh new file mode 100644 index 0000000000..e74862be01 --- /dev/null +++ b/script/get-ml-model-gptj/run-saxml-quantized.sh @@ -0,0 +1,6 @@ +#!/bin/bash +CUR=$PWD +${CM_PYTHON_BIN_WITH_PATH} -m pip install jaxlib==0.4.24 +cd ${CM_TMP_CURRENT_SCRIPT_PATH} +${CM_PYTHON_BIN_WITH_PATH} ${CM_SAXML_REPO_PATH}/saxml/tools/offline_quantize.py --input_dir ${CM_ML_MODEL_FILE_WITH_PATH}/checkpoint_00000000/state --output_dir ${CUR}/int8_ckpt/checkpoint_00000000/state --quantization_configs "gptj" > offline_quantize2.log +test $? -eq 0 || exit $? diff --git a/script/get-ml-model-gptj/run-saxml.sh b/script/get-ml-model-gptj/run-saxml.sh new file mode 100644 index 0000000000..031d736c09 --- /dev/null +++ b/script/get-ml-model-gptj/run-saxml.sh @@ -0,0 +1,8 @@ +#!/bin/bash +CUR=$PWD +rm -rf pax_gptj_checkpoint +cd ${CM_TMP_CURRENT_SCRIPT_PATH} +${CM_PYTHON_BIN_WITH_PATH} -m convert_gptj_ckpt --base ${GPTJ_CHECKPOINT_PATH} --pax ${CUR}/pax_gptj_checkpoint +test $? -eq 0 || exit $? 
+ +cd "$CUR" diff --git a/script/get-ml-model-huggingface-zoo/README-extra.md b/script/get-ml-model-huggingface-zoo/README-extra.md new file mode 100644 index 0000000000..b7ec3407b2 --- /dev/null +++ b/script/get-ml-model-huggingface-zoo/README-extra.md @@ -0,0 +1,21 @@ +# Examples + +```bash +cmr "get ml-model huggingface zoo _model-stub.alpindale/Llama-2-13b-ONNX" --model_filename=FP32/LlamaV2_13B_float32.onnx --full_subfolder=FP32 +``` + +```bash +cmr "get ml-model huggingface zoo _model-stub.microsoft/Mistral-7B-v0.1-onnx" --model_filename=Mistral-7B-v0.1.onnx,Mistral-7B-v0.1.onnx.data +``` + +```bash +cmr "get ml-model huggingface zoo _model-stub.Intel/gpt-j-6B-int8-static" --model_filename=model.onnx --full_subfolder=. +``` + +```bash +cmr "get ml-model huggingface zoo _model-stub.runwayml/stable-diffusion-v1-5" --revision=onnx --model_filename=unet/model.onnx,unet/weights.pb +``` + +```bash +cmr "get ml-model huggingface zoo _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --model_filename=model.onnx +``` diff --git a/script/get-ml-model-huggingface-zoo/README.md b/script/get-ml-model-huggingface-zoo/README.md new file mode 100644 index 0000000000..571cc9c545 --- /dev/null +++ b/script/get-ml-model-huggingface-zoo/README.md @@ -0,0 +1,194 @@ +Automatically generated README for this automation recipe: **get-ml-model-huggingface-zoo** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-huggingface-zoo,53cf8252a443446a) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-huggingface-zoo)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,ml-model,huggingface,zoo* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get ml-model huggingface zoo" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,ml-model,huggingface,zoo` + +`cm run script --tags=get,ml-model,huggingface,zoo[,variations] [--input_flags]` + +*or* + +`cmr "get ml-model huggingface zoo"` + +`cmr "get ml-model huggingface zoo [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,huggingface,zoo',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
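+
+*Illustrative sketch mirroring one of the `cmr` examples in [README-extra.md](README-extra.md), combining the `_model-stub.#` variation with the `model_filename` flag:*
+
+```python
+import cmind
+
+# Sketch: equivalent to
+#   cmr "get ml-model huggingface zoo _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --model_filename=model.onnx
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,huggingface,zoo,_model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1',
+                  'model_filename':'model.onnx',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```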
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,ml-model,huggingface,zoo"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ml-model,huggingface,zoo) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get ml-model huggingface zoo[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_model-stub.#` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `#` + - Workflow: + * `_onnx-subfolder` + - Environment variables: + - *CM_HF_SUBFOLDER*: `onnx` + - Workflow: + * `_pierreguillou_bert_base_cased_squad_v1.1_portuguese` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `pierreguillou/bert-base-cased-squad-v1.1-portuguese` + - Workflow: + * `_prune` + - Environment variables: + - *CM_MODEL_TASK*: `prune` + - Workflow: + +
+ + + * Group "**download-type**" +
+ Click here to expand this section. + + * `_clone-repo` + - Environment variables: + - *CM_GIT_CLONE_REPO*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,git,repo,_lfs + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + +
+ + +#### Script flags mapped to environment +
+
+Click here to expand this section.
+
+* `--download_path=value` → `CM_DOWNLOAD_PATH=value`
+* `--env_key=value` → `CM_MODEL_ZOO_ENV_KEY=value`
+* `--full_subfolder=value` → `CM_HF_FULL_SUBFOLDER=value`
+* `--model_filename=value` → `CM_MODEL_ZOO_FILENAME=value`
+* `--revision=value` → `CM_HF_REVISION=value`
+* `--subfolder=value` → `CM_HF_SUBFOLDER=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "download_path":...})
+```
+
+</details>
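+
+*A completed (hedged) sketch of the stub above. Per [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-huggingface-zoo/customize.py), a non-empty `env_key` K additionally exports `CM_ML_MODEL_K_PATH`; the key name below is illustrative, and reading `new_env` from the result assumes the same JSON output exposed by `-j` on the CLI:*
+
+```python
+import cmind
+
+# Sketch: 'env_key' maps to CM_MODEL_ZOO_ENV_KEY, so the resolved model
+# directory should also be exported as CM_ML_MODEL_BERT_PATH here.
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,huggingface,zoo,_model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1',
+                  'model_filename':'model.onnx',
+                  'env_key':'BERT',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+else:
+    print (r.get('new_env', {}).get('CM_ML_MODEL_BERT_PATH', ''))
+```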
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-huggingface-zoo/_cm.json)***
+     * get,python3
+       * CM names: `--adr.['python3', 'python']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,generic-python-lib,_huggingface_hub
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-huggingface-zoo/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-huggingface-zoo/_cm.json)
+  1. ***Run native script if exists***
+     * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-huggingface-zoo/run.bat)
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-huggingface-zoo/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-huggingface-zoo/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-huggingface-zoo/customize.py)***
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-huggingface-zoo/_cm.json)
+
+___
+### Script output
+`cmr "get ml-model huggingface zoo [,variations]" [--input_flags] -j`
+#### New environment keys (filter)
+
+* `CM_ML_MODEL*`
+* `CM_MODEL_ZOO_STUB`
+#### New environment keys auto-detected from customize
+
+* `CM_ML_MODEL_'+env_key+'_FILE_WITH_PATH`
+* `CM_ML_MODEL_'+env_key+'_PATH`
+* `CM_ML_MODEL_PATH`
\ No newline at end of file
diff --git a/script/get-ml-model-huggingface-zoo/_cm.json b/script/get-ml-model-huggingface-zoo/_cm.json
new file mode 100644
index 0000000000..912952bb0c
--- /dev/null
+++ b/script/get-ml-model-huggingface-zoo/_cm.json
@@ -0,0 +1,88 @@
+{
+  "alias": "get-ml-model-huggingface-zoo",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "cache": true,
+  "category": "AI/ML models",
+  "deps": [
+    {
+      "names": [
+        "python3",
+        "python"
+      ],
+      "tags": "get,python3"
+    },
+    {
+      "tags": "get,generic-python-lib,_huggingface_hub"
+    }
+  ],
+  "input_mapping": {
+    "download_path": "CM_DOWNLOAD_PATH",
+    "model_filename": "CM_MODEL_ZOO_FILENAME",
+    "env_key": "CM_MODEL_ZOO_ENV_KEY",
+    "subfolder": "CM_HF_SUBFOLDER",
+    "revision": "CM_HF_REVISION",
+    "full_subfolder": "CM_HF_FULL_SUBFOLDER"
+  },
+  "env": {},
+  "new_env_keys": [
+    "CM_ML_MODEL*",
+    "CM_MODEL_ZOO_STUB"
+  ],
+  "tags": [
+    "get",
+    "ml-model",
+    "model",
+    "zoo",
+    "raw",
+    "model-zoo",
+    "huggingface"
+  ],
+  "tags_help": "get ml-model huggingface zoo",
+  "uid": "53cf8252a443446a",
+  "variations": {
+    "model-stub.#": {
+      "env": {
+        "CM_MODEL_ZOO_STUB": "#"
+      }
+    },
+    "onnx-subfolder": {
+      "env": {
+        "CM_HF_SUBFOLDER": "onnx"
+      }
+    },
+    "pierreguillou_bert_base_cased_squad_v1.1_portuguese": {
+      "env": {
+        "CM_MODEL_ZOO_STUB": "pierreguillou/bert-base-cased-squad-v1.1-portuguese"
+      }
+    },
+    "prune":{
+      "env":{
+        "CM_MODEL_TASK": "prune"
+      }
+    },
+    "clone-repo": {
+      "group": "download-type",
+      "env": {
+        "CM_GIT_CLONE_REPO": "yes"
+      },
+      "deps": [
+        {
+          "tags": "get,git,repo,_lfs",
+          "update_tags_from_env_with_prefix": {
+
"_repo.https://huggingface.co/": [ + "CM_MODEL_ZOO_STUB" + ] + }, + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_ML_MODEL_PATH" + } + } + ] + } + }, + "print_env_at_the_end" : { + "CM_ML_MODEL_FILE_WITH_PATH": "Path to the ML model" + } +} + diff --git a/script/get-ml-model-huggingface-zoo/customize.py b/script/get-ml-model-huggingface-zoo/customize.py new file mode 100644 index 0000000000..8770e5bcb4 --- /dev/null +++ b/script/get-ml-model-huggingface-zoo/customize.py @@ -0,0 +1,50 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + cm = automation.cmind + + script_path = i['run_script_input']['path'] + + path = env.get('CM_DOWNLOAD_PATH', '') + if path == '': + path = os.getcwd() + + if env.get('CM_GIT_CLONE_REPO', '') != 'yes': + run_cmd = env.get('CM_PYTHON_BIN_WITH_PATH') + " " + os.path.join(script_path, 'download_model.py') + else: + run_cmd = '' + + env['CM_RUN_CMD'] = run_cmd + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env_key = env.get('CM_MODEL_ZOO_ENV_KEY', '') + + path_file = env.get('CM_ML_MODEL_FILE_WITH_PATH','') + if path_file!='': + path_dir = os.path.dirname(path_file) + + env['CM_ML_MODEL_PATH'] = path_dir + + if env_key!='': + env['CM_ML_MODEL_'+env_key+'_PATH'] = path_dir + + else: + path_dir = env['CM_ML_MODEL_PATH'] + + if env_key!='': + env['CM_ML_MODEL_'+env_key+'_FILE_WITH_PATH'] = path_dir + + return {'return':0} diff --git a/script/get-ml-model-huggingface-zoo/download_model.py b/script/get-ml-model-huggingface-zoo/download_model.py new file mode 100644 index 0000000000..4e6e9c86e8 --- /dev/null +++ b/script/get-ml-model-huggingface-zoo/download_model.py @@ -0,0 +1,107 @@ +from huggingface_hub import hf_hub_download +import os + +model_stub = os.environ.get('CM_MODEL_ZOO_STUB', '') +model_task = os.environ.get('CM_MODEL_TASK', '') + +revision = os.environ.get('CM_HF_REVISION','') + +if model_task == "prune": + print("Downloading model: " + model_stub) + + for filename in ["pytorch_model.bin", "config.json"]: + + downloaded_model_path = hf_hub_download(repo_id=model_stub, + filename=filename, + cache_dir=os.getcwd()) + + with open('tmp-run-env.out', 'w') as f: + f.write(f"CM_ML_MODEL_FILE_WITH_PATH={os.path.join(os.getcwd(),'')}") + +else: + subfolder = os.environ.get('CM_HF_SUBFOLDER', '') + full_subfolder = os.environ.get('CM_HF_FULL_SUBFOLDER', '') + + model_filename = os.environ.get('CM_MODEL_ZOO_FILENAME', '') + if model_filename == '': + model_filename = 'model.onnx' + + model_filenames = model_filename.split(',') if ',' in model_filename else [model_filename] + + # First must be model + base_model_filename = model_filenames[0] + + files = [] + if full_subfolder!='': + + from huggingface_hub import HfFileSystem + fs = HfFileSystem() + + # List all files in a directory + path = model_stub+'/'+full_subfolder + + print ('') + print ('Listing files in {} ...'.format(path)) + + def list_hf_files(path): + all_files = [] + + xrevision = None if revision == '' else revision + files=fs.ls(path, revision=xrevision) #, detail=False) + + for f in files: + fname = f['name'] + fdir = f['type'] == 'directory' + + if fdir: + all_files += list_hf_files(fname) + else: + all_files.append(fname) + + return all_files + + + files=list_hf_files(path) + + print ('') + print ('Found {} files'.format(len(files))) + + for f in files: + + remove = len(model_stub)+1 + + if revision!='': + remove+=len(revision)+1 + + ff = f[remove:] + + if ff not in model_filenames: 
+ model_filenames.append(ff) + + + print ('') + for model_filename in model_filenames: + + print("Downloading file {} / {} ...".format(model_stub, model_filename)) + + extra_dir = os.path.dirname(model_filename) + + if extra_dir!='' and not os.path.exists(extra_dir): + os.makedirs(extra_dir) + + + xrevision = None if revision == '' else revision + xsubfolder = None if subfolder == '' else subfolder + + hf_hub_download(repo_id=model_stub, + subfolder=xsubfolder, + filename=model_filename, + force_filename=model_filename, + revision=xrevision, + cache_dir=os.getcwd()) + + + print ('') + + with open('tmp-run-env.out', 'w') as f: + f.write(f"CM_ML_MODEL_FILE_WITH_PATH={os.path.join(os.getcwd(),base_model_filename)}") diff --git a/script/get-ml-model-huggingface-zoo/run.bat b/script/get-ml-model-huggingface-zoo/run.bat new file mode 100644 index 0000000000..27155cb427 --- /dev/null +++ b/script/get-ml-model-huggingface-zoo/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\download_model.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-ml-model-huggingface-zoo/run.sh b/script/get-ml-model-huggingface-zoo/run.sh new file mode 100644 index 0000000000..111f4f2c80 --- /dev/null +++ b/script/get-ml-model-huggingface-zoo/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash +echo ${CM_RUN_CMD} +eval ${CM_RUN_CMD} +test $? -eq 0 || exit $? diff --git a/script/get-ml-model-llama2/README.md b/script/get-ml-model-llama2/README.md new file mode 100644 index 0000000000..b22c8b6ad0 --- /dev/null +++ b/script/get-ml-model-llama2/README.md @@ -0,0 +1,223 @@ +Automatically generated README for this automation recipe: **get-ml-model-llama2** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-llama2,5db97be9f61244c6) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-llama2)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get raw ml-model language-processing llama2 llama2-70b text-summarization" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization`
+
+`cm run script --tags=get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization[,variations] [--input_flags]`
+
+*or*
+
+`cmr "get raw ml-model language-processing llama2 llama2-70b text-summarization"`
+
+`cmr "get raw ml-model language-processing llama2 llama2-70b text-summarization [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
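+
+*Illustrative sketch selecting the smaller stub from the "huggingface-stub" variation group (see [Variations](#variations) below):*
+
+```python
+import cmind
+
+# Sketch: fetch Llama-2-7b-chat-hf instead of the default 70b stub;
+# equivalent to: cmr "get raw ml-model ... text-summarization _meta-llama/Llama-2-7b-chat-hf"
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization,_meta-llama/Llama-2-7b-chat-hf',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```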
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get raw ml-model language-processing llama2 llama2-70b text-summarization[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_batch_size.#` + - Environment variables: + - *CM_ML_MODEL_BATCH_SIZE*: `#` + - Workflow: + * `_pytorch,fp32` + - Workflow: + +
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * **`_pytorch`** (default) + - Environment variables: + - *CM_ML_MODEL_FRAMEWORK*: `pytorch` + - Workflow: + +
+ + + * Group "**huggingface-stub**" +
+ Click here to expand this section. + + * **`_meta-llama/Llama-2-70b-chat-hf`** (default) + - Environment variables: + - *CM_GIT_CHECKOUT_FOLDER*: `Llama-2-70b-chat-hf` + - *CM_MODEL_ZOO_ENV_KEY*: `LLAMA2` + - Workflow: + * `_meta-llama/Llama-2-7b-chat-hf` + - Environment variables: + - *CM_GIT_CHECKOUT_FOLDER*: `Llama-2-7b-chat-hf` + - *CM_MODEL_ZOO_ENV_KEY*: `LLAMA2` + - Workflow: + * `_stub.#` + - Environment variables: + - *CM_MODEL_ZOO_ENV_KEY*: `LLAMA2` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `fp32` + - *CM_ML_MODEL_PRECISION*: `fp32` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `fp32` + - Workflow: + * `_int8` + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `int8` + - *CM_ML_MODEL_PRECISION*: `int8` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `int8` + - Workflow: + * `_uint8` + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `uint8` + - *CM_ML_MODEL_PRECISION*: `uint8` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `uint8` + - Workflow: + +
+ + +#### Default variations + +`_fp32,_meta-llama/Llama-2-70b-chat-hf,_pytorch` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--checkpoint=value` → `LLAMA2_CHECKPOINT_PATH=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "checkpoint":...})
+```
+
+</details>
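+
+*A completed (hedged) version of the call above with a hypothetical path. Per [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-llama2/customize.py), when `checkpoint` points to an existing directory the Hugging Face download is skipped:*
+
+```python
+import cmind
+
+# Sketch: reuse a local checkpoint so that CM_TMP_REQUIRE_DOWNLOAD is not set
+# and the _clone-repo prehook dependency is skipped. Path is hypothetical.
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization',
+                  'checkpoint':'/path/to/Llama-2-70b-chat-hf',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```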
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+
+___
+### Dependencies on other CM scripts
+
+
+  1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-llama2/_cm.json)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-llama2/customize.py)***
+  1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-llama2/_cm.json)***
+     * get,ml-model,huggingface,zoo,_clone-repo
+       * `if (CM_TMP_REQUIRE_DOWNLOAD == yes)`
+       * CM names: `--adr.['hf-zoo']...`
+       - CM script: [get-ml-model-huggingface-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-huggingface-zoo)
+  1. ***Run native script if exists***
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-llama2/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-llama2/customize.py)***
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-llama2/_cm.json)
+
+___
+### Script output
+`cmr "get raw ml-model language-processing llama2 llama2-70b text-summarization [,variations]" [--input_flags] -j`
+#### New environment keys (filter)
+
+* `CM_ML_MODEL_*`
+* `LLAMA2_CHECKPOINT_PATH`
+#### New environment keys auto-detected from customize
+
+* `CM_ML_MODEL_PATH`
\ No newline at end of file
diff --git a/script/get-ml-model-llama2/_cm.json b/script/get-ml-model-llama2/_cm.json
new file mode 100644
index 0000000000..e8a011a50a
--- /dev/null
+++ b/script/get-ml-model-llama2/_cm.json
@@ -0,0 +1,127 @@
+{
+  "alias": "get-ml-model-llama2",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "cache": true,
+  "category": "AI/ML models",
+  "env": {
+    "CM_ML_MODEL_DATASET": "openorca",
+    "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "no"
+  },
+  "input_mapping": {
+    "checkpoint": "LLAMA2_CHECKPOINT_PATH"
+  },
+  "new_env_keys": [
+    "CM_ML_MODEL_*",
+    "LLAMA2_CHECKPOINT_PATH"
+  ],
+  "prehook_deps": [
+    {
+      "enable_if_env": {
+        "CM_TMP_REQUIRE_DOWNLOAD": [
+          "yes"
+        ]
+      },
+      "env": {
+      },
+      "force_env_keys": [
+        "CM_GIT_CHECKOUT_FOLDER"
+      ],
+      "names": [
+        "hf-zoo"
+      ],
+      "tags": "get,ml-model,huggingface,zoo,_clone-repo"
+    }
+  ],
+  "tags": [
+    "get",
+    "raw",
+    "ml-model",
+    "language-processing",
+    "llama2",
+    "llama2-70b",
+    "text-summarization"
+  ],
+  "uid": "5db97be9f61244c6",
+  "variations": {
+    "meta-llama/Llama-2-70b-chat-hf": {
+      "group": "huggingface-stub",
+      "default": true,
+      "env": {
+        "CM_GIT_CHECKOUT_FOLDER": "Llama-2-70b-chat-hf",
+        "CM_MODEL_ZOO_ENV_KEY": "LLAMA2"
+      },
+      "adr": {
+        "hf-zoo": {
+          "tags": "_model-stub.meta-llama/Llama-2-70b-chat-hf"
+        }
+      }
+    },
+    "meta-llama/Llama-2-7b-chat-hf": {
+      "group": "huggingface-stub",
+      "env": {
+        "CM_GIT_CHECKOUT_FOLDER": "Llama-2-7b-chat-hf",
+        "CM_MODEL_ZOO_ENV_KEY": "LLAMA2"
+      },
+      "adr": {
+        "hf-zoo": {
+          "tags": "_model-stub.meta-llama/Llama-2-7b-chat-hf"
+        }
+      }
+    },
+    "stub.#": {
+      "group": "huggingface-stub",
+      "env": {
+        "CM_MODEL_ZOO_ENV_KEY": "LLAMA2"
+      },
+      "adr": {
+        "hf-zoo": {
+          "tags": "_model-stub.#"
+        }
+      }
+    },
+    "batch_size.#": {
+      "env": {
+        "CM_ML_MODEL_BATCH_SIZE": "#"
+      }
+    },
+    "fp32": {
+      "default": true,
+      "env": {
+        "CM_ML_MODEL_INPUT_DATA_TYPES": "fp32",
+        "CM_ML_MODEL_PRECISION": "fp32",
+        "CM_ML_MODEL_WEIGHT_DATA_TYPES": "fp32"
+      },
+      "group": "precision"
+    },
+
"int8": { + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "int8", + "CM_ML_MODEL_PRECISION": "int8", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "int8" + }, + "group": "precision" + }, + "pytorch": { + "default": true, + "env": { + "CM_ML_MODEL_FRAMEWORK": "pytorch" + }, + "group": "framework" + }, + "pytorch,fp32": { + "env": {} + }, + "uint8": { + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "uint8", + "CM_ML_MODEL_PRECISION": "uint8", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "uint8" + }, + "group": "precision" + } + }, + "print_env_at_the_end" : { + "LLAMA2_CHECKPOINT_PATH": "LLAMA2 checkpoint path" + } +} diff --git a/script/get-ml-model-llama2/customize.py b/script/get-ml-model-llama2/customize.py new file mode 100644 index 0000000000..66edd085f8 --- /dev/null +++ b/script/get-ml-model-llama2/customize.py @@ -0,0 +1,24 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + path = env.get('LLAMA2_CHECKPOINT_PATH', '').strip() + + if path == '' or not os.path.exists(path): + env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env['LLAMA2_CHECKPOINT_PATH'] = env['CM_ML_MODEL_PATH'] + env['CM_ML_MODEL_PATH'] = env['LLAMA2_CHECKPOINT_PATH'] + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH'] + + return {'return':0} diff --git a/script/get-ml-model-mobilenet/README-extra.md b/script/get-ml-model-mobilenet/README-extra.md new file mode 100644 index 0000000000..63766e9605 --- /dev/null +++ b/script/get-ml-model-mobilenet/README-extra.md @@ -0,0 +1,15 @@ +# Get ML Model MobileNet +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the MobileNet model and adds it to CM cache with relevant meta data. + +## How To +```bash +cm run script --tags=get,ml-model,mobilenet,_[VARIATION] +``` +where, +* `[VARIATION]` is one of `tf-fp32`, `tf-int8`, `onnx-v1-opset-8`, `onnx-v1-opset-11`, `onnx-int8`. + +## Exported Variables +* `CM_ML_MODEL_FILE:` Model filename +* `CM_ML_MODEL_FILE_WITH_PATH:` Full path to model file +* `CM_ML_MODEL_PATH:` Path to folder containing the model file +* More env variables being exported are given in [cm.json file](_cm.json) diff --git a/script/get-ml-model-mobilenet/README.md b/script/get-ml-model-mobilenet/README.md new file mode 100644 index 0000000000..887cf9d012 --- /dev/null +++ b/script/get-ml-model-mobilenet/README.md @@ -0,0 +1,472 @@ +Automatically generated README for this automation recipe: **get-ml-model-mobilenet** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-mobilenet,ce46675a3ab249e4) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-mobilenet)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,ml-model,mobilenet,raw,ml-model-mobilenet,image-classification* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get ml-model mobilenet raw ml-model-mobilenet image-classification" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,ml-model,mobilenet,raw,ml-model-mobilenet,image-classification`
+
+`cm run script --tags=get,ml-model,mobilenet,raw,ml-model-mobilenet,image-classification[,variations] `
+
+*or*
+
+`cmr "get ml-model mobilenet raw ml-model-mobilenet image-classification"`
+
+`cmr "get ml-model mobilenet raw ml-model-mobilenet image-classification [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,mobilenet,raw,ml-model-mobilenet,image-classification',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="get,ml-model,mobilenet,raw,ml-model-mobilenet,image-classification"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ml-model,mobilenet,raw,ml-model-mobilenet,image-classification) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "get ml-model mobilenet raw ml-model-mobilenet image-classification[variations]" `
+
+___
+### Customization
+
+
+#### Variations
+
+  * *Internal group (variations should not be selected manually)*
+ Click here to expand this section. + + * `_quantized_` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_NAME_SUFFIX*: `_quant` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `yes` + - Workflow: + * `_tf,from.google,v2,quantized_` + - Environment variables: + - *CM_PACKAGE_URL*: `https://storage.googleapis.com/mobilenet_v2/checkpoints/<<>>_v2_<<>>_<<>>.tgz` + - *CM_ML_MODEL_WEIGHTS_FILE*: `<<>>_v2_<<>>_<<>>.ckpt.data-00000-of-00001` + - *CM_ML_MODEL_FILE*: `model.tflite` + - *CM_EXTRACT_FOLDER*: `v2_<<>>_<<>>` + - *CM_UNTAR*: `yes` + - Workflow: + +
+ + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_onnx,fp32,v1` + - Environment variables: + - *CM_ML_MODEL_NORMALIZE_DATA*: `yes` + - *CM_ML_MODEL_SUBTRACT_MEANS*: `no` + - *CM_ML_MODEL_VER*: `1_1.0_224` + - *CM_ML_MODEL_INPUT_LAYER_NAME*: `input:0` + - *CM_ML_MODEL_OUTPUT_LAYER_NAME*: `MobilenetV1/Predictions/Reshape_1:0` + - Workflow: + * `_onnx,int8,v1` + - Environment variables: + - *CM_ML_MODEL_NORMALIZE_DATA*: `no` + - *CM_ML_MODEL_SUBTRACT_MEANS*: `yes` + - *CM_ML_MODEL_GIVEN_CHANNEL_MEANS*: `128.0 128.0 128.0` + - *CM_ML_MODEL_VER*: `1_1.0_224_quant` + - *CM_ML_MODEL_INPUT_LAYER_NAME*: `0` + - *CM_ML_MODEL_OUTPUT_LAYER_NAME*: `169` + - *CM_PACKAGE_URL*: `https://zenodo.org/record/3353417/files/Quantized%20MobileNet.zip` + - *CM_ML_MODEL_FILE*: `mobilenet_sym_no_bn.onnx` + - *CM_UNZIP*: `yes` + - Workflow: + * `_onnx,opset-11,fp32,v1` + - Environment variables: + - *CM_PACKAGE_URL*: `https://zenodo.org/record/4735651/files/mobilenet_v1_1.0_224.onnx` + - Workflow: + * `_onnx,opset-8,fp32,v1` + - Environment variables: + - *CM_PACKAGE_URL*: `https://zenodo.org/record/3157894/files/mobilenet_v1_1.0_224.onnx` + - Workflow: + * `_tf,fp32,v1,resolution-224,multiplier-1.0` + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `71.676` + - Workflow: + * `_tf,from.google,v1` + - Environment variables: + - *CM_PACKAGE_URL*: `http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_<<>>_<<>><<>>.tgz` + - *CM_UNTAR*: `yes` + - Workflow: + * `_tf,from.google,v2,fp32` + - Environment variables: + - *CM_PACKAGE_URL*: `https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_<<>>_<<>>.tgz` + - *CM_ML_MODEL_WEIGHTS_FILE*: `mobilenet_v2_<<>>_<<>>.ckpt.data-00000-of-00001` + - *CM_ML_MODEL_FILE*: `mobilenet_v2_<<>>_<<>>.tflite` + - *CM_UNTAR*: `yes` + - Workflow: + * `_tf,from.google,v3` + - Environment variables: + - *CM_PACKAGE_URL*: `https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-<<>>_<<>>_<<>>_<<>>.tgz` + - *CM_EXTRACT_FOLDER*: `v3-<<>>_<<>>_<<>>_<<>>` + - *CM_ML_MODEL_FILE*: `v3-<<>>_<<>>_<<>>_<<>>.tflite` + - *CM_UNTAR*: `yes` + - Workflow: + * `_tf,from.zenodo,v1` + - Environment variables: + - *CM_PACKAGE_URL*: `https://zenodo.org/record/2269307/files/mobilenet_v1_<<>>_<<>><<>>.tgz` + - *CM_UNTAR*: `yes` + - Workflow: + * `_tf,int8,v1,resolution-224,multiplier-1.0` + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `70.762` + - Workflow: + * `_tf,v1` + - Environment variables: + - *CM_ML_MODEL_VER*: `1_<<>>_<<>><<>>_2018_08_02` + - *CM_ML_MODEL_OUTPUT_LAYER_NAME*: `MobilenetV1/Predictions/Reshape_1` + - *CM_ML_MODEL_WEIGHTS_FILE*: `mobilenet_v1_<<>>_<<>><<>>.ckpt.data-00000-of-00001` + - *CM_ML_MODEL_FILE*: `mobilenet_v1_<<>>_<<>><<>>.tflite` + - Workflow: + * `_tf,v1,fp32` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_NAME_PREFIX*: `` + - Workflow: + * `_tf,v1,int8` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_NAME_SUFFIX*: `_quant` + - Workflow: + * `_tf,v1,uint8` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_NAME_SUFFIX*: `_quant` + - Workflow: + * `_tf,v2,fp32` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_NAME_PREFIX*: `` + - *CM_ML_MODEL_VER*: `2_<<>>_<<>>` + - *CM_ML_MODEL_OUTPUT_LAYER_NAME*: `MobilenetV2/Predictions/Reshape_1` + - Workflow: + * `_tf,v2,int8` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_NAME_PREFIX*: `quantized` + - *CM_ML_MODEL_VER*: `2_<<>>_<<>>` + - *CM_ML_MODEL_OUTPUT_LAYER_NAME*: `MobilenetV2/Predictions/Softmax` + - Workflow: + * `_tf,v2,uint8` + - Environment variables: + - 
*CM_ML_MODEL_MOBILENET_NAME_PREFIX*: `quantized` + - *CM_ML_MODEL_VER*: `2_<<>>_<<>>` + - *CM_ML_MODEL_OUTPUT_LAYER_NAME*: `MobilenetV2/Predictions/Softmax` + - Workflow: + * `_tf,v3` + - Environment variables: + - *CM_ML_MODEL_VER*: `3_<<>>_<<>>` + - *CM_ML_MODEL_OUTPUT_LAYER_NAME*: `MobilenetV3/Predictions/Softmax` + - Workflow: + * `_tflite` + - Workflow: + +
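+Note that the comma-joined entries above (such as `_onnx,opset-11,fp32,v1`) are combinations that CM activates automatically once all of their constituent variations are selected; they are not passed by hand. As a hedged sketch (assuming CM and this repository are installed):
+
+```python
+import cmind
+
+# Selecting _onnx, _v1 and _opset-11 together also triggers the combined
+# "onnx,opset-11,fp32,v1" entry above (fp32 is the default precision),
+# which supplies the package URL for the download.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,ml-model,mobilenet,_onnx,_v1,_opset-11',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```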
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * `_onnx` + - Environment variables: + - *CM_ML_MODEL_DATA_LAYOUT*: `NCHW` + - *CM_ML_MODEL_FRAMEWORK*: `onnx` + - Workflow: + * **`_tf`** (default) + - Environment variables: + - *CM_ML_MODEL_DATA_LAYOUT*: `NHWC` + - *CM_ML_MODEL_NORMALIZE_DATA*: `yes` + - *CM_ML_MODEL_SUBTRACT_MEANS*: `no` + - *CM_ML_MODEL_INPUT_LAYER_NAME*: `input` + - Workflow: + +
+ + + * Group "**kind**" +
+ Click here to expand this section. + + * `_large` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_KIND*: `large` + - Workflow: + * `_large-minimalistic` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_KIND*: `large-minimalistic` + - Workflow: + * `_small` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_KIND*: `small` + - Workflow: + * `_small-minimalistic` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_KIND*: `small-minimalistic` + - Workflow: + +
+ + + * Group "**multiplier**" +
+ Click here to expand this section. + + * `_multiplier-0.25` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_MULTIPLIER*: `0.25` + - *CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE*: `25` + - Workflow: + * `_multiplier-0.35` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_MULTIPLIER*: `0.35` + - *CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE*: `35` + - Workflow: + * `_multiplier-0.5` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_MULTIPLIER*: `0.5` + - *CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE*: `50` + - Workflow: + * `_multiplier-0.75` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_MULTIPLIER*: `0.75` + - *CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE*: `75` + - Workflow: + * `_multiplier-1.0` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_MULTIPLIER*: `1.0` + - *CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE*: `100` + - Workflow: + +
+ + + * Group "**opset-version**" +
+ Click here to expand this section. + + * `_opset-11` + - Environment variables: + - *CM_ML_MODEL_ONNX_OPSET*: `11` + - Workflow: + * `_opset-8` + - Environment variables: + - *CM_ML_MODEL_ONNX_OPSET*: `8` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Environment variables: + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_PRECISION*: `fp32` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_MOBILENET_PRECISION*: `float` + - Workflow: + * `_int8` + - Environment variables: + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_PRECISION*: `int8` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_MOBILENET_PRECISION*: `int8` + - Workflow: + * `_uint8` + - Environment variables: + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `uint8` + - *CM_ML_MODEL_PRECISION*: `uint8` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `uint8` + - *CM_ML_MODEL_MOBILENET_PRECISION*: `uint8` + - Workflow: + +
+ + + * Group "**resolution**" +
+ Click here to expand this section. + + * `_resolution-128` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_RESOLUTION*: `128` + - *CM_ML_MODEL_IMAGE_HEIGHT*: `128` + - *CM_ML_MODEL_IMAGE_WIDTH*: `128` + - *CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS*: `_resolution.128` + - Workflow: + * `_resolution-160` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_RESOLUTION*: `160` + - *CM_ML_MODEL_IMAGE_HEIGHT*: `160` + - *CM_ML_MODEL_IMAGE_WIDTH*: `160` + - *CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS*: `_resolution.160` + - Workflow: + * `_resolution-192` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_RESOLUTION*: `192` + - *CM_ML_MODEL_IMAGE_HEIGHT*: `192` + - *CM_ML_MODEL_IMAGE_WIDTH*: `192` + - *CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS*: `_resolution.192` + - Workflow: + * `_resolution-224` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_RESOLUTION*: `224` + - *CM_ML_MODEL_IMAGE_HEIGHT*: `224` + - *CM_ML_MODEL_IMAGE_WIDTH*: `224` + - *CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS*: `_resolution.224` + - Workflow: + +
+ + + * Group "**source**" +
+ Click here to expand this section. + + * `_from.google` + - Environment variables: + - *CM_DOWNLOAD_SOURCE*: `google` + - Workflow: + * `_from.zenodo` + - Environment variables: + - *CM_DOWNLOAD_SOURCE*: `zenodo` + - Workflow: + +
+ + + * Group "**version**" +
+ Click here to expand this section. + + * `_v1` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_VERSION*: `1` + - *CM_ML_MODEL_FULL_NAME*: `mobilenet-v1-precision_<<>>-<<>>-<<>>` + - Workflow: + * `_v2` + - Environment variables: + - *CM_ML_MODEL_MOBILENET_VERSION*: `2` + - *CM_ML_MODEL_VER*: `2` + - *CM_ML_MODEL_FULL_NAME*: `mobilenet-v2-precision_<<>>-<<>>-<<>>` + - Workflow: + * **`_v3`** (default) + - Environment variables: + - *CM_ML_MODEL_MOBILENET_VERSION*: `3` + - *CM_ML_MODEL_VER*: `3` + - *CM_ML_MODEL_FULL_NAME*: `mobilenet-v3-precision_<<>>-<<>>-<<>>` + - Workflow: + +
+
+
+#### Default variations
+
+`_fp32,_tf,_v3`
+
+#### Default environment
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+* CM_ML_MODEL: `mobilenet`
+* CM_ML_MODEL_DATASET: `imagenet2012-val`
+* CM_ML_MODEL_RETRAINING: `no`
+* CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `no`
+* CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+* CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+* CM_ML_MODEL_MOBILENET_NAME_SUFFIX: ``
+
+</details>
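+To illustrate how the groups above combine, here is a hedged sketch (assuming CM and this repository are installed) that picks one variation from several groups and overrides one of the default environment keys; any group left unspecified keeps its default from `_fp32,_tf,_v3`:
+
+```python
+import cmind
+
+# Request the TensorFlow MobileNet-v2 uint8 model at a non-default
+# resolution and multiplier; CM_ML_MODEL_RETRAINING overrides the
+# default "no" listed above.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,ml-model,mobilenet,_tf,_v2,_uint8,_resolution-192,_multiplier-0.5',
+                  'env': {'CM_ML_MODEL_RETRAINING': 'yes'},
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```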
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-mobilenet/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-mobilenet/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-mobilenet/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-mobilenet/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-mobilenet/_cm.json) + +___ +### Script output +`cmr "get ml-model mobilenet raw ml-model-mobilenet image-classification [,variations]" -j` +#### New environment keys (filter) + +* `CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS` +* `CM_ML_MODEL_*` +#### New environment keys auto-detected from customize + +* `CM_ML_MODEL_FILE` +* `CM_ML_MODEL_FILE_WITH_PATH` +* `CM_ML_MODEL_PATH` +* `CM_ML_MODEL_STARTING_WEIGHTS_FILENAME` \ No newline at end of file diff --git a/script/get-ml-model-mobilenet/_cm.json b/script/get-ml-model-mobilenet/_cm.json new file mode 100644 index 0000000000..c4d1e2cd4c --- /dev/null +++ b/script/get-ml-model-mobilenet/_cm.json @@ -0,0 +1,382 @@ +{ + "alias": "get-ml-model-mobilenet", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML models", + "default_env": { + "CM_ML_MODEL": "mobilenet", + "CM_ML_MODEL_DATASET": "imagenet2012-val", + "CM_ML_MODEL_RETRAINING": "no", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "no", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_MOBILENET_NAME_SUFFIX": "" + }, + "new_env_keys": [ + "CM_ML_MODEL_*", + "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS" + ], + "tags": [ + "get", + "ml-model", + "mobilenet", + "raw", + "ml-model-mobilenet", + "image-classification" + ], + "uid": "ce46675a3ab249e4", + "variations": { + "fp32": { + "group": "precision", + "default": true, + "env": { + "CM_ML_MODEL_INPUTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_PRECISION": "fp32", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_MOBILENET_PRECISION": "float" + } + }, + "uint8": { + "group": "precision", + "base": [ "quantized_" ], + "env": { + "CM_ML_MODEL_INPUTS_DATA_TYPE": "uint8", + "CM_ML_MODEL_PRECISION": "uint8", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "uint8", + "CM_ML_MODEL_MOBILENET_PRECISION": "uint8" + } + }, + "int8": { + "group": "precision", + "base": [ "quantized_" ], + "env": { + "CM_ML_MODEL_INPUTS_DATA_TYPE": "int8", + "CM_ML_MODEL_PRECISION": "int8", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "int8", + "CM_ML_MODEL_MOBILENET_PRECISION": "int8" + } + }, + "quantized_": { + "env": { + "CM_ML_MODEL_MOBILENET_NAME_SUFFIX": "_quant", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "yes" + } + }, + "onnx,fp32,v1": { + "env": { + "CM_ML_MODEL_NORMALIZE_DATA": "yes", + "CM_ML_MODEL_SUBTRACT_MEANS": "no", + "CM_ML_MODEL_VER": "1_1.0_224", + "CM_ML_MODEL_INPUT_LAYER_NAME": "input:0", + "CM_ML_MODEL_OUTPUT_LAYER_NAME": "MobilenetV1/Predictions/Reshape_1:0" + } + }, + "onnx,int8,v1": { + "env": { + "CM_ML_MODEL_NORMALIZE_DATA": "no", + "CM_ML_MODEL_SUBTRACT_MEANS": "yes", + "CM_ML_MODEL_GIVEN_CHANNEL_MEANS": "128.0 128.0 128.0", + 
"CM_ML_MODEL_VER": "1_1.0_224_quant", + "CM_ML_MODEL_INPUT_LAYER_NAME": "0", + "CM_ML_MODEL_OUTPUT_LAYER_NAME": "169", + "CM_PACKAGE_URL": "https://zenodo.org/record/3353417/files/Quantized%20MobileNet.zip", + "CM_ML_MODEL_FILE": "mobilenet_sym_no_bn.onnx", + "CM_UNZIP": "yes" + } + }, + "onnx": { + "group": "framework", + "env": { + "CM_ML_MODEL_DATA_LAYOUT": "NCHW", + "CM_ML_MODEL_FRAMEWORK": "onnx" + } + }, + "opset-11": { + "group": "opset-version", + "env": { + "CM_ML_MODEL_ONNX_OPSET": "11" + } + }, + "opset-8": { + "group": "opset-version", + "env": { + "CM_ML_MODEL_ONNX_OPSET": "8" + } + }, + "onnx,opset-11,fp32,v1": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/4735651/files/mobilenet_v1_1.0_224.onnx" + } + }, + "onnx,opset-8,fp32,v1": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/3157894/files/mobilenet_v1_1.0_224.onnx" + } + }, + "v1": { + "group": "version", + "default_variations": { + "resolution": "resolution-224", + "multiplier": "multiplier-1.0" + }, + "env": { + "CM_ML_MODEL_MOBILENET_VERSION": "1", + "CM_ML_MODEL_FULL_NAME": "mobilenet-v1-precision_<<>>-<<>>-<<>>" + } + }, + "v2": { + "group": "version", + "default_variations": { + "resolution": "resolution-224", + "multiplier": "multiplier-1.0" + }, + "env": { + "CM_ML_MODEL_MOBILENET_VERSION": "2", + "CM_ML_MODEL_VER": "2", + "CM_ML_MODEL_FULL_NAME": "mobilenet-v2-precision_<<>>-<<>>-<<>>" + } + }, + "v3": { + "group": "version", + "default": true, + "default_variations": { + "resolution": "resolution-224", + "multiplier": "multiplier-1.0" + }, + "env": { + "CM_ML_MODEL_MOBILENET_VERSION": "3", + "CM_ML_MODEL_VER": "3", + "CM_ML_MODEL_FULL_NAME": "mobilenet-v3-precision_<<>>-<<>>-<<>>" + } + }, + "tf,v1": { + "env": { + "CM_ML_MODEL_VER": "1_<<>>_<<>><<>>_2018_08_02", + "CM_ML_MODEL_OUTPUT_LAYER_NAME": "MobilenetV1/Predictions/Reshape_1", + "CM_ML_MODEL_WEIGHTS_FILE": "mobilenet_v1_<<>>_<<>><<>>.ckpt.data-00000-of-00001", + "CM_ML_MODEL_FILE": "mobilenet_v1_<<>>_<<>><<>>.tflite" + } + }, + "tf,v3": { + "env": { + "CM_ML_MODEL_VER": "3_<<>>_<<>>", + "CM_ML_MODEL_OUTPUT_LAYER_NAME": "MobilenetV3/Predictions/Softmax" + } + }, + "resolution-128": { + "group": "resolution", + "env": { + "CM_ML_MODEL_MOBILENET_RESOLUTION": "128", + "CM_ML_MODEL_IMAGE_HEIGHT": "128", + "CM_ML_MODEL_IMAGE_WIDTH": "128", + "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS": "_resolution.128" + } + }, + "resolution-160": { + "group": "resolution", + "env": { + "CM_ML_MODEL_MOBILENET_RESOLUTION": "160", + "CM_ML_MODEL_IMAGE_HEIGHT": "160", + "CM_ML_MODEL_IMAGE_WIDTH": "160", + "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS": "_resolution.160" + } + }, + "resolution-192": { + "group": "resolution", + "env": { + "CM_ML_MODEL_MOBILENET_RESOLUTION": "192", + "CM_ML_MODEL_IMAGE_HEIGHT": "192", + "CM_ML_MODEL_IMAGE_WIDTH": "192", + "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS": "_resolution.192" + } + }, + "resolution-224": { + "group": "resolution", + "env": { + "CM_ML_MODEL_MOBILENET_RESOLUTION": "224", + "CM_ML_MODEL_IMAGE_HEIGHT": "224", + "CM_ML_MODEL_IMAGE_WIDTH": "224", + "CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS": "_resolution.224" + } + }, + "multiplier-1.0": { + "group": "multiplier", + "env": { + "CM_ML_MODEL_MOBILENET_MULTIPLIER": "1.0", + "CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE": "100" + } + }, + "multiplier-0.75": { + "group": "multiplier", + "env": { + "CM_ML_MODEL_MOBILENET_MULTIPLIER": "0.75", + "CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE": "75" + } + }, + "multiplier-0.5": { + "group": "multiplier", + "env": { + 
"CM_ML_MODEL_MOBILENET_MULTIPLIER": "0.5", + "CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE": "50" + } + }, + "multiplier-0.35": { + "group": "multiplier", + "env": { + "CM_ML_MODEL_MOBILENET_MULTIPLIER": "0.35", + "CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE": "35" + } + }, + "multiplier-0.25": { + "group": "multiplier", + "env": { + "CM_ML_MODEL_MOBILENET_MULTIPLIER": "0.25", + "CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE": "25" + } + }, + "large-minimalistic": { + "group": "kind", + "env": { + "CM_ML_MODEL_MOBILENET_KIND": "large-minimalistic" + } + }, + "large": { + "group": "kind", + "env": { + "CM_ML_MODEL_MOBILENET_KIND": "large" + } + }, + "small-minimalistic": { + "group": "kind", + "default_variations": { + "precision": "fp32" + }, + "env": { + "CM_ML_MODEL_MOBILENET_KIND": "small-minimalistic" + } + }, + "small": { + "group": "kind", + "env": { + "CM_ML_MODEL_MOBILENET_KIND": "small" + } + }, + "tf": { + "group": "framework", + "default": true, + "default_variations": { + "source": "from.google" + }, + "env": { + "CM_ML_MODEL_DATA_LAYOUT": "NHWC", + "CM_ML_MODEL_NORMALIZE_DATA": "yes", + "CM_ML_MODEL_SUBTRACT_MEANS": "no", + "CM_ML_MODEL_INPUT_LAYER_NAME": "input" + } + }, + "from.zenodo": { + "group": "source", + "env": { + "CM_DOWNLOAD_SOURCE": "zenodo" + } + }, + "from.google": { + "group": "source", + "env": { + "CM_DOWNLOAD_SOURCE": "google" + } + }, + "tf,from.zenodo,v1": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/2269307/files/mobilenet_v1_<<>>_<<>><<>>.tgz", + "CM_UNTAR": "yes" + } + }, + "tf,from.google,v1": { + "env": { + "CM_PACKAGE_URL": "http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_<<>>_<<>><<>>.tgz", + "CM_UNTAR": "yes" + } + }, + "tf,from.google,v2,quantized_": { + "env": { + "CM_PACKAGE_URL": "https://storage.googleapis.com/mobilenet_v2/checkpoints/<<>>_v2_<<>>_<<>>.tgz", + "CM_ML_MODEL_WEIGHTS_FILE": "<<>>_v2_<<>>_<<>>.ckpt.data-00000-of-00001", + "CM_ML_MODEL_FILE": "model.tflite", + "CM_EXTRACT_FOLDER": "v2_<<>>_<<>>", + "CM_UNTAR": "yes" + } + }, + "tf,from.google,v2,fp32": { + "env": { + "CM_PACKAGE_URL": "https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_<<>>_<<>>.tgz", + "CM_ML_MODEL_WEIGHTS_FILE": "mobilenet_v2_<<>>_<<>>.ckpt.data-00000-of-00001", + "CM_ML_MODEL_FILE": "mobilenet_v2_<<>>_<<>>.tflite", + "CM_UNTAR": "yes" + } + }, + "tf,from.google,v3": { + "env": { + "CM_PACKAGE_URL": "https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-<<>>_<<>>_<<>>_<<>>.tgz", + "CM_EXTRACT_FOLDER": "v3-<<>>_<<>>_<<>>_<<>>", + "CM_ML_MODEL_FILE": "v3-<<>>_<<>>_<<>>_<<>>.tflite", + "CM_UNTAR": "yes" + } + }, + "tf,v2,int8": { + "env": { + "CM_ML_MODEL_MOBILENET_NAME_PREFIX": "quantized", + "CM_ML_MODEL_VER": "2_<<>>_<<>>", + "CM_ML_MODEL_OUTPUT_LAYER_NAME": "MobilenetV2/Predictions/Softmax" + } + }, + "tf,v2,uint8": { + "env": { + "CM_ML_MODEL_MOBILENET_NAME_PREFIX": "quantized", + "CM_ML_MODEL_VER": "2_<<>>_<<>>", + "CM_ML_MODEL_OUTPUT_LAYER_NAME": "MobilenetV2/Predictions/Softmax" + } + }, + "tf,v2,fp32": { + "env": { + "CM_ML_MODEL_MOBILENET_NAME_PREFIX": "", + "CM_ML_MODEL_VER": "2_<<>>_<<>>", + "CM_ML_MODEL_OUTPUT_LAYER_NAME": "MobilenetV2/Predictions/Reshape_1" + } + }, + "tf,v1,int8": { + "env": { + "CM_ML_MODEL_MOBILENET_NAME_SUFFIX": "_quant" + } + }, + "tf,v1,uint8": { + "env": { + "CM_ML_MODEL_MOBILENET_NAME_SUFFIX": "_quant" + } + }, + "tf,v1,fp32": { + "env": { + "CM_ML_MODEL_MOBILENET_NAME_PREFIX": "" + } + }, + "tf,int8,v1,resolution-224,multiplier-1.0": { + "env": { + 
"CM_ML_MODEL_ACCURACY": "70.762" + } + }, + "tf,fp32,v1,resolution-224,multiplier-1.0": { + "env": { + "CM_ML_MODEL_ACCURACY": "71.676" + } + }, + "tflite": { + "base": [ + "tf" + ] + } + }, + "print_env_at_the_end" : { + "CM_ML_MODEL_FILE_WITH_PATH": "Path to the ML model" + } +} diff --git a/script/get-ml-model-mobilenet/customize.py b/script/get-ml-model-mobilenet/customize.py new file mode 100644 index 0000000000..5571383453 --- /dev/null +++ b/script/get-ml-model-mobilenet/customize.py @@ -0,0 +1,52 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + cm = automation.cmind + + path = os.getcwd() + + url = env['CM_PACKAGE_URL'] + env['CM_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url + + print ('Downloading from {}'.format(url)) + + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', + 'url':url}) + if r['return']>0: return r + + filename = r['filename'] + + if env.get('CM_UNZIP') == "yes" or env.get('CM_UNTAR') == "yes": + if env.get('CM_UNZIP') == "yes": + cmd="unzip " + elif env.get('CM_UNTAR') == "yes": + cmd="tar -xvzf " + os.system(cmd+filename) + + filename = env['CM_ML_MODEL_FILE'] + + extract_folder = env.get('CM_EXTRACT_FOLDER', '') + + if extract_folder: + env['CM_ML_MODEL_FILE_WITH_PATH']=os.path.join(path, extract_folder, filename) + else: + env['CM_ML_MODEL_FILE_WITH_PATH']=os.path.join(path, filename) + else: + env['CM_ML_MODEL_FILE']=filename + env['CM_ML_MODEL_FILE_WITH_PATH']=r['path'] + + env['CM_ML_MODEL_PATH']=path + + if not os.path.exists(env['CM_ML_MODEL_FILE_WITH_PATH']): + return {'return':1, 'error': f"Model file path {env['CM_ML_MODEL_FILE_WITH_PATH']} not existing. Probably the model name {env['CM_ML_MODEL_FILE']} in model meta is wrong"} + + return {'return':0} diff --git a/script/get-ml-model-neuralmagic-zoo/README.md b/script/get-ml-model-neuralmagic-zoo/README.md new file mode 100644 index 0000000000..748c025bd9 --- /dev/null +++ b/script/get-ml-model-neuralmagic-zoo/README.md @@ -0,0 +1,337 @@ +Automatically generated README for this automation recipe: **get-ml-model-neuralmagic-zoo** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-neuralmagic-zoo,adbb3f2525a14f97) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-neuralmagic-zoo)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,ml-model,model,zoo,deepsparse,model-zoo,sparse-zoo,neuralmagic,neural-magic* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,ml-model,model,zoo,deepsparse,model-zoo,sparse-zoo,neuralmagic,neural-magic`
+
+`cm run script --tags=get,ml-model,model,zoo,deepsparse,model-zoo,sparse-zoo,neuralmagic,neural-magic[,variations] `
+
+*or*
+
+`cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic"`
+
+`cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,model,zoo,deepsparse,model-zoo,sparse-zoo,neuralmagic,neural-magic',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="get,ml-model,model,zoo,deepsparse,model-zoo,sparse-zoo,neuralmagic,neural-magic"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ml-model,model,zoo,deepsparse,model-zoo,sparse-zoo,neuralmagic,neural-magic) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic[variations]" `
+
+___
+### Customization
+
+
+#### Variations
+
+  * *No group (any variation can be selected)*
+ Click here to expand this section. + + * `_bert-base-pruned90-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none` + - *CM_ML_MODEL_FULL_NAME*: `bert-base-pruned90-none-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://huggingface.co/bert-base-uncased` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_RETRAINING*: `no` + - Workflow: + * `_bert-base-pruned95_obs_quant-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none` + - *CM_ML_MODEL_FULL_NAME*: `bert-base-pruned95_obs_quant-none-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://huggingface.co/bert-base-uncased` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int64` + - *CM_ML_MODEL_RETRAINING*: `yes` + - Workflow: + * `_bert-base_cased-pruned90-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none` + - *CM_ML_MODEL_FULL_NAME*: `bert-base_cased-pruned90-none-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://huggingface.co/bert-base-cased` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_RETRAINING*: `no` + - Workflow: + * `_bert-large-base-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none` + - *CM_ML_MODEL_FULL_NAME*: `bert-large-base-none-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://huggingface.co/bert-large-uncased` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_RETRAINING*: `no` + - Workflow: + * `_bert-large-pruned80_quant-none-vnni` + - Aliases: `_model-stub.zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni` + - *CM_ML_MODEL_FULL_NAME*: `bert-large-pruned80_quant-none-vnni-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://huggingface.co/bert-large-uncased` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int64` + - *CM_ML_MODEL_RETRAINING*: `no` + - Workflow: + * `_mobilebert-14layer_pruned50-none-vnni` + - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: 
`zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni` + - *CM_ML_MODEL_FULL_NAME*: `mobilebert-14layer_pruned50-none-vnni-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_RETRAINING*: `no` + - Workflow: + * `_mobilebert-14layer_pruned50_quant-none-vnni` + - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni` + - *CM_ML_MODEL_FULL_NAME*: `mobilebert-14layer_pruned50_quant-none-vnni-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int64` + - *CM_ML_MODEL_RETRAINING*: `yes` + - Workflow: + * `_mobilebert-base_quant-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none` + - *CM_ML_MODEL_FULL_NAME*: `mobilebert-base_quant-none-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int64` + - *CM_ML_MODEL_RETRAINING*: `yes` + - Workflow: + * `_mobilebert-none-base-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none` + - *CM_ML_MODEL_FULL_NAME*: `mobilebert-none-base-none-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_RETRAINING*: `no` + - Workflow: + * `_model-stub.#` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `#` + - Workflow: + * `_obert-base-pruned90-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none` + - *CM_ML_MODEL_FULL_NAME*: `obert-base-pruned90-none-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://huggingface.co/bert-large-uncased` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_RETRAINING*: `no` + - Workflow: + * `_obert-large-base-none` + - Aliases: 
`_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none` + - *CM_ML_MODEL_FULL_NAME*: `obert-large-base-none-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://huggingface.co/bert-large-uncased` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_RETRAINING*: `no` + - Workflow: + * `_obert-large-pruned95-none-vnni` + - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni` + - *CM_ML_MODEL_FULL_NAME*: `obert-large-pruned95-none-vnni-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://huggingface.co/bert-large-uncased` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_RETRAINING*: `no` + - Workflow: + * `_obert-large-pruned95_quant-none-vnni` + - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni` + - *CM_ML_MODEL_FULL_NAME*: `obert-large-pruned95_quant-none-vnni-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://huggingface.co/bert-large-uncased` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int64` + - *CM_ML_MODEL_RETRAINING*: `yes` + - Workflow: + * `_obert-large-pruned97-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none` + - *CM_ML_MODEL_FULL_NAME*: `obert-large-pruned97-none-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://huggingface.co/bert-large-uncased` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `fp32` + - *CM_ML_MODEL_RETRAINING*: `no` + - Workflow: + * `_obert-large-pruned97-quant-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none` + - *CM_ML_MODEL_FULL_NAME*: `obert-large-pruned97-quant-none-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://huggingface.co/bert-large-uncased` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int64` + - *CM_ML_MODEL_RETRAINING*: `no` + - Workflow: + * `_oberta-base-pruned90-quant-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none` + - *CM_ML_MODEL_FULL_NAME*: 
`oberta-base-pruned90-quant-none-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://huggingface.co/roberta-base` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int64` + - *CM_ML_MODEL_RETRAINING*: `no` + - Workflow: + * `_roberta-base-pruned85-quant-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none` + - Environment variables: + - *CM_MODEL_ZOO_STUB*: `zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none` + - *CM_ML_MODEL_FULL_NAME*: `roberta-base-pruned85-quant-none-bert-99` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://huggingface.co/roberta-base` + - *CM_ML_MODEL_WEIGHT_TRANSFORMATIONS*: `quantization, unstructured pruning` + - *CM_ML_MODEL_WEIGHTS_DATA_TYPE*: `int8` + - *CM_ML_MODEL_INPUTS_DATA_TYPE*: `int64` + - *CM_ML_MODEL_RETRAINING*: `no` + - Workflow: + +
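+Since `_model-stub.#` accepts an arbitrary SparseZoo stub after the dot, the named variations above are essentially aliases for it. A minimal sketch (assuming CM and this repository are installed) using one of the stubs listed above:
+
+```python
+import cmind
+
+# The text after "_model-stub." is passed to the script as CM_MODEL_ZOO_STUB.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,ml-model,zoo,deepsparse,'
+                          '_model-stub.zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```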
+
+#### Default environment
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+
+</details>
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-neuralmagic-zoo/_cm.json)*** + * get,python3 + * CM names: `--adr.['python3', 'python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_package.protobuf + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_sparsezoo + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-neuralmagic-zoo/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-neuralmagic-zoo/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-neuralmagic-zoo/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-neuralmagic-zoo/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-neuralmagic-zoo/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-neuralmagic-zoo/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-neuralmagic-zoo/_cm.json) + +___ +### Script output +`cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic [,variations]" -j` +#### New environment keys (filter) + +* `CM_GET_DEPENDENT_CACHED_PATH` +* `CM_MLPERF_CUSTOM_MODEL_PATH` +* `CM_ML_MODEL*` +* `CM_MODEL_ZOO_STUB` +#### New environment keys auto-detected from customize + +* `CM_GET_DEPENDENT_CACHED_PATH` +* `CM_MLPERF_CUSTOM_MODEL_PATH` \ No newline at end of file diff --git a/script/get-ml-model-neuralmagic-zoo/_cm.json b/script/get-ml-model-neuralmagic-zoo/_cm.json new file mode 100644 index 0000000000..04a8a1ca52 --- /dev/null +++ b/script/get-ml-model-neuralmagic-zoo/_cm.json @@ -0,0 +1,302 @@ +{ + "alias": "get-ml-model-neuralmagic-zoo", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML models", + "env": { + }, + "new_env_keys": [ + "CM_ML_MODEL*", + "CM_MODEL_ZOO_STUB", + "CM_MLPERF_CUSTOM_MODEL_PATH", + "CM_GET_DEPENDENT_CACHED_PATH" + ], + "tags": [ + "get", + "ml-model", + "model", + "zoo", + "deepsparse", + "model-zoo", + "sparse-zoo", + "neuralmagic", + "neural-magic" + ], + "deps": [ + { + "tags": "get,python3", + "names": [ "python3", "python" ] + }, + { + "tags": "get,generic-python-lib,_package.protobuf", + "version_max": "3.20.1" + }, + { + "tags": "get,generic-python-lib,_sparsezoo" + } + ], + "uid": "adbb3f2525a14f97", + "variations": { + "bert-base-pruned95_obs_quant-none": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none", + "CM_ML_MODEL_FULL_NAME": "bert-base-pruned95_obs_quant-none-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://huggingface.co/bert-base-uncased", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "quantization, unstructured pruning", + 
"CM_ML_MODEL_WEIGHTS_DATA_TYPE": "int8", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "int64", + "CM_ML_MODEL_RETRAINING": "yes" + } + }, + "model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none": { + "alias": "bert-base-pruned95_obs_quant-none" + }, + "obert-large-pruned95_quant-none-vnni": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni", + "CM_ML_MODEL_FULL_NAME": "obert-large-pruned95_quant-none-vnni-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://huggingface.co/bert-large-uncased", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "quantization, unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "int8", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "int64", + "CM_ML_MODEL_RETRAINING": "yes" + } + }, + "model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni": { + "alias": "obert-large-pruned95_quant-none-vnni" + }, + "mobilebert-14layer_pruned50_quant-none-vnni": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni", + "CM_ML_MODEL_FULL_NAME": "mobilebert-14layer_pruned50_quant-none-vnni-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "quantization, unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "int8", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "int64", + "CM_ML_MODEL_RETRAINING": "yes" + } + }, + "model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni": { + "alias": "mobilebert-14layer_pruned50_quant-none-vnni" + }, + "mobilebert-base_quant-none": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none", + "CM_ML_MODEL_FULL_NAME": "mobilebert-base_quant-none-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "quantization, unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "int8", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "int64", + "CM_ML_MODEL_RETRAINING": "yes" + } + }, + "model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none": { + "alias": "mobilebert-base_quant-none" + }, + "mobilebert-none-base-none": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none", + "CM_ML_MODEL_FULL_NAME": "mobilebert-none-base-none-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_RETRAINING": "no" + } + }, + "model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none": { + "alias": "mobilebert-none-base-none" + }, + "oberta-base-pruned90-quant-none": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none", + "CM_ML_MODEL_FULL_NAME": "oberta-base-pruned90-quant-none-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://huggingface.co/roberta-base", + 
"CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "quantization, unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "int8", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "int64", + "CM_ML_MODEL_RETRAINING": "no" + } + }, + "model-stub.zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none": { + "alias": "oberta-base-pruned90-quant-none" + }, + "roberta-base-pruned85-quant-none": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none", + "CM_ML_MODEL_FULL_NAME": "roberta-base-pruned85-quant-none-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://huggingface.co/roberta-base", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "quantization, unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "int8", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "int64", + "CM_ML_MODEL_RETRAINING": "no" + } + }, + "model-stub.zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none": { + "alias": "roberta-base-pruned85-quant-none" + }, + "mobilebert-14layer_pruned50-none-vnni": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni", + "CM_ML_MODEL_FULL_NAME": "mobilebert-14layer_pruned50-none-vnni-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_RETRAINING": "no" + } + }, + "model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni": { + "alias": "mobilebert-14layer_pruned50-none-vnni" + }, + "obert-base-pruned90-none": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none", + "CM_ML_MODEL_FULL_NAME": "obert-base-pruned90-none-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://huggingface.co/bert-large-uncased", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_RETRAINING": "no" + } + }, + "model-stub.zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none": { + "alias": "obert-base-pruned90-none" + }, + "obert-large-pruned97-quant-none": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none", + "CM_ML_MODEL_FULL_NAME": "obert-large-pruned97-quant-none-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://huggingface.co/bert-large-uncased", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "quantization, unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "int8", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "int64", + "CM_ML_MODEL_RETRAINING": "no" + } + }, + "model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none": { + "alias": "obert-large-pruned97-quant-none" + }, + "bert-base_cased-pruned90-none": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none", + "CM_ML_MODEL_FULL_NAME": "bert-base_cased-pruned90-none-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://huggingface.co/bert-base-cased", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "fp32", + 
"CM_ML_MODEL_INPUTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_RETRAINING": "no" + } + }, + "model-stub.zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none": { + "alias": "bert-base_cased-pruned90-none" + }, + "bert-base-pruned90-none": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none", + "CM_ML_MODEL_FULL_NAME": "bert-base-pruned90-none-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://huggingface.co/bert-base-uncased", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_RETRAINING": "no" + } + }, + "model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none": { + "alias": "bert-base-pruned90-none" + }, + "bert-large-pruned80_quant-none-vnni": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni", + "CM_ML_MODEL_FULL_NAME": "bert-large-pruned80_quant-none-vnni-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://huggingface.co/bert-large-uncased", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "quantization, unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "int8", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "int64", + "CM_ML_MODEL_RETRAINING": "no" + } + }, + "model-stub.zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni": { + "alias": "bert-large-pruned80_quant-none-vnni" + }, + "obert-large-pruned95-none-vnni": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni", + "CM_ML_MODEL_FULL_NAME": "obert-large-pruned95-none-vnni-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://huggingface.co/bert-large-uncased", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_RETRAINING": "no" + } + }, + "model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni": { + "alias": "obert-large-pruned95-none-vnni" + }, + "obert-large-pruned97-none": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none", + "CM_ML_MODEL_FULL_NAME": "obert-large-pruned97-none-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://huggingface.co/bert-large-uncased", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_RETRAINING": "no" + } + }, + "model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none": { + "alias": "obert-large-pruned97-none" + }, + "bert-large-base-none": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none", + "CM_ML_MODEL_FULL_NAME": "bert-large-base-none-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://huggingface.co/bert-large-uncased", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_RETRAINING": "no" + } + }, + "model-stub.zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none": { + "alias": "bert-large-base-none" + }, + "obert-large-base-none": { + "env": { + "CM_MODEL_ZOO_STUB": 
"zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none", + "CM_ML_MODEL_FULL_NAME": "obert-large-base-none-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://huggingface.co/bert-large-uncased", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_RETRAINING": "no" + } + }, + "model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none": { + "alias": "obert-large-base-none" + }, + "mobilebert-none-base-none": { + "env": { + "CM_MODEL_ZOO_STUB": "zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none", + "CM_ML_MODEL_FULL_NAME": "mobilebert-none-base-none-bert-99", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "unstructured pruning", + "CM_ML_MODEL_WEIGHTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_INPUTS_DATA_TYPE": "fp32", + "CM_ML_MODEL_RETRAINING": "no" + } + }, + "model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none": { + "alias": "mobilebert-none-base-none" + }, + "model-stub.#": { + "env": { + "CM_MODEL_ZOO_STUB": "#" + } + } + }, + "print_env_at_the_end" : { + "CM_ML_MODEL_FILE_WITH_PATH": "Path to the ML model" + } +} diff --git a/script/get-ml-model-neuralmagic-zoo/customize.py b/script/get-ml-model-neuralmagic-zoo/customize.py new file mode 100644 index 0000000000..7ba85a9d58 --- /dev/null +++ b/script/get-ml-model-neuralmagic-zoo/customize.py @@ -0,0 +1,43 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + cm = automation.cmind + + path = os.getcwd() + + model_stub = env.get('CM_MODEL_ZOO_STUB', '') + if model_stub == '': + + variations = list(i.get('meta', {}).get('variations',{}).keys()) + + variation_models = [] + for v in variations: + if '#' not in v: + variation_models.append(v) + + return {'return':1, 'error':'ENV CM_MODEL_ZOO_STUB is not set. 
Please select variation from {}'.format(str(variation_models))}
+
+ return {'return':0}
+
+def postprocess(i):
+
+ os_info = i['os_info']
+
+ env = i['env']
+
+ env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH']
+
+ onnx_path = os.path.join(env['CM_ML_MODEL_FILE_WITH_PATH'], "model.onnx")
+
+ if os.path.exists(onnx_path):
+ env['CM_MLPERF_CUSTOM_MODEL_PATH'] = onnx_path
+
+ return {'return':0}
diff --git a/script/get-ml-model-neuralmagic-zoo/download_sparse.py b/script/get-ml-model-neuralmagic-zoo/download_sparse.py
new file mode 100644
index 0000000000..1da36774bd
--- /dev/null
+++ b/script/get-ml-model-neuralmagic-zoo/download_sparse.py
@@ -0,0 +1,10 @@
+from sparsezoo import Model
+import os
+
+model_stub = os.environ.get('CM_MODEL_ZOO_STUB', '')
+print(f"Downloading model {model_stub}")
+# Model() resolves the SparseZoo stub and downloads the files into the local cache
+model = Model(model_stub)
+
+with open('tmp-run-env.out', 'w') as f:
+ f.write(f"CM_ML_MODEL_FILE_WITH_PATH={model.path}")
diff --git a/script/get-ml-model-neuralmagic-zoo/run.bat b/script/get-ml-model-neuralmagic-zoo/run.bat
new file mode 100644
index 0000000000..854e9b6686
--- /dev/null
+++ b/script/get-ml-model-neuralmagic-zoo/run.bat
@@ -0,0 +1,2 @@
+%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\download_sparse.py
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-ml-model-neuralmagic-zoo/run.sh b/script/get-ml-model-neuralmagic-zoo/run.sh
new file mode 100644
index 0000000000..9d7d529be3
--- /dev/null
+++ b/script/get-ml-model-neuralmagic-zoo/run.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/download_sparse.py
diff --git a/script/get-ml-model-resnet50/README-extra.md b/script/get-ml-model-resnet50/README-extra.md
new file mode 100644
index 0000000000..42809e535e
--- /dev/null
+++ b/script/get-ml-model-resnet50/README-extra.md
@@ -0,0 +1,15 @@
+# Get ML Model ResNet50
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the ResNet50 model and adds it to the CM cache with the relevant metadata.
+
+## How To
+```bash
+cm run script --tags=get,ml-model,resnet50,_[VARIATION]
+```
+where:
+* `[VARIATION]` is one of `onnx` (alias `onnxruntime`), `pytorch`, `tensorflow` (alias `tf`), `fp32`, `onnx-1.5-opset-8`, `onnx-1.5-opset-11`.
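+
+The same recipe can be invoked programmatically. A minimal sketch, assuming the `cmind` package and this repository are installed (the exact keys in the result dictionary may differ across CM versions):
+
+```python
+import cmind
+
+# Run the script with the ONNX variation; CM resolves and caches all dependencies
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,ml-model,resnet50,_onnx',
+                  'out': 'con'})
+if r['return'] > 0:
+    raise RuntimeError(r['error'])
+
+# Exported variables are returned in 'new_env' (see the list below)
+print(r['new_env'].get('CM_ML_MODEL_FILE_WITH_PATH'))
+```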
+
+## Exported Variables
+* `CM_ML_MODEL_FILE:` Model filename
+* `CM_ML_MODEL_FILE_WITH_PATH:` Full path to model file
+* `CM_ML_MODEL_PATH:` Path to folder containing the model file
+* More exported env variables are listed in the [_cm.json file](_cm.json)
diff --git a/script/get-ml-model-resnet50/README.md b/script/get-ml-model-resnet50/README.md
new file mode 100644
index 0000000000..298138021a
--- /dev/null
+++ b/script/get-ml-model-resnet50/README.md
@@ -0,0 +1,358 @@
+Automatically generated README for this automation recipe: **get-ml-model-resnet50**
+
+Category: **AI/ML models**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-resnet50,56203e4e998b4bc0) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-resnet50)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,raw,ml-model,resnet50,ml-model-resnet50,image-classification*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,raw,ml-model,resnet50,ml-model-resnet50,image-classification`
+
+`cm run script --tags=get,raw,ml-model,resnet50,ml-model-resnet50,image-classification[,variations] `
+
+*or*
+
+`cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification"`
+
+`cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,raw,ml-model,resnet50,ml-model-resnet50,image-classification',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="get,raw,ml-model,resnet50,ml-model-resnet50,image-classification"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,raw,ml-model,resnet50,ml-model-resnet50,image-classification) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "get raw ml-model resnet50 ml-model-resnet50 image-classification[variations]" `
+
+___
+### Customization
+
+
+#### Variations
+
+ * *No group (any variation can be selected)*
+ Click here to expand this section. + + * `_batch_size.#` + - Environment variables: + - *CM_ML_MODEL_BATCH_SIZE*: `#` + - Workflow: + * `_batch_size.1` + - Environment variables: + - *CM_ML_MODEL_BATCH_SIZE*: `1` + - Workflow: + * `_fix-input-shape` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * `_from-tf` + - Workflow: + * `_huggingface_default` + - Environment variables: + - *CM_PACKAGE_URL*: `https://huggingface.co/ctuning/mlperf-inference-resnet50-onnx-fp32-imagenet2012-v1.0/resolve/main/resnet50_v1.onnx` + - Workflow: + * `_ncnn,fp32` + - Environment variables: + - *CM_PACKAGE_URL*: `https://zenodo.org/record/8073420/files/resnet50_v1.bin?download=1` + - Workflow: + 1. ***Read "post_deps" on other CM scripts*** + * download-and-extract,_url.https://zenodo.org/record/8073420/files/resnet50_v1.param?download= + - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract) + * `_onnx,from-tf` + - Environment variables: + - *CM_ML_MODEL_DATA_LAYOUT*: `NHWC` + - *CM_ML_MODEL_FRAMEWORK*: `onnx` + - *CM_ML_MODEL_INPUT_LAYERS*: `input_tensor` + - *CM_ML_MODEL_INPUT_LAYER_NAME*: `input_tensor` + - *CM_ML_MODEL_INPUT_SHAPES*: `\"input_tensor\": (BATCH_SIZE, 224, 224, 3)` + - *CM_ML_MODEL_OUTPUT_LAYERS*: `softmax_tensor` + - *CM_ML_MODEL_OUTPUT_LAYER_NAME*: `softmax_tensor` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://zenodo.org/record/2535873/files/resnet50_v1.pb` + - Workflow: + * `_onnx,from-tf,fp32` + - Environment variables: + - *CM_DOWNLOAD_FILENAME*: `resnet50_v1_modified.onnx` + - *CM_PACKAGE_URL*: `https://drive.google.com/uc?id=15wZ_8Vt12cb10IEBsln8wksD1zGwlbOM` + - Workflow: + * `_onnx,opset-11` + - Environment variables: + - *CM_PACKAGE_URL*: `https://zenodo.org/record/4735647/files/resnet50_v1.onnx` + - Workflow: + * `_onnx,opset-8` + - Environment variables: + - *CM_PACKAGE_URL*: `https://zenodo.org/record/2592612/files/resnet50_v1.onnx` + - Workflow: + * `_pytorch,fp32` + - Environment variables: + - *CM_PACKAGE_URL*: `https://zenodo.org/record/4588417/files/resnet50-19c8e357.pth` + - Workflow: + * `_pytorch,int8` + - Environment variables: + - *CM_PACKAGE_URL*: `https://zenodo.org/record/4589637/files/resnet50_INT8bit_quantized.pt` + - Workflow: + * `_tensorflow,fix-input-shape` + - Environment variables: + - *CM_ML_MODEL_TF_FIX_INPUT_SHAPE*: `yes` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,generic-python-lib,_package.tensorflow + * CM names: `--adr.['tensorflow']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_tflite,argmax` + - Environment variables: + - *CM_DAE_EXTRACT_DOWNLOADED*: `yes` + - *CM_DOWNLOAD_FINAL_ENV_NAME*: `` + - *CM_EXTRACT_FINAL_ENV_NAME*: `CM_ML_MODEL_FILE_WITH_PATH` + - *CM_ML_MODEL_FILE*: `resnet50_v1.tflite` + - *CM_ML_MODEL_INPUT_SHAPES*: `\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)` + - *CM_PACKAGE_URL*: `https://www.dropbox.com/s/cvv2zlfo80h54uz/resnet50_v1.tflite.gz?dl=1` + - Workflow: + * `_tflite,int8,no-argmax` + - Environment variables: + - *CM_DOWNLOAD_FINAL_ENV_NAME*: `CM_ML_MODEL_FILE_WITH_PATH` + - *CM_ML_MODEL_FILE*: `resnet50_quant_full_mlperf_edgetpu.tflite` + - *CM_ML_MODEL_INPUT_SHAPES*: `\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)` + - *CM_PACKAGE_URL*: `https://zenodo.org/record/8234946/files/resnet50_quant_full_mlperf_edgetpu.tflite?download=1` + - Workflow: + * `_tflite,no-argmax` + - Environment variables: + - *CM_ML_MODEL_FILE*: `resnet50_v1.no-argmax.tflite` + - *CM_ML_MODEL_INPUT_SHAPES*: `\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)` + - *CM_PACKAGE_URL*: `https://www.dropbox.com/s/vhuqo0wc39lky0a/resnet50_v1.no-argmax.tflite?dl=1` + - Workflow: + +
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * `_ncnn` + - Environment variables: + - *CM_ML_MODEL_FRAMEWORK*: `ncnn` + - Workflow: + * **`_onnx`** (default) + - Aliases: `_onnxruntime` + - Environment variables: + - *CM_ML_MODEL_DATA_LAYOUT*: `NCHW` + - *CM_ML_MODEL_FRAMEWORK*: `onnx` + - *CM_ML_MODEL_INPUT_LAYERS*: `input_tensor:0` + - *CM_ML_MODEL_INPUT_LAYER_NAME*: `input_tensor:0` + - *CM_ML_MODEL_INPUT_SHAPES*: `\"input_tensor:0\": (BATCH_SIZE, 3, 224, 224)` + - *CM_ML_MODEL_OUTPUT_LAYERS*: `softmax_tensor:0` + - *CM_ML_MODEL_OUTPUT_LAYER_NAME*: `softmax_tensor:0` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `<<>>` + - *CM_ML_MODEL_VER*: `1.5` + - Workflow: + * `_pytorch` + - Environment variables: + - *CM_ML_MODEL_DATA_LAYOUT*: `NCHW` + - *CM_ML_MODEL_FRAMEWORK*: `pytorch` + - *CM_ML_MODEL_GIVEN_CHANNEL_MEANS*: `?` + - *CM_ML_MODEL_INPUT_LAYER_NAME*: `input_tensor:0` + - *CM_ML_MODEL_INPUT_SHAPES*: `\"input_tensor:0\": [BATCH_SIZE, 3, 224, 224]` + - *CM_ML_MODEL_OUTPUT_LAYERS*: `output` + - *CM_ML_MODEL_OUTPUT_LAYER_NAME*: `?` + - *CM_ML_STARTING_WEIGHTS_FILENAME*: `<<>>` + - Workflow: + * `_tensorflow` + - Aliases: `_tf` + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `76.456` + - *CM_ML_MODEL_DATA_LAYOUT*: `NHWC` + - *CM_ML_MODEL_FRAMEWORK*: `tensorflow` + - *CM_ML_MODEL_GIVEN_CHANNEL_MEANS*: `123.68 116.78 103.94` + - *CM_ML_MODEL_INPUT_LAYERS*: `input_tensor` + - *CM_ML_MODEL_INPUT_LAYER_NAME*: `input_tensor` + - *CM_ML_MODEL_INPUT_SHAPES*: `\"input_tensor:0\": (BATCH_SIZE, 3, 224, 224)` + - *CM_ML_MODEL_NORMALIZE_DATA*: `0` + - *CM_ML_MODEL_OUTPUT_LAYERS*: `softmax_tensor` + - *CM_ML_MODEL_OUTPUT_LAYER_NAME*: `softmax_tensor` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `<<>>` + - *CM_ML_MODEL_SUBTRACT_MEANS*: `YES` + - *CM_PACKAGE_URL*: `https://zenodo.org/record/2535873/files/resnet50_v1.pb` + - Workflow: + * `_tflite` + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `76.456` + - *CM_ML_MODEL_DATA_LAYOUT*: `NHWC` + - *CM_ML_MODEL_FRAMEWORK*: `tflite` + - *CM_ML_MODEL_GIVEN_CHANNEL_MEANS*: `123.68 116.78 103.94` + - *CM_ML_MODEL_INPUT_LAYERS*: `input_tensor` + - *CM_ML_MODEL_INPUT_LAYER_NAME*: `input_tensor` + - *CM_ML_MODEL_INPUT_SHAPES*: `\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)` + - *CM_ML_MODEL_NORMALIZE_DATA*: `0` + - *CM_ML_MODEL_OUTPUT_LAYERS*: `softmax_tensor` + - *CM_ML_MODEL_OUTPUT_LAYER_NAME*: `softmax_tensor` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `<<>>` + - *CM_ML_MODEL_SUBTRACT_MEANS*: `YES` + - Workflow: + +
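+
+Note that the `CM_ML_MODEL_INPUT_SHAPES` values listed above keep `BATCH_SIZE` as a symbolic placeholder. A downstream harness is expected to substitute a concrete value; one possible way to parse such a string (a hypothetical helper, not part of this script):
+
+```python
+import re
+
+raw = '"input_tensor:0": (BATCH_SIZE, 3, 224, 224)'  # value of CM_ML_MODEL_INPUT_SHAPES
+m = re.match(r'"(?P<name>[^"]+)":\s*\((?P<dims>[^)]*)\)', raw)
+name = m.group('name')
+# Replace the symbolic batch dimension with a concrete batch size
+dims = [8 if d.strip() == 'BATCH_SIZE' else int(d) for d in m.group('dims').split(',')]
+print(name, dims)  # input_tensor:0 [8, 3, 224, 224]
+```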
+ + + * Group "**model-output**" +
+ Click here to expand this section. + + * **`_argmax`** (default) + - Environment variables: + - *CM_ML_MODEL_OUTPUT_LAYER_ARGMAX*: `yes` + - Workflow: + * `_no-argmax` + - Environment variables: + - *CM_ML_MODEL_OUTPUT_LAYER_ARGMAX*: `no` + - Workflow: + +
+ + + * Group "**opset-version**" +
+ Click here to expand this section. + + * `_opset-11` + - Environment variables: + - *CM_ML_MODEL_ONNX_OPSET*: `11` + - Workflow: + * `_opset-8` + - Environment variables: + - *CM_ML_MODEL_ONNX_OPSET*: `8` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `fp32` + - *CM_ML_MODEL_PRECISION*: `fp32` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `fp32` + - Workflow: + * `_int8` + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `int8` + - *CM_ML_MODEL_PRECISION*: `int8` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `int8` + - Workflow: + * `_uint8` + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `uint8` + - *CM_ML_MODEL_PRECISION*: `uint8` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `uint8` + - Workflow: + +
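+
+Variations compose: tags from several groups can be combined in one run, and `#`-suffixed variations such as `_batch_size.#` above are wildcards whose suffix is substituted for `#` (here, into `CM_ML_MODEL_BATCH_SIZE`). A sketch combining them, assuming `cmind` is installed:
+
+```python
+import cmind
+
+# _onnx picks the framework group, _fp32 the precision group,
+# and _batch_size.8 instantiates the wildcard variation with "8"
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,raw,ml-model,resnet50,ml-model-resnet50,image-classification,_onnx,_fp32,_batch_size.8',
+                  'out': 'con'})
+assert r['return'] == 0, r.get('error')
+```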
+ + +#### Default variations + +`_argmax,_fp32,_onnx` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
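+
+Beyond `--env.KEY=VALUE`, the named dependencies listed below (e.g. `model-downloader`) can be customized from the caller via `--adr.<name>.<key>=<value>` on the command line or an `adr` dictionary in Python. An illustrative sketch (option names follow the CM script automation; verify against your CM version), mirroring what the `_onnx,from-tf,fp32` variation does internally through its `adr` block:
+
+```python
+import cmind
+
+# Force the named 'model-downloader' dependency to use its _gdown variation
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,raw,ml-model,resnet50,ml-model-resnet50,image-classification,_onnx,_from-tf,_fp32',
+                  'adr': {'model-downloader': {'tags': '_gdown'}},
+                  'out': 'con'})
+assert r['return'] == 0, r.get('error')
+```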
+
+___
+### Dependencies on other CM scripts
+
+
+ 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-resnet50/_cm.json)
+ 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-resnet50/customize.py)***
+ 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-resnet50/_cm.json)***
+ * download-and-extract
+ * CM names: `--adr.['model-downloader']...`
+ - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract)
+ 1. ***Run native script if exists***
+ * [run-fix-input.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-resnet50/run-fix-input.sh)
+ 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-resnet50/_cm.json)
+ 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-resnet50/customize.py)***
+ 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-resnet50/_cm.json)
+
+___
+### Script output
+`cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification [,variations]" -j`
+#### New environment keys (filter)
+
+* `CM_ML_MODEL_*`
+#### New environment keys auto-detected from customize
+
+* `CM_ML_MODEL_FILE`
+* `CM_ML_MODEL_FILE_WITH_PATH`
+* `CM_ML_MODEL_STARTING_FILE_PATH`
\ No newline at end of file
diff --git a/script/get-ml-model-resnet50/_cm.json b/script/get-ml-model-resnet50/_cm.json
new file mode 100644
index 0000000000..2d42ed641b
--- /dev/null
+++ b/script/get-ml-model-resnet50/_cm.json
@@ -0,0 +1,307 @@
+{
+ "alias": "get-ml-model-resnet50",
+ "automation_alias": "script",
+ "automation_uid": "5b4e0237da074764",
+ "cache": true,
+ "category": "AI/ML models",
+ "env": {
+ "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH",
+ "CM_ML_MODEL": "RESNET50",
+ "CM_ML_MODEL_DATASET": "imagenet2012-val",
+ "CM_ML_MODEL_IMAGE_HEIGHT": "224",
+ "CM_ML_MODEL_IMAGE_WIDTH": "224",
+ "CM_ML_MODEL_NORMALIZE_DATA": "0",
+ "CM_ML_MODEL_RETRAINING": "no",
+ "CM_ML_MODEL_SUBTRACT_MEANS": "YES",
+ "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "no"
+ },
+ "new_env_keys": [
+ "CM_ML_MODEL_*"
+ ],
+ "prehook_deps": [
+ {
+ "env": {
+ "CM_EXTRACT_EXTRACTED_FILENAME": "<<>>"
+ },
+ "extra_cache_tags": "ml-model,resnet50,raw,ml-model-resnet50,_<<>>",
+ "force_cache": true,
+ "names": [
+ "model-downloader"
+ ],
+ "tags": "download-and-extract",
+ "update_tags_from_env_with_prefix": {
+ "_url.": [
+ "CM_PACKAGE_URL"
+ ]
+ }
+ }
+ ],
+ "tags": [
+ "get",
+ "raw",
+ "ml-model",
+ "resnet50",
+ "ml-model-resnet50",
+ "image-classification"
+ ],
+ "uid": "56203e4e998b4bc0",
+ "variations": {
+ "argmax": {
+ "default": true,
+ "env": {
+ "CM_ML_MODEL_OUTPUT_LAYER_ARGMAX": "yes"
+ },
+ "group": "model-output"
+ },
+ "batch_size.#": {
+ "env": {
+ "CM_ML_MODEL_BATCH_SIZE": "#"
+ }
+ },
+ "batch_size.1": {
+ "env": {
+ "CM_ML_MODEL_BATCH_SIZE": "1"
+ }
+ },
+ "fix-input-shape": {
+ "deps": [
+ {
+ "names": [
+ "python",
+ "python3"
+ ],
+ "tags": "get,python3"
+ }
+ ]
+ },
+ "fp32": {
+ "default": true,
+ "env": {
+ "CM_ML_MODEL_INPUT_DATA_TYPES": "fp32",
+ "CM_ML_MODEL_PRECISION": "fp32",
+ "CM_ML_MODEL_WEIGHT_DATA_TYPES": "fp32"
+ },
+ "group": "precision"
+ },
+ "from-tf": {},
+ "huggingface_default": {
+ "env": 
{ + "CM_PACKAGE_URL": "https://huggingface.co/ctuning/mlperf-inference-resnet50-onnx-fp32-imagenet2012-v1.0/resolve/main/resnet50_v1.onnx" + } + }, + "int8": { + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "int8", + "CM_ML_MODEL_PRECISION": "int8", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "int8" + }, + "group": "precision" + }, + "ncnn": { + "env": { + "CM_ML_MODEL_FRAMEWORK": "ncnn" + }, + "group": "framework" + }, + "ncnn,fp32": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/8073420/files/resnet50_v1.bin?download=1" + }, + "post_deps": [ + { + "env": { + "CM_EXTRACT_EXTRACTED_FILENAME": "<<>>" + }, + "extra_cache_tags": "ml-model-params,params,resnet50,ncnn,model-params", + "tags": "download-and-extract,_url.https://zenodo.org/record/8073420/files/resnet50_v1.param?download=" + } + ] + }, + "no-argmax": { + "env": { + "CM_ML_MODEL_OUTPUT_LAYER_ARGMAX": "no" + }, + "group": "model-output" + }, + "onnx": { + "default": true, + "default_variations": { + "opset-version": "opset-11" + }, + "env": { + "CM_ML_MODEL_DATA_LAYOUT": "NCHW", + "CM_ML_MODEL_FRAMEWORK": "onnx", + "CM_ML_MODEL_INPUT_LAYERS": "input_tensor:0", + "CM_ML_MODEL_INPUT_LAYER_NAME": "input_tensor:0", + "CM_ML_MODEL_INPUT_SHAPES": "\\\"input_tensor:0\\\": (BATCH_SIZE, 3, 224, 224)", + "CM_ML_MODEL_OUTPUT_LAYERS": "softmax_tensor:0", + "CM_ML_MODEL_OUTPUT_LAYER_NAME": "softmax_tensor:0", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "<<>>", + "CM_ML_MODEL_VER": "1.5" + }, + "group": "framework" + }, + "onnx,from-tf": { + "env": { + "CM_ML_MODEL_DATA_LAYOUT": "NHWC", + "CM_ML_MODEL_FRAMEWORK": "onnx", + "CM_ML_MODEL_INPUT_LAYERS": "input_tensor", + "CM_ML_MODEL_INPUT_LAYER_NAME": "input_tensor", + "CM_ML_MODEL_INPUT_SHAPES": "\\\"input_tensor\\\": (BATCH_SIZE, 224, 224, 3)", + "CM_ML_MODEL_OUTPUT_LAYERS": "softmax_tensor", + "CM_ML_MODEL_OUTPUT_LAYER_NAME": "softmax_tensor", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://zenodo.org/record/2535873/files/resnet50_v1.pb" + } + }, + "onnx,from-tf,fp32": { + "adr": { + "model-downloader": { + "tags": "_gdown" + } + }, + "env": { + "CM_DOWNLOAD_FILENAME": "resnet50_v1_modified.onnx", + "CM_PACKAGE_URL": "https://drive.google.com/uc?id=15wZ_8Vt12cb10IEBsln8wksD1zGwlbOM" + } + }, + "onnx,opset-11": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/4735647/files/resnet50_v1.onnx" + } + }, + "onnx,opset-8": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/2592612/files/resnet50_v1.onnx" + } + }, + "onnxruntime": { + "alias": "onnx" + }, + "opset-11": { + "env": { + "CM_ML_MODEL_ONNX_OPSET": "11" + }, + "group": "opset-version" + }, + "opset-8": { + "env": { + "CM_ML_MODEL_ONNX_OPSET": "8" + }, + "group": "opset-version" + }, + "pytorch": { + "env": { + "CM_ML_MODEL_DATA_LAYOUT": "NCHW", + "CM_ML_MODEL_FRAMEWORK": "pytorch", + "CM_ML_MODEL_GIVEN_CHANNEL_MEANS": "?", + "CM_ML_MODEL_INPUT_LAYER_NAME": "input_tensor:0", + "CM_ML_MODEL_INPUT_SHAPES": "\\\"input_tensor:0\\\": [BATCH_SIZE, 3, 224, 224]", + "CM_ML_MODEL_OUTPUT_LAYERS": "output", + "CM_ML_MODEL_OUTPUT_LAYER_NAME": "?", + "CM_ML_STARTING_WEIGHTS_FILENAME": "<<>>" + }, + "group": "framework" + }, + "pytorch,fp32": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/4588417/files/resnet50-19c8e357.pth" + } + }, + "pytorch,int8": { + "base": [ + "int8", + "pytorch" + ], + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/4589637/files/resnet50_INT8bit_quantized.pt" + } + }, + "tensorflow": { + "env": { + "CM_ML_MODEL_ACCURACY": "76.456", + "CM_ML_MODEL_DATA_LAYOUT": "NHWC", + 
"CM_ML_MODEL_FRAMEWORK": "tensorflow", + "CM_ML_MODEL_GIVEN_CHANNEL_MEANS": "123.68 116.78 103.94", + "CM_ML_MODEL_INPUT_LAYERS": "input_tensor", + "CM_ML_MODEL_INPUT_LAYER_NAME": "input_tensor", + "CM_ML_MODEL_INPUT_SHAPES": "\\\"input_tensor:0\\\": (BATCH_SIZE, 3, 224, 224)", + "CM_ML_MODEL_NORMALIZE_DATA": "0", + "CM_ML_MODEL_OUTPUT_LAYERS": "softmax_tensor", + "CM_ML_MODEL_OUTPUT_LAYER_NAME": "softmax_tensor", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "<<>>", + "CM_ML_MODEL_SUBTRACT_MEANS": "YES", + "CM_PACKAGE_URL": "https://zenodo.org/record/2535873/files/resnet50_v1.pb" + }, + "group": "framework" + }, + "tensorflow,fix-input-shape": { + "deps": [ + { + "names": [ + "tensorflow" + ], + "tags": "get,generic-python-lib,_package.tensorflow" + } + ], + "env": { + "CM_ML_MODEL_TF_FIX_INPUT_SHAPE": "yes" + } + }, + "tf": { + "alias": "tensorflow" + }, + "tflite": { + "env": { + "CM_ML_MODEL_ACCURACY": "76.456", + "CM_ML_MODEL_DATA_LAYOUT": "NHWC", + "CM_ML_MODEL_FRAMEWORK": "tflite", + "CM_ML_MODEL_GIVEN_CHANNEL_MEANS": "123.68 116.78 103.94", + "CM_ML_MODEL_INPUT_LAYERS": "input_tensor", + "CM_ML_MODEL_INPUT_LAYER_NAME": "input_tensor", + "CM_ML_MODEL_INPUT_SHAPES": "\\\"input_tensor 2\\\": (BATCH_SIZE, 224, 224, 3)", + "CM_ML_MODEL_NORMALIZE_DATA": "0", + "CM_ML_MODEL_OUTPUT_LAYERS": "softmax_tensor", + "CM_ML_MODEL_OUTPUT_LAYER_NAME": "softmax_tensor", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "<<>>", + "CM_ML_MODEL_SUBTRACT_MEANS": "YES" + }, + "group": "framework" + }, + "tflite,argmax": { + "env": { + "CM_DAE_EXTRACT_DOWNLOADED": "yes", + "CM_DOWNLOAD_FINAL_ENV_NAME": "", + "CM_EXTRACT_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH", + "CM_ML_MODEL_FILE": "resnet50_v1.tflite", + "CM_ML_MODEL_INPUT_SHAPES": "\\\"input_tensor 2\\\": (BATCH_SIZE, 224, 224, 3)", + "CM_PACKAGE_URL": "https://www.dropbox.com/s/cvv2zlfo80h54uz/resnet50_v1.tflite.gz?dl=1" + } + }, + "tflite,int8,no-argmax": { + "env": { + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH", + "CM_ML_MODEL_FILE": "resnet50_quant_full_mlperf_edgetpu.tflite", + "CM_ML_MODEL_INPUT_SHAPES": "\\\"input_tensor 2\\\": (BATCH_SIZE, 224, 224, 3)", + "CM_PACKAGE_URL": "https://zenodo.org/record/8234946/files/resnet50_quant_full_mlperf_edgetpu.tflite?download=1" + } + }, + "tflite,no-argmax": { + "env": { + "CM_ML_MODEL_FILE": "resnet50_v1.no-argmax.tflite", + "CM_ML_MODEL_INPUT_SHAPES": "\\\"input_tensor 2\\\": (BATCH_SIZE, 224, 224, 3)", + "CM_PACKAGE_URL": "https://www.dropbox.com/s/vhuqo0wc39lky0a/resnet50_v1.no-argmax.tflite?dl=1" + } + }, + "uint8": { + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "uint8", + "CM_ML_MODEL_PRECISION": "uint8", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "uint8" + }, + "group": "precision" + } + }, + "print_env_at_the_end" : { + "CM_ML_MODEL_FILE_WITH_PATH": "Path to the ML model" + } +} diff --git a/script/get-ml-model-resnet50/customize.py b/script/get-ml-model-resnet50/customize.py new file mode 100644 index 0000000000..4f30e94181 --- /dev/null +++ b/script/get-ml-model-resnet50/customize.py @@ -0,0 +1,28 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if env.get('CM_ML_MODEL_TF_FIX_INPUT_SHAPE', '') == "yes": + i['run_script_input']['script_name'] = "run-fix-input" + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + if env.get('CM_ML_MODEL_TF_FIX_INPUT_SHAPE', '') == "yes": + env['CM_ML_MODEL_STARTING_FILE_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(os.getcwd(), 
"resnet50_v1.pb") + + env['CM_ML_MODEL_FILE'] = os.path.basename(env['CM_ML_MODEL_FILE_WITH_PATH']) + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + + env['CM_DOWNLOAD_PATH'] = os.path.dirname(env['CM_ML_MODEL_FILE_WITH_PATH']) + + return {'return':0} diff --git a/script/get-ml-model-resnet50/run-fix-input.sh b/script/get-ml-model-resnet50/run-fix-input.sh new file mode 100644 index 0000000000..5364b12333 --- /dev/null +++ b/script/get-ml-model-resnet50/run-fix-input.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +wget -nc https://raw.githubusercontent.com/krai/ck-mlperf/master/package/model-tf-mlperf-resnet/fix_input_shape.py +test $? -eq 0 || exit $? +${CM_PYTHON_BIN_WITH_PATH} "fix_input_shape.py" \ +--input_name "input_tensor" \ +--input_graph "${CM_ML_MODEL_FILE_WITH_PATH}" \ +--output_graph "resnet50_v1.pb" \ +--type b +test $? -eq 0 || exit $? diff --git a/script/get-ml-model-resnet50/run_config.yml b/script/get-ml-model-resnet50/run_config.yml new file mode 100644 index 0000000000..938e3b641b --- /dev/null +++ b/script/get-ml-model-resnet50/run_config.yml @@ -0,0 +1,6 @@ +docker: + build: true + docker_os: ubuntu + docker_os_version: "22.04" + +run_with_default_inputs: true #if false the script won't run automatic tests diff --git a/script/get-ml-model-retinanet-nvidia/README.md b/script/get-ml-model-retinanet-nvidia/README.md new file mode 100644 index 0000000000..3f36251935 --- /dev/null +++ b/script/get-ml-model-retinanet-nvidia/README.md @@ -0,0 +1,171 @@ +Automatically generated README for this automation recipe: **get-ml-model-retinanet-nvidia** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-retinanet-nvidia,f059d249fac843ba) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet-nvidia)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,ml-model,nvidia-retinanet,nvidia* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get ml-model nvidia-retinanet nvidia" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,ml-model,nvidia-retinanet,nvidia`
+
+`cm run script --tags=get,ml-model,nvidia-retinanet,nvidia[,variations] `
+
+*or*
+
+`cmr "get ml-model nvidia-retinanet nvidia"`
+
+`cmr "get ml-model nvidia-retinanet nvidia [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,nvidia-retinanet,nvidia',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="get,ml-model,nvidia-retinanet,nvidia"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ml-model,nvidia-retinanet,nvidia) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "get ml-model nvidia-retinanet nvidia[variations]" `
+
+___
+### Customization
+
+
+#### Variations
+
+ * *No group (any variation can be selected)*
+ Click here to expand this section. + + * `_efficient-nms` + - Environment variables: + - *CM_NVIDIA_EFFICIENT_NMS*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_polygraphy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_TORCH_DEVICE: `cpu` + +
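+
+`CM_TORCH_DEVICE` also gates the dependency list below: with `cpu` the plain `_torch` package is pulled, while `cuda` pulls CUDA and `_torch_cuda` instead. A sketch of switching the device, assuming `cmind` is installed:
+
+```python
+import cmind
+
+# Equivalent to: cm run script --tags=get,ml-model,nvidia-retinanet,nvidia --env.CM_TORCH_DEVICE=cuda
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,ml-model,nvidia-retinanet,nvidia',
+                  'env': {'CM_TORCH_DEVICE': 'cuda'},
+                  'out': 'con'})
+assert r['return'] == 0, r.get('error')
+```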
+
+___
+### Dependencies on other CM scripts
+
+
+ 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet-nvidia/_cm.json)***
+ * detect,os
+ - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+ * get,python3
+ * CM names: `--adr.['python', 'python3']...`
+ - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+ * get,mlperf,training,src,_nvidia-retinanet
+ - CM script: [get-mlperf-training-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-training-src)
+ * get,mlperf,inference,src
+ - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+ * get,ml-model,retinanet,_pytorch,_fp32,_weights
+ - CM script: [get-ml-model-retinanet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet)
+ * get,generic-python-lib,_torch
+ * `if (CM_TORCH_DEVICE in cpu)`
+ - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+ * get,generic-python-lib,_torchvision
+ - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+ * get,generic-python-lib,_mlperf_logging
+ - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+ * get,cuda
+ * `if (CM_TORCH_DEVICE in cuda)`
+ - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda)
+ * get,generic-python-lib,_torch_cuda
+ * `if (CM_TORCH_DEVICE in cuda)`
+ - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+ * get,nvidia,mlperf,inference,common-code,-_custom
+ - CM script: [get-mlperf-inference-nvidia-common-code](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-nvidia-common-code)
+ 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet-nvidia/customize.py)***
+ 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet-nvidia/_cm.json)
+ 1. ***Run native script if exists***
+ * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet-nvidia/run.sh)
+ 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet-nvidia/_cm.json)
+ 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet-nvidia/customize.py)***
+ 1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet-nvidia/_cm.json) + +___ +### Script output +`cmr "get ml-model nvidia-retinanet nvidia [,variations]" -j` +#### New environment keys (filter) + +* `CM_ML_MODEL_*` +* `CM_NVIDIA_RETINANET_*` +#### New environment keys auto-detected from customize + +* `CM_ML_MODEL_ANCHOR_PATH` +* `CM_ML_MODEL_DYN_BATCHSIZE_PATH` +* `CM_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH` \ No newline at end of file diff --git a/script/get-ml-model-retinanet-nvidia/_cm.json b/script/get-ml-model-retinanet-nvidia/_cm.json new file mode 100644 index 0000000000..53e705e380 --- /dev/null +++ b/script/get-ml-model-retinanet-nvidia/_cm.json @@ -0,0 +1,84 @@ +{ + "alias": "get-ml-model-retinanet-nvidia", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML models", + "default_env": { + "CM_TORCH_DEVICE": "cpu" + }, + "deps": [ + { + "tags": "detect,os" + }, + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "tags": "get,mlperf,training,src,_nvidia-retinanet" + }, + { + "tags": "get,mlperf,inference,src" + }, + { + "tags": "get,ml-model,retinanet,_pytorch,_fp32,_weights" + }, + { + "enable_if_env": { + "CM_TORCH_DEVICE": "cpu" + }, + "tags": "get,generic-python-lib,_torch" + }, + { + "tags": "get,generic-python-lib,_torchvision" + }, + { + "tags": "get,generic-python-lib,_mlperf_logging" + }, + { + "enable_if_env": { + "CM_TORCH_DEVICE": "cuda" + }, + "tags": "get,cuda" + }, + { + "enable_if_env": { + "CM_TORCH_DEVICE": "cuda" + }, + "tags": "get,generic-python-lib,_torch_cuda" + }, + { + "tags": "get,nvidia,mlperf,inference,common-code,-_custom" + } + ], + "new_env_keys": [ + "CM_NVIDIA_RETINANET_*", + "CM_ML_MODEL_*" + ], + "tags": [ + "get", + "ml-model", + "nvidia-retinanet", + "nvidia" + ], + "uid": "f059d249fac843ba", + "variations": { + "efficient-nms": { + "deps": [ + { + "tags": "get,generic-python-lib,_polygraphy" + } + ], + "env": { + "CM_NVIDIA_EFFICIENT_NMS": "yes" + } + } + }, + "print_env_at_the_end" : { + "CM_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH": "Path to the ML model" + } +} diff --git a/script/get-ml-model-retinanet-nvidia/customize.py b/script/get-ml-model-retinanet-nvidia/customize.py new file mode 100644 index 0000000000..6da132316e --- /dev/null +++ b/script/get-ml-model-retinanet-nvidia/customize.py @@ -0,0 +1,23 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + if '+PYTHONPATH' not in env: + env['+PYTHONPATH'] = [] + env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_TRAINING_SOURCE'], "single_stage_detector", "ssd")) + env['CM_ML_MODEL_DYN_BATCHSIZE_PATH'] = os.path.join(os.getcwd(), "retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx") + if "CM_NVIDIA_EFFICIENT_NMS" in env: + env['CM_NVIDIA_MODEL_PATCHED_PATH'] = os.path.join(os.getcwd(), "fpn_efficientnms_concatall.onnx") + env['CM_ML_MODEL_ANCHOR_PATH'] = os.path.join(env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'], "code", "retinanet", "tensorrt", "onnx_generator", "retinanet_anchor_xywh_1x1.npy") + return {'return':0} + +def postprocess(i): + env = i['env'] + env['CM_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH'] = os.path.join(os.getcwd(), "test_fpn_efficientnms_concatall.onnx") + if "CM_NVIDIA_EFFICIENT_NMS" in env: + env['CM_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH'] = env['CM_NVIDIA_MODEL_PATCHED_PATH'] + return 
{'return':0} diff --git a/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py b/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py new file mode 100644 index 0000000000..e076e4072e --- /dev/null +++ b/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py @@ -0,0 +1,109 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import onnx +import argparse +import json +import re + +import onnx_graphsurgeon as gs +import numpy as np +import os + + +# in_onnx = "/work/code/retinanet/tensorrt/onnx_retina/ref_fpn_transreshapeconcat.onnx" +in_onnx = os.environ.get("CM_ML_MODEL_DYN_BATCHSIZE_PATH", "build/models/retinanet-resnext50-32x4d/new/retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx") +out_onnx = os.environ.get("CM_NVIDIA_MODEL_PATCHED_PATH", "/work/code/retinanet/tensorrt/onnx_generator/test_fpn_efficientnms_concatall.onnx") +# Anchor at [1, 1] +anchor_xywh_1x1_npy = os.environ.get("CM_ML_MODEL_ANCHOR_PATH", "/work/code/retinanet/tensorrt/onnx_generator/retinanet_anchor_xywh_1x1.npy") + +graph = gs.import_onnx(onnx.load(in_onnx)) + +op = 'EfficientNMS_TRT' +node_name = 'efficientNMS' + +# (PluginField("score_threshold", nullptr, PluginFieldType::kFLOAT32, 1)); +# (PluginField("iou_threshold", nullptr, PluginFieldType::kFLOAT32, 1)); +# (PluginField("max_output_boxes", nullptr, PluginFieldType::kINT32, 1)); +# (PluginField("background_class", nullptr, PluginFieldType::kINT32, 1)); +# (PluginField("score_activation", nullptr, PluginFieldType::kINT32, 1)); +# (PluginField("box_coding", nullptr, PluginFieldType::kINT32, 1)); + +node_attrs = { + "background_class": -1, + "score_threshold" : 0.05, + "iou_threshold" : 0.5, + "max_output_boxes" : 1000, + "score_activation" : True, + "box_coding" : 1, +} +attrs = { + "plugin_version": "1", + "plugin_namespace": "", +} +attrs.update(node_attrs) + +anchors = np.load(anchor_xywh_1x1_npy) +print(f"anchors shape: {anchors.shape}, top 4: {anchors[0, :]}") +anchors = np.expand_dims(anchors, axis=0) +print(f"anchors shape: {anchors.shape}") + +anchor_tensor = gs.Constant(name="anchor", values=anchors) + +tensors = graph.tensors() + +# Add EfficientNMS layer +# output tensors +num_detections = gs.Variable(name="num_detections", + dtype=np.int32, + shape=["batch_size", 1]) +detection_boxes = gs.Variable(name="detection_boxes", + dtype=np.float32, + shape=["batch_size", 1000, 4]) +detection_scores = gs.Variable(name="detection_scores", + dtype=np.float32, + shape=["batch_size", 1000]) +detection_classes = gs.Variable(name="detection_classes", + dtype=np.int32, + shape=["batch_size", 1000]) + +nms_inputs = [tensors["bbox_regression"], tensors["cls_logits"], anchor_tensor] +nms_outputs = [num_detections, detection_boxes, detection_scores, detection_classes] + +graph.layer(op="EfficientNMS_TRT", + name="EfficientNMS", + inputs=nms_inputs, + outputs=nms_outputs, + attrs=attrs) + +# Add Concat plugin to concat all 4 
tensors
+concat_final_output = gs.Variable(name="concat_final_output",
+ dtype=np.float32,
+ shape=["batch_size", 7001])
+attrs = {
+ "plugin_version": "1",
+ "plugin_namespace": "",
+}
+graph.layer(op="RetinanetConcatNmsOutputsPlugin",
+ name="RetinanetConcatNmsOutputsPlugin",
+ inputs=[num_detections, detection_boxes, detection_scores, detection_classes],
+ outputs=[concat_final_output],
+ attrs=attrs)
+
+graph.outputs = [concat_final_output]
+
+graph.cleanup().toposort()
+
+onnx.save_model(gs.export_onnx(graph), out_onnx)
diff --git a/script/get-ml-model-retinanet-nvidia/polygraphy_script.sh b/script/get-ml-model-retinanet-nvidia/polygraphy_script.sh
new file mode 100644
index 0000000000..b992aa171b
--- /dev/null
+++ b/script/get-ml-model-retinanet-nvidia/polygraphy_script.sh
@@ -0,0 +1,24 @@
+# Set these parameters
+RAW_ONNX_PATH=$1
+FOLDED_ONNX_PATH=$2
+BACKEND_ONNX_PATH=$3
+NMS_ONNX_PATH=$4
+
+bbox_concat_node="bbox_regression"
+classification_concat_node="cls_logits"
+
+
+# Run once to install the dependencies. For some reason, this interferes with Polygraphy's auto-fold loop,
+# so we need to run a second time.
+POLYGRAPHY_AUTOINSTALL_DEPS=1 polygraphy surgeon sanitize --fold-constants $RAW_ONNX_PATH -o $FOLDED_ONNX_PATH
+polygraphy surgeon sanitize --fold-constants $RAW_ONNX_PATH -o $FOLDED_ONNX_PATH
+
+# Extract backend
+polygraphy surgeon extract $FOLDED_ONNX_PATH \
+ --outputs ${bbox_concat_node}:auto ${classification_concat_node}:auto \
+ -o $BACKEND_ONNX_PATH
+
+# Extract NMS head
+polygraphy surgeon extract $FOLDED_ONNX_PATH \
+ --inputs ${classification_concat_node}:[batch_size,120087,264]:auto ${bbox_concat_node}:[batch_size,120087,4]:auto \
+ -o $NMS_ONNX_PATH
diff --git a/script/get-ml-model-retinanet-nvidia/run.sh b/script/get-ml-model-retinanet-nvidia/run.sh
new file mode 100644
index 0000000000..592509b670
--- /dev/null
+++ b/script/get-ml-model-retinanet-nvidia/run.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+#${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_INFERENCE_VISION_PATH}/tools/retinanet_pytorch_to_onnx.py --weights ${CM_ML_MODEL_FILE_WITH_PATH}
+cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_TRAINING_SOURCE}/single_stage_detector/ssd/pth_to_onnx.py --num-classes 264 --image-size 800 800 --input ${CM_ML_MODEL_FILE_WITH_PATH} --output retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx --device ${CM_TORCH_DEVICE}"
+echo $cmd
+eval $cmd
+test $? -eq 0 || exit $?
+if [[ ${CM_NVIDIA_EFFICIENT_NMS} == "yes" ]]; then
+ cmd="bash ${CM_TMP_CURRENT_SCRIPT_PATH}/polygraphy_script.sh retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx folded.onnx backend.onnx nms.onnx"
+ echo $cmd
+ eval $cmd
+ test $? -eq 0 || exit $?
+ cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/nvidia_patch_retinanet_efficientnms.py"
+ echo $cmd
+ eval $cmd
+ test $? -eq 0 || exit $?
+fi
diff --git a/script/get-ml-model-retinanet/README-extra.md b/script/get-ml-model-retinanet/README-extra.md
new file mode 100644
index 0000000000..db25a86577
--- /dev/null
+++ b/script/get-ml-model-retinanet/README-extra.md
@@ -0,0 +1,16 @@
+# Get ML Model RetinaNet
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the RetinaNet model and adds it to the CM cache with the relevant metadata.
+
+## How To
+```bash
+cm run script --tags=get,ml-model,retinanet,_[VARIATION]
+```
+where:
+* `[VARIATION]` is one of `onnx-fp32`, `pytorch-fp32`, or `pytorch-fp32-weights`.
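+
+For example, a minimal sketch fetching the PyTorch fp32 training weights (the `_pytorch,_fp32,_weights` combination that the NVIDIA recipe above depends on), assuming `cmind` is installed; the result keys may vary by CM version:
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,ml-model,retinanet,_pytorch,_fp32,_weights',
+                  'out': 'con'})
+if r['return'] > 0:
+    raise RuntimeError(r['error'])
+print(r['new_env'].get('CM_ML_MODEL_FILE_WITH_PATH'))
+```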
+
+## Exported Variables
+* `CM_ML_MODEL_FILE:` Model filename
+* `CM_ML_MODEL_FILE_WITH_PATH:` Full path to model file
+* `CM_ML_MODEL_PATH:` Path to folder containing the model file
+* More exported env variables are listed in the [_cm.json file](_cm.json)
+
diff --git a/script/get-ml-model-retinanet/README.md b/script/get-ml-model-retinanet/README.md
new file mode 100644
index 0000000000..ab36ac1848
--- /dev/null
+++ b/script/get-ml-model-retinanet/README.md
@@ -0,0 +1,226 @@
+Automatically generated README for this automation recipe: **get-ml-model-retinanet**
+
+Category: **AI/ML models**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-retinanet,427bc5665e4541c2) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,ml-model,raw,resnext50,retinanet,object-detection*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get ml-model raw resnext50 retinanet object-detection" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,ml-model,raw,resnext50,retinanet,object-detection`
+
+`cm run script --tags=get,ml-model,raw,resnext50,retinanet,object-detection[,variations] `
+
+*or*
+
+`cmr "get ml-model raw resnext50 retinanet object-detection"`
+
+`cmr "get ml-model raw resnext50 retinanet object-detection [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,raw,resnext50,retinanet,object-detection',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="get,ml-model,raw,resnext50,retinanet,object-detection"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ml-model,raw,resnext50,retinanet,object-detection) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "get ml-model raw resnext50 retinanet object-detection[variations]" `
+
+___
+### Customization
+
+
+#### Variations
+
+ * *No group (any variation can be selected)*
+
+ Click here to expand this section.
+
+ * `_no-nms`
+ - Environment variables:
+ - *CM_TMP_ML_MODEL_RETINANET_NO_NMS*: `yes`
+ - *CM_ML_MODEL_RETINANET_NO_NMS*: `yes`
+ - *CM_QAIC_PRINT_NODE_PRECISION_INFO*: `yes`
+ - Workflow:
+ * `_onnx,fp32`
+ - Environment variables:
+ - *CM_PACKAGE_URL*: `https://zenodo.org/record/6617879/files/resnext50_32x4d_fpn.onnx`
+ - *CM_DOWNLOAD_CHECKSUM*: `4544f4e56e0a4684215831cc937ea45c`
+ - *CM_ML_MODEL_ACCURACY*: `0.3757`
+ - Workflow:
+ * `_onnx,no-nms`
+ - Workflow:
+ 1. ***Read "deps" on other CM scripts***
+ * get,python3
+ * CM names: `--adr.['python', 'python3']...`
+ - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+ * get,generic-python-lib,_package.onnx
+ - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+ * get,generic-python-lib,_package.onnxsim
+ - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+ * download,file,_url.https://raw.githubusercontent.com/arjunsuresh/ck-qaic/main/package/model-onnx-mlperf-retinanet-no-nms/remove-nms-and-extract-priors.patch
+ - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file)
+ * get,git,repo,_repo.https://github.com/mlcommons/training.git,_patch
+ * CM names: `--adr.['mlperf-training-src']...`
+ - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo)
+ * get,ml-model,retinanet,_pytorch,_fp32,_weights
+ * CM names: `--adr.['pytorch-weights']...`
+ - CM script: [get-ml-model-retinanet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet)
+ * get,generic-python-lib,_package.torch
+ * CM names: `--adr.['torch', 'pytorch']...`
+ - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+ * `_pytorch,fp32`
+ - Environment variables:
+ - *CM_PACKAGE_URL*: `https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth`
+ - *CM_ML_MODEL_ACCURACY*: `0.3755`
+ - Workflow:
+ * `_pytorch,fp32,weights`
+ - Environment variables:
+ - *CM_PACKAGE_URL*: `https://zenodo.org/record/6605272/files/retinanet_model_10.zip?download=1`
+ - *CM_UNZIP*: `yes`
+ - *CM_ML_MODEL_FILE*: `retinanet_model_10.pth`
+ - *CM_ML_MODEL_ACCURACY*: `0.3755`
+ - Workflow:
+ * `_weights`
+ - Environment variables:
+ - *CM_MODEL_WEIGHTS_FILE*: `yes`
+ - Workflow:
+
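+
+The `_no-nms` variation above is the interesting one for accelerator backends: combined with `_onnx` it patches the MLPerf training repo, re-exports the model without the NMS head, and (since `CM_QAIC_PRINT_NODE_PRECISION_INFO=yes`) also emits a node-precision YAML. A sketch, assuming `cmind` is installed:
+
+```python
+import cmind
+
+# _onnx and _fp32 are group defaults, so _no-nms alone activates the onnx,no-nms path
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,ml-model,raw,resnext50,retinanet,object-detection,_no-nms',
+                  'out': 'con'})
+assert r['return'] == 0, r.get('error')
+
+# Path to the node-precision YAML exported by customize.py
+print(r['new_env'].get('CM_ML_MODEL_RETINANET_QAIC_NODE_PRECISION_INFO_FILE_PATH'))
+```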
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * **`_onnx`** (default) + - Environment variables: + - *CM_ML_MODEL_DATA_LAYOUT*: `NCHW` + - *CM_ML_MODEL_FRAMEWORK*: `onnx` + - Workflow: + * `_pytorch` + - Environment variables: + - *CM_ML_MODEL_DATA_LAYOUT*: `NCHW` + - *CM_ML_MODEL_FRAMEWORK*: `pytorch` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `fp32` + - *CM_ML_MODEL_PRECISION*: `fp32` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `fp32` + - Workflow: + +
+ + +#### Default variations + +`_fp32,_onnx` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+
+___
+### Dependencies on other CM scripts
+
+
+ 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet/_cm.json)
+ 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet/customize.py)***
+ 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet/_cm.json)***
+ * download-and-extract
+ * `if (CM_TMP_ML_MODEL_RETINANET_NO_NMS != yes)`
+ * CM names: `--adr.['dae']...`
+ - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract)
+ 1. ***Run native script if exists***
+ * [run-no-nms.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet/run-no-nms.sh)
+ 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet/_cm.json)
+ 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet/customize.py)***
+ 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-retinanet/_cm.json)
+
+___
+### Script output
+`cmr "get ml-model raw resnext50 retinanet object-detection [,variations]" -j`
+#### New environment keys (filter)
+
+* `<<>>`
+* `CM_ML_MODEL_*`
+#### New environment keys auto-detected from customize
+
+* `CM_ML_MODEL_FILE`
+* `CM_ML_MODEL_FILE_WITH_PATH`
+* `CM_ML_MODEL_RETINANET_QAIC_NODE_PRECISION_INFO_FILE_PATH`
\ No newline at end of file
diff --git a/script/get-ml-model-retinanet/_cm.json b/script/get-ml-model-retinanet/_cm.json
new file mode 100644
index 0000000000..4b38625120
--- /dev/null
+++ b/script/get-ml-model-retinanet/_cm.json
@@ -0,0 +1,176 @@
+{
+ "alias": "get-ml-model-retinanet",
+ "automation_alias": "script",
+ "automation_uid": "5b4e0237da074764",
+ "category": "AI/ML models",
+ "cache": true,
+ "env": {
+ "CM_ML_MODEL": "retinanet",
+ "CM_ML_MODEL_DATASET": "open-images",
+ "CM_ML_MODEL_IMAGE_HEIGHT": "800",
+ "CM_ML_MODEL_IMAGE_WIDTH": "800",
+ "CM_ML_MODEL_NORMALIZE_DATA": "yes",
+ "CM_ML_MODEL_RETRAINING": "no",
+ "CM_ML_MODEL_SUBTRACT_MEANS": "yes",
+ "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "no"
+ },
+ "new_env_keys": [
+ "CM_ML_MODEL_*",
+ "<<>>"
+ ],
+ "tags": [
+ "get",
+ "ml-model",
+ "raw",
+ "resnext50",
+ "retinanet",
+ "object-detection"
+ ],
+ "uid": "427bc5665e4541c2",
+ "prehook_deps": [
+ {
+ "tags": "download-and-extract",
+ "env": {
+ "CM_EXTRACT_EXTRACTED_FILENAME": "<<>>",
+ "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH",
+ "CM_EXTRACT_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH"
+ },
+ "update_tags_from_env_with_prefix": {
+ "_url.": [ "CM_PACKAGE_URL" ]
+ },
+ "names": [
+ "dae"
+ ],
+ "skip_if_env": {
+ "CM_TMP_ML_MODEL_RETINANET_NO_NMS": [
+ "yes"
+ ]
+ }
+ }
+ ],
+ "variations": {
+ "fp32": {
+ "group": "precision",
+ "default": true,
+ "env": {
+ "CM_ML_MODEL_INPUT_DATA_TYPES": "fp32",
+ "CM_ML_MODEL_PRECISION": "fp32",
+ "CM_ML_MODEL_WEIGHT_DATA_TYPES": "fp32"
+ }
+ },
+ "onnx": {
+ "group": "framework",
+ "default": true,
+ "env": {
+ "CM_ML_MODEL_DATA_LAYOUT": "NCHW",
+ "CM_ML_MODEL_FRAMEWORK": "onnx"
+ }
+ },
+ "pytorch": {
+ "group": "framework",
+ "env": {
+ "CM_ML_MODEL_DATA_LAYOUT": "NCHW",
+ "CM_ML_MODEL_FRAMEWORK": "pytorch"
+ }
+ },
+ "onnx,fp32": {
+ "env": {
+ "CM_PACKAGE_URL": 
"https://zenodo.org/record/6617879/files/resnext50_32x4d_fpn.onnx", + "CM_DOWNLOAD_CHECKSUM": "4544f4e56e0a4684215831cc937ea45c", + "CM_ML_MODEL_ACCURACY": "0.3757" + }, + "required_disk_space": 150, + "warning":"This model is downloaded from Zenodo.org" + }, + "pytorch,fp32": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth", + "CM_ML_MODEL_ACCURACY": "0.3755" + } + + }, + "weights": { + "env": { + "CM_MODEL_WEIGHTS_FILE": "yes" + } + }, + "pytorch,fp32,weights": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/6605272/files/retinanet_model_10.zip?download=1", + "CM_UNZIP": "yes", + "CM_ML_MODEL_FILE": "retinanet_model_10.pth", + "CM_ML_MODEL_ACCURACY": "0.3755" + }, + "add_deps_recursive": { + "dae": { + "tags": "_extract" + } + } + }, + "no-nms": { + "env": { + "CM_TMP_ML_MODEL_RETINANET_NO_NMS": "yes", + "CM_ML_MODEL_RETINANET_NO_NMS": "yes", + "CM_QAIC_PRINT_NODE_PRECISION_INFO": "yes" + } + }, + "onnx,no-nms": { + "env": { + }, + "deps": [ + { + "tags": "get,python3", + "names": [ + "python, python3" + ] + }, + { + "tags": "get,generic-python-lib,_package.onnx" + }, + { + "tags": "get,generic-python-lib,_package.onnxsim" + }, + { + "tags": "download,file,_url.https://raw.githubusercontent.com/arjunsuresh/ck-qaic/main/package/model-onnx-mlperf-retinanet-no-nms/remove-nms-and-extract-priors.patch", + "extra_cache_tags": "retinanet,training,patch,file", + "force_cache": true, + "env": { + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_RETINANET_NO_NMS_PATCH_FILE_PATH" + } + }, + { + "tags": "get,git,repo,_repo.https://github.com/mlcommons/training.git,_patch", + "extra_cache_tags": "training,src,mlperf,patched", + "names": [ + "mlperf-training-src" + ], + "env": { + "CM_GIT_PATCH_FILEPATHS": "<<>>", + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_MLPERF_TRAINING_REPO_PATCHED_PATH" + } + }, + { + "tags": "get,ml-model,retinanet,_pytorch,_fp32,_weights", + "names": [ + "pytorch-weights" + ], + "env": { + "CM_ENV_NAME_ML_MODEL_FILE": "CM_ML_MODEL_RETINANET_PYTORCH_WEIGHTS_FILE_PATH" + } + }, + { + "tags": "get,generic-python-lib,_package.torch", + "names": [ + "torch", + "pytorch" + ], + "version_min": "1.13.1" + } + ] + } + }, + "print_env_at_the_end" : { + "CM_ML_MODEL_FILE_WITH_PATH": "Path to the ML model", + "CM_ML_MODEL_ACCURACY": "Model accuracy" + } +} diff --git a/script/get-ml-model-retinanet/customize.py b/script/get-ml-model-retinanet/customize.py new file mode 100644 index 0000000000..cc875212a4 --- /dev/null +++ b/script/get-ml-model-retinanet/customize.py @@ -0,0 +1,30 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if env.get('CM_TMP_ML_MODEL_RETINANET_NO_NMS', '') == 'yes': + i['run_script_input']['script_name'] = "run-no-nms" + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(os.getcwd(), "retinanet.onnx") + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env['CM_ML_MODEL_FILE'] = os.path.basename(env['CM_ML_MODEL_FILE_WITH_PATH']) + if env.get('CM_ENV_NAME_ML_MODEL_FILE', '') != '': + env[env['CM_ENV_NAME_ML_MODEL_FILE']] = env['CM_ML_MODEL_FILE_WITH_PATH'] + + if env.get("CM_QAIC_PRINT_NODE_PRECISION_INFO", '') == 'yes': + env['CM_ML_MODEL_RETINANET_QAIC_NODE_PRECISION_INFO_FILE_PATH'] = os.path.join(os.getcwd(), 'node-precision-info.yaml') + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + + return {'return':0} + diff --git a/script/get-ml-model-retinanet/node-precision-info.py 
b/script/get-ml-model-retinanet/node-precision-info.py new file mode 100644 index 0000000000..100a64ecbb --- /dev/null +++ b/script/get-ml-model-retinanet/node-precision-info.py @@ -0,0 +1,69 @@ +import onnx +import os +import sys +import argparse +import yaml + +def parse_args(add_help=True): + parser = argparse.ArgumentParser(description='Print node precision info for the onnx file', add_help=add_help) + parser.add_argument('--input', default="retinanet.onnx", help='input onnx file') + parser.add_argument('--output', default="node-precision.yaml", help='output node precision file') + args = parser.parse_args() + + return args + +def main(args): + + onnx_model = onnx.load(args.input) + list1 = [ + "/backbone/fpn/inner_blocks.0/Conv_output_0", + "/head/classification_head/Sigmoid_output_0", + "/head/classification_head/Sigmoid_1_output_0", + "/head/classification_head/Sigmoid_2_output_0", + "/head/classification_head/Sigmoid_3_output_0", + "/head/classification_head/Sigmoid_4_output_0" + ] + list2 = [ + "1421", + "1481", + "1517", + "1553", + "1589", + "1625", + ] + + #check which list of node names is valid + node_names = [] + valid_list = None + + #for n in enumerate_model_node_outputs(onnx_model): + for n in onnx_model.graph.node: + node_names.append(n.output[0]) + + if set(list1) < set(node_names): + valid_list = list1 + elif set(list2) < set(node_names): + valid_list = list2 + else: + print("Node names are not matching with the expected ones in the input onnx file.") + sys.exit(1) + + node_precision_info = {} + node_precision_info['FP16NodeInstanceNames'] = [] + + fp16nodes = valid_list + fp16nodes += [ "boxes_1", "boxes_2", "boxes_3", "boxes_4", "boxes_5", "scores_1", "scores_2", "scores_3", "scores_4", "scores_5"] + + #node_precision_info['FP16NodeInstanceNames'] = "["+", ".join(fp16nodes)+"]" + node_precision_info['FP16NodeInstanceNames'] = fp16nodes + + yaml_output = yaml.safe_dump(node_precision_info, default_style=None) + with open(args.output, "w") as f: + f.write(yaml_output) + + print(f"Node precision info successfully printed out to {args.output}") + + +if __name__ == "__main__": + args = parse_args() + main(args) diff --git a/script/get-ml-model-retinanet/run-no-nms.sh b/script/get-ml-model-retinanet/run-no-nms.sh new file mode 100644 index 0000000000..48be9d1e6e --- /dev/null +++ b/script/get-ml-model-retinanet/run-no-nms.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... 
+# run "$CM_RUN_CMD" + +cmd="PYTHONPATH=${PYTHONPATH}:${CM_MLPERF_TRAINING_REPO_PATCHED_PATH}/single_stage_detector/ssd/ ${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_TRAINING_REPO_PATCHED_PATH}/single_stage_detector/scripts/pth_to_onnx.py --input ${CM_ML_MODEL_RETINANET_PYTORCH_WEIGHTS_FILE_PATH} --output $PWD/retinanet.onnx --image-size 800 800" +run "$cmd" + +if [[ ${CM_QAIC_PRINT_NODE_PRECISION_INFO} == "yes" ]]; then + cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/node-precision-info.py --input $PWD/retinanet.onnx --output $PWD/node-precision-info.yaml" + run "$cmd" +fi diff --git a/script/get-ml-model-rnnt/README.md b/script/get-ml-model-rnnt/README.md new file mode 100644 index 0000000000..66a5a6d03b --- /dev/null +++ b/script/get-ml-model-rnnt/README.md @@ -0,0 +1,194 @@ +Automatically generated README for this automation recipe: **get-ml-model-rnnt** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-rnnt,8858f18b89774d28) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-rnnt)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,ml-model,rnnt,raw,librispeech,speech-recognition* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get ml-model rnnt raw librispeech speech-recognition" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,ml-model,rnnt,raw,librispeech,speech-recognition` + +`cm run script --tags=get,ml-model,rnnt,raw,librispeech,speech-recognition[,variations] ` + +*or* + +`cmr "get ml-model rnnt raw librispeech speech-recognition"` + +`cmr "get ml-model rnnt raw librispeech speech-recognition [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,rnnt,raw,librispeech,speech-recognition',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
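+
+*The snippet below is a minimal sketch of consuming this script's output from Python; it assumes the call succeeds and that the produced environment is returned under the `new_env` key, as for other cached CM scripts:*
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,ml-model,rnnt,raw,librispeech,speech-recognition',
+                  'out': 'con'})
+if r['return'] > 0:
+    raise Exception(r['error'])
+
+# CM_ML_MODEL_FILE_WITH_PATH is one of the keys listed under "Script output" below
+print(r.get('new_env', {}).get('CM_ML_MODEL_FILE_WITH_PATH', ''))
+```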
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,ml-model,rnnt,raw,librispeech,speech-recognition"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ml-model,rnnt,raw,librispeech,speech-recognition) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get ml-model rnnt raw librispeech speech-recognition[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_pytorch,fp32` + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `0.07452253714852645` + - *CM_PACKAGE_URL*: `https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt?download=1` + - Workflow: + * `_pytorch,fp32,amazon-s3` + - Environment variables: + - *CM_PACKAGE_URL*: `https://mlperf-public.s3.us-west-2.amazonaws.com/DistributedDataParallel_1576581068.9962234-epoch-100.pt` + - Workflow: + * `_pytorch,fp32,zenodo` + - Environment variables: + - *CM_PACKAGE_URL*: `https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt?download=1` + - Workflow: + * `_weights` + - Environment variables: + - *CM_MODEL_WEIGHTS_FILE*: `yes` + - Workflow: + +
+ + + * Group "**download-src**" +
+ Click here to expand this section. + + * **`_amazon-s3`** (default) + - Workflow: + * `_zenodo` + - Workflow: + +
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * **`_pytorch`** (default) + - Environment variables: + - *CM_ML_MODEL_FRAMEWORK*: `pytorch` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `fp32` + - *CM_ML_MODEL_PRECISION*: `fp32` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `fp32` + - Workflow: + +
+ + +#### Default variations + +`_amazon-s3,_fp32,_pytorch` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
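+
+*For example, a minimal sketch of updating one of the script's environment keys from Python via the `env` dictionary (the CLI equivalent is `--env.CM_ML_MODEL_RETRAINING=yes`; the key comes from this script's meta):*
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,ml-model,rnnt,raw,librispeech,speech-recognition',
+                  'env': {'CM_ML_MODEL_RETRAINING': 'yes'},  # overrides the default "no"
+                  'out': 'con'})
+if r['return'] > 0:
+    raise Exception(r['error'])
+```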
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-rnnt/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-rnnt/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-rnnt/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-rnnt/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-rnnt/_cm.json) + +___ +### Script output +`cmr "get ml-model rnnt raw librispeech speech-recognition [,variations]" -j` +#### New environment keys (filter) + +* `CM_ML_MODEL_*` +#### New environment keys auto-detected from customize + +* `CM_ML_MODEL_FILE` +* `CM_ML_MODEL_FILE_WITH_PATH` +* `CM_ML_MODEL_PATH` \ No newline at end of file diff --git a/script/get-ml-model-rnnt/_cm.json b/script/get-ml-model-rnnt/_cm.json new file mode 100644 index 0000000000..ea149ea026 --- /dev/null +++ b/script/get-ml-model-rnnt/_cm.json @@ -0,0 +1,74 @@ +{ + "alias": "get-ml-model-rnnt", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML models", + "env": { + "CM_ML_MODEL": "rnnt", + "CM_ML_MODEL_DATASET": "librispeech", + "CM_ML_MODEL_RETRAINING": "no", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "no" + }, + "new_env_keys": [ + "CM_ML_MODEL_*" + ], + "tags": [ + "get", + "ml-model", + "rnnt", + "raw", + "librispeech", + "speech-recognition" + ], + "uid": "8858f18b89774d28", + "variations": { + "fp32": { + "default": true, + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "fp32", + "CM_ML_MODEL_PRECISION": "fp32", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "fp32" + }, + "group": "precision" + }, + "pytorch": { + "env": { + "CM_ML_MODEL_FRAMEWORK": "pytorch" + }, + "group": "framework", + "default": true + }, + "pytorch,fp32": { + "env": { + "CM_ML_MODEL_ACCURACY": "0.07452253714852645", + "CM_PACKAGE_URL": "https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt?download=1" + } + }, + "weights": { + "env": { + "CM_MODEL_WEIGHTS_FILE": "yes" + } + }, + "pytorch,fp32,amazon-s3": { + "env": { + "CM_PACKAGE_URL": "https://mlperf-public.s3.us-west-2.amazonaws.com/DistributedDataParallel_1576581068.9962234-epoch-100.pt" + } + }, + "pytorch,fp32,zenodo": { + "env": { + "CM_PACKAGE_URL": "https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt?download=1" + } + }, + "zenodo": { + "group": "download-src" + }, + "amazon-s3": { + "group": "download-src", + "default": true + } + }, + "print_env_at_the_end" : { + "CM_ML_MODEL_FILE_WITH_PATH": "Path to the ML model" + } +} diff --git a/script/get-ml-model-rnnt/customize.py b/script/get-ml-model-rnnt/customize.py new file mode 100644 index 0000000000..65961f1565 --- /dev/null +++ b/script/get-ml-model-rnnt/customize.py @@ -0,0 +1,38 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + cm = automation.cmind + + path = os.getcwd() + + url = env['CM_PACKAGE_URL'] + + print ('Downloading from {}'.format(url)) + + r = cm.access({'action':'download_file', + 
'automation':'utils,dc2743f8450541e3', + 'url':url}) + if r['return']>0: return r + + filename = r['filename'] + + if env.get('CM_UNZIP') == "yes": + os.system("unzip "+filename) + filename = env['CM_ML_MODEL_FILE'] + env['CM_ML_MODEL_FILE_WITH_PATH']=os.path.join(path, filename) + else: + # Add to path + env['CM_ML_MODEL_FILE']=filename + env['CM_ML_MODEL_FILE_WITH_PATH']=r['path'] + + env['CM_ML_MODEL_PATH']=path + + return {'return':0} diff --git a/script/get-ml-model-stable-diffusion/README.md b/script/get-ml-model-stable-diffusion/README.md new file mode 100644 index 0000000000..7c89c90c4f --- /dev/null +++ b/script/get-ml-model-stable-diffusion/README.md @@ -0,0 +1,256 @@ +Automatically generated README for this automation recipe: **get-ml-model-stable-diffusion** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-stable-diffusion,22c6516b2d4d4c23) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-stable-diffusion)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,raw,ml-model,stable-diffusion,sdxl,text-to-image* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get raw ml-model stable-diffusion sdxl text-to-image" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,raw,ml-model,stable-diffusion,sdxl,text-to-image` + +`cm run script --tags=get,raw,ml-model,stable-diffusion,sdxl,text-to-image[,variations] [--input_flags]` + +*or* + +`cmr "get raw ml-model stable-diffusion sdxl text-to-image"` + +`cmr "get raw ml-model stable-diffusion sdxl text-to-image [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,raw,ml-model,stable-diffusion,sdxl,text-to-image',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
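+
+*A minimal sketch of selecting variations from Python by appending them to `tags` (here the documented `_fp16` and `_rclone` variations, which together select the `CM_DOWNLOAD_URL` shown under "Variations" below):*
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,raw,ml-model,stable-diffusion,sdxl,text-to-image,_fp16,_rclone',
+                  'out': 'con'})
+if r['return'] > 0:
+    raise Exception(r['error'])
+```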
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,raw,ml-model,stable-diffusion,sdxl,text-to-image"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,raw,ml-model,stable-diffusion,sdxl,text-to-image) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get raw ml-model stable-diffusion sdxl text-to-image[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_batch_size.#` + - Environment variables: + - *CM_ML_MODEL_BATCH_SIZE*: `#` + - Workflow: + * `_pytorch,fp16` + - Workflow: + * `_pytorch,fp32` + - Environment variables: + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0` + - Workflow: + * `_rclone,fp16` + - Environment variables: + - *CM_DOWNLOAD_URL*: `mlc-inference:mlcommons-inference-wg-public/stable_diffusion_fp16` + - Workflow: + * `_rclone,fp32` + - Environment variables: + - *CM_DOWNLOAD_URL*: `mlc-inference:mlcommons-inference-wg-public/stable_diffusion_fp32` + - Workflow: + +
+ + + * Group "**download-source**" +
+ Click here to expand this section. + + * `_huggingface` + - Workflow: + * **`_mlcommons`** (default) + - Workflow: + +
+ + + * Group "**download-tool**" +
+ Click here to expand this section. + + * `_git` + - Environment variables: + - *CM_DOWNLOAD_TOOL*: `git` + - Workflow: + * `_rclone` + - Environment variables: + - *CM_RCLONE_CONFIG_CMD*: `rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com` + - *CM_DOWNLOAD_TOOL*: `rclone` + - Workflow: + * `_wget` + - Environment variables: + - *CM_DOWNLOAD_TOOL*: `wget` + - Workflow: + +
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * **`_pytorch`** (default) + - Environment variables: + - *CM_ML_MODEL_FRAMEWORK*: `pytorch` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * `_fp16` + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `fp16` + - *CM_ML_MODEL_PRECISION*: `fp16` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `fp16` + - Workflow: + * **`_fp32`** (default) + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `fp32` + - *CM_ML_MODEL_PRECISION*: `fp32` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `fp32` + - Workflow: + * `_int8` + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `int8` + - *CM_ML_MODEL_PRECISION*: `int8` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `int8` + - Workflow: + * `_uint8` + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `uint8` + - *CM_ML_MODEL_PRECISION*: `uint8` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `uint8` + - Workflow: + +
+ + +#### Default variations + +`_fp32,_mlcommons,_pytorch` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--checkpoint=value`  →  `SDXL_CHECKPOINT_PATH=value`
+* `--download_path=value`  →  `CM_DOWNLOAD_PATH=value`
+* `--to=value`  →  `CM_DOWNLOAD_PATH=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "checkpoint":...})
+```
+
+</details>
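+
+*For instance, a sketch of passing an already-downloaded checkpoint so that the download is skipped (per `customize.py`, the download is only triggered when `SDXL_CHECKPOINT_PATH` is unset or does not exist); the path below is a placeholder:*
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,raw,ml-model,stable-diffusion,sdxl,text-to-image',
+                  'checkpoint': '/path/to/stable-diffusion-xl-base-1.0',  # placeholder path
+                  'out': 'con'})
+if r['return'] > 0:
+    raise Exception(r['error'])
+```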
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-stable-diffusion/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-stable-diffusion/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-stable-diffusion/_cm.json)*** + * get,ml-model,huggingface,zoo,_clone-repo,_model-stub.stabilityai/stable-diffusion-xl-base-1.0 + * `if (CM_TMP_REQUIRE_DOWNLOAD == yes AND CM_DOWNLOAD_TOOL == git)` + * CM names: `--adr.['hf-zoo']...` + - CM script: [get-ml-model-huggingface-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-huggingface-zoo) + * download-and-extract + * `if (CM_TMP_REQUIRE_DOWNLOAD == yes AND CM_DOWNLOAD_TOOL == rclone)` + * CM names: `--adr.['dae']...` + - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-stable-diffusion/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-stable-diffusion/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-stable-diffusion/_cm.json) + +___ +### Script output +`cmr "get raw ml-model stable-diffusion sdxl text-to-image [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_ML_MODEL_*` +* `SDXL_CHECKPOINT_PATH` +#### New environment keys auto-detected from customize diff --git a/script/get-ml-model-stable-diffusion/_cm.json b/script/get-ml-model-stable-diffusion/_cm.json new file mode 100644 index 0000000000..39390a193d --- /dev/null +++ b/script/get-ml-model-stable-diffusion/_cm.json @@ -0,0 +1,186 @@ +{ + "alias": "get-ml-model-stable-diffusion", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML models", + "env": { + "CM_ML_MODEL": "SDXL", + "CM_ML_MODEL_DATASET": "openorca", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "no" + }, + "input_mapping": { + "checkpoint": "SDXL_CHECKPOINT_PATH", + "download_path": "CM_DOWNLOAD_PATH", + "to": "CM_DOWNLOAD_PATH" + }, + "new_env_keys": [ + "CM_ML_MODEL_*", + "SDXL_CHECKPOINT_PATH" + ], + "prehook_deps": [ + { + "enable_if_env": { + "CM_TMP_REQUIRE_DOWNLOAD": [ + "yes" + ], + "CM_DOWNLOAD_TOOL": [ + "git" + ] + }, + "env": { + "CM_MODEL_ZOO_ENV_KEY": "SDXL", + "CM_GIT_CHECKOUT_FOLDER": "stable-diffusion-xl-base-1.0" + }, + "force_env_keys": [ + "CM_GIT_CHECKOUT_FOLDER" + ], + "names": [ + "hf-zoo" + ], + "tags": "get,ml-model,huggingface,zoo,_clone-repo,_model-stub.stabilityai/stable-diffusion-xl-base-1.0" + }, + { + "env": { + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_ML_MODEL_PATH" + }, + "tags": "download-and-extract", + "update_tags_from_env_with_prefix": { + "_url.": [ + "CM_DOWNLOAD_URL" + ] + }, + "enable_if_env": { + "CM_TMP_REQUIRE_DOWNLOAD": [ "yes" ], + "CM_DOWNLOAD_TOOL": [ + "rclone" + ] + }, + "force_cache": true, + "extra_cache_tags": "stable-diffusion,sdxl,model", + "names": [ + "dae" + ] + } + ], + "tags": [ + "get", + "raw", + "ml-model", + "stable-diffusion", + "sdxl", + "text-to-image" + ], + "uid": 
"22c6516b2d4d4c23", + "variations": { + "batch_size.#": { + "env": { + "CM_ML_MODEL_BATCH_SIZE": "#" + } + }, + "fp32": { + "default": true, + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "fp32", + "CM_ML_MODEL_PRECISION": "fp32", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "fp32" + }, + "group": "precision" + }, + "fp16": { + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "fp16", + "CM_ML_MODEL_PRECISION": "fp16", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "fp16" + }, + "group": "precision" + }, + "int8": { + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "int8", + "CM_ML_MODEL_PRECISION": "int8", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "int8" + }, + "group": "precision" + }, + "pytorch": { + "default": true, + "env": { + "CM_ML_MODEL_FRAMEWORK": "pytorch" + }, + "group": "framework" + }, + "pytorch,fp16": { + "required_disk_space": 6500 + }, + "pytorch,fp32": { + "env": { + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0" + }, + "required_disk_space": 13000 + }, + "uint8": { + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "uint8", + "CM_ML_MODEL_PRECISION": "uint8", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "uint8" + }, + "group": "precision" + }, + "huggingface": { + "group": "download-source", + "default_variations": { + "download-tool": "git" + } + }, + "mlcommons": { + "group": "download-source", + "default": true, + "default_variations": { + "download-tool": "rclone" + } + }, + "git": { + "group": "download-tool", + "env": { + "CM_DOWNLOAD_TOOL": "git" + } + }, + "wget": { + "group": "download-tool", + "env": { + "CM_DOWNLOAD_TOOL": "wget" + }, + "adr": { + "dae": { + "tags": "_wget" + } + } + }, + "rclone": { + "group": "download-tool", + "env": { + "CM_RCLONE_CONFIG_CMD": "rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com", + "CM_DOWNLOAD_TOOL": "rclone" + }, + "adr": { + "dae": { + "tags": "_rclone" + } + } + }, + "rclone,fp32": { + "env": { + "CM_DOWNLOAD_URL": "mlc-inference:mlcommons-inference-wg-public/stable_diffusion_fp32" + } + }, + "rclone,fp16": { + "env": { + "CM_DOWNLOAD_URL": "mlc-inference:mlcommons-inference-wg-public/stable_diffusion_fp16" + } + } + }, + "print_env_at_the_end" : { + "SDXL_CHECKPOINT_PATH": "Stable diffusion checkpoint path" + } +} diff --git a/script/get-ml-model-stable-diffusion/customize.py b/script/get-ml-model-stable-diffusion/customize.py new file mode 100644 index 0000000000..6f001eb274 --- /dev/null +++ b/script/get-ml-model-stable-diffusion/customize.py @@ -0,0 +1,23 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + path = env.get('SDXL_CHECKPOINT_PATH', '').strip() + + if path == '' or not os.path.exists(path): + env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env['SDXL_CHECKPOINT_PATH'] = env['CM_ML_MODEL_PATH'] + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH'] + + return {'return':0} diff --git a/script/get-ml-model-tiny-resnet/README.md b/script/get-ml-model-tiny-resnet/README.md new file mode 100644 index 0000000000..2b9bfe1b31 --- /dev/null +++ b/script/get-ml-model-tiny-resnet/README.md @@ -0,0 +1,214 @@ +Automatically generated README for this automation recipe: **get-ml-model-tiny-resnet** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public 
MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-tiny-resnet,dd5ec11c3f6e49eb) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-tiny-resnet)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,raw,ml-model,resnet,pretrained,tiny,model,ic,ml-model-tiny-resnet,image-classification* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,raw,ml-model,resnet,pretrained,tiny,model,ic,ml-model-tiny-resnet,image-classification` + +`cm run script --tags=get,raw,ml-model,resnet,pretrained,tiny,model,ic,ml-model-tiny-resnet,image-classification[,variations] ` + +*or* + +`cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification"` + +`cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,raw,ml-model,resnet,pretrained,tiny,model,ic,ml-model-tiny-resnet,image-classification',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
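+
+*A minimal sketch, assuming the standard `adr` ("add deps recursive") input of CM scripts, of steering a named dependency from Python; the `dependent-model` name and the `_int8` tag come from this script's meta:*
+
+```python
+import cmind
+
+# CLI equivalent: cm run script --tags=...,_onnx --adr.dependent-model.tags=_int8
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,raw,ml-model,resnet,pretrained,tiny,model,ic,ml-model-tiny-resnet,image-classification,_onnx',
+                  'adr': {'dependent-model': {'tags': '_int8'}},
+                  'out': 'con'})
+if r['return'] > 0:
+    raise Exception(r['error'])
+```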
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,raw,ml-model,resnet,pretrained,tiny,model,ic,ml-model-tiny-resnet,image-classification"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,raw,ml-model,resnet,pretrained,tiny,model,ic,ml-model-tiny-resnet,image-classification) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_batch_size.#` + - Environment variables: + - *CM_ML_MODEL_BATCH_SIZE*: `#` + - Workflow: + * `_tflite,int8` + - Environment variables: + - *CM_PACKAGE_URL*: `https://github.com/mlcommons/tiny/raw/master/benchmark/training/image_classification/trained_models/pretrainedResnet_quant.tflite` + - *CM_DOWNLOAD_CHECKSUM*: `2d6dd48722471313e4c4528249205ae3` + - Workflow: + +
+ + + * Group "**framework**" +
+ Click here to expand this section. + + * `_onnx` + - Environment variables: + - *CM_TMP_ML_MODEL_TF2ONNX*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,python3 + * CM names: `--adr.['python,python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,tiny,model,resnet,_tflite + * CM names: `--adr.['tflite-resnet-model', 'dependent-model']...` + - CM script: [get-ml-model-tiny-resnet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-tiny-resnet) + * get,generic-python-lib,_package.tf2onnx + * CM names: `--adr.['tf2onnx']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * **`_tflite`** (default) + - Environment variables: + - *CM_ML_MODEL_ACCURACY*: `85` + - *CM_ML_MODEL_DATA_LAYOUT*: `NHWC` + - *CM_ML_MODEL_FRAMEWORK*: `tflite` + - *CM_ML_MODEL_GIVEN_CHANNEL_MEANS*: `` + - *CM_ML_MODEL_INPUT_LAYERS*: `` + - *CM_ML_MODEL_INPUT_LAYER_NAME*: `` + - *CM_ML_MODEL_INPUT_SHAPES*: `` + - *CM_ML_MODEL_NORMALIZE_DATA*: `0` + - *CM_ML_MODEL_OUTPUT_LAYERS*: `` + - *CM_ML_MODEL_OUTPUT_LAYER_NAME*: `` + - *CM_ML_MODEL_STARTING_WEIGHTS_FILENAME*: `<<>>` + - *CM_ML_MODEL_SUBTRACT_MEANS*: `YES` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * `_fp32` + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `fp32` + - *CM_ML_MODEL_PRECISION*: `fp32` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `fp32` + - Workflow: + * **`_int8`** (default) + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `int8` + - *CM_ML_MODEL_PRECISION*: `int8` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `int8` + - Workflow: + * `_uint8` + - Environment variables: + - *CM_ML_MODEL_INPUT_DATA_TYPES*: `uint8` + - *CM_ML_MODEL_PRECISION*: `uint8` + - *CM_ML_MODEL_WEIGHT_DATA_TYPES*: `uint8` + - Workflow: + +
+ + +#### Default variations + +`_int8,_tflite` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
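+
+*For reference, the `_onnx` variation assembles a `tf2onnx` conversion command in `customize.py`; a standalone sketch of the same conversion, with a placeholder input file name:*
+
+```python
+import subprocess
+import sys
+
+# mirrors the CM_RUN_CMD built in customize.py:
+#   python -m tf2onnx.convert --tflite <model> --output model_quant.onnx --inputs-as-nchw "input_1_int8"
+subprocess.run([sys.executable, '-m', 'tf2onnx.convert',
+                '--tflite', 'pretrainedResnet_quant.tflite',  # placeholder input
+                '--output', 'model_quant.onnx',               # default output name in customize.py
+                '--inputs-as-nchw', 'input_1_int8'],
+               check=True)
+```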
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-tiny-resnet/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-tiny-resnet/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-tiny-resnet/_cm.json)*** + * download-and-extract + * `if (CM_PACKAGE_URL == on)` + - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-tiny-resnet/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-tiny-resnet/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-tiny-resnet/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-tiny-resnet/_cm.json) + +___ +### Script output +`cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification [,variations]" -j` +#### New environment keys (filter) + +* `CM_ML_MODEL_*` +#### New environment keys auto-detected from customize + +* `CM_ML_MODEL_FILE` +* `CM_ML_MODEL_FILE_WITH_PATH` \ No newline at end of file diff --git a/script/get-ml-model-tiny-resnet/_cm.json b/script/get-ml-model-tiny-resnet/_cm.json new file mode 100644 index 0000000000..e9a57d6758 --- /dev/null +++ b/script/get-ml-model-tiny-resnet/_cm.json @@ -0,0 +1,144 @@ +{ + "alias": "get-ml-model-tiny-resnet", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML models", + "env": { + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH", + "CM_ML_MODEL": "RESNET", + "CM_ML_MODEL_DATASET": "cifar-10", + "CM_ML_MODEL_IMAGE_HEIGHT": "32", + "CM_ML_MODEL_IMAGE_WIDTH": "32", + "CM_ML_MODEL_NORMALIZE_DATA": "0", + "CM_ML_MODEL_RETRAINING": "no", + "CM_ML_MODEL_SUBTRACT_MEANS": "YES", + "CM_ML_MODEL_WEIGHT_TRANSFORMATIONS": "no" + }, + "new_env_keys": [ + "CM_ML_MODEL_*" + ], + "prehook_deps": [ + { + "env": { + "CM_EXTRACT_EXTRACTED_FILENAME": "<<>>" + }, + "tags": "download-and-extract", + "update_tags_from_env_with_prefix": { + "_url.": [ + "CM_PACKAGE_URL" + ] + }, + "enable_if_env": { + "CM_PACKAGE_URL": [ "on" ] + } + } + ], + "tags": [ + "get", + "raw", + "ml-model", + "resnet", + "pretrained", + "tiny", + "model", + "ic", + "ml-model-tiny-resnet", + "image-classification" + ], + "uid": "dd5ec11c3f6e49eb", + "variations": { + "batch_size.#": { + "env": { + "CM_ML_MODEL_BATCH_SIZE": "#" + } + }, + "fp32": { + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "fp32", + "CM_ML_MODEL_PRECISION": "fp32", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "fp32" + }, + "group": "precision", + "add_deps_tags": { + "dependent-model": { + "tags": "_int8" + } + } + }, + "int8": { + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "int8", + "CM_ML_MODEL_PRECISION": "int8", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "int8" + }, + "group": "precision", + "default": true, + "add_deps_recursive": { + "dependent-model": { + "tags": "_int8" + } + } + }, + "tflite": { + "env": { + "CM_ML_MODEL_ACCURACY": "85", + "CM_ML_MODEL_DATA_LAYOUT": "NHWC", + 
"CM_ML_MODEL_FRAMEWORK": "tflite", + "CM_ML_MODEL_GIVEN_CHANNEL_MEANS": "", + "CM_ML_MODEL_INPUT_LAYERS": "", + "CM_ML_MODEL_INPUT_LAYER_NAME": "", + "CM_ML_MODEL_INPUT_SHAPES": "", + "CM_ML_MODEL_NORMALIZE_DATA": "0", + "CM_ML_MODEL_OUTPUT_LAYERS": "", + "CM_ML_MODEL_OUTPUT_LAYER_NAME": "", + "CM_ML_MODEL_STARTING_WEIGHTS_FILENAME": "<<>>", + "CM_ML_MODEL_SUBTRACT_MEANS": "YES" + }, + "group": "framework", + "default": true + }, + "tflite,int8": { + "env": { + "CM_PACKAGE_URL": "https://github.com/mlcommons/tiny/raw/master/benchmark/training/image_classification/trained_models/pretrainedResnet_quant.tflite", + "CM_DOWNLOAD_CHECKSUM": "2d6dd48722471313e4c4528249205ae3" + } + }, + "onnx": { + "deps": [ + { + "names": [ "python,python3" ], + "tags": "get,python3" + }, + { + "names": [ "tflite-resnet-model", "dependent-model" ], + "tags": "get,tiny,model,resnet,_tflite" + }, + { + "names": [ "tf2onnx" ], + "tags": "get,generic-python-lib,_package.tf2onnx" + } + ], + "env": { + "CM_TMP_ML_MODEL_TF2ONNX": "yes" + }, + "group": "framework" + }, + "uint8": { + "env": { + "CM_ML_MODEL_INPUT_DATA_TYPES": "uint8", + "CM_ML_MODEL_PRECISION": "uint8", + "CM_ML_MODEL_WEIGHT_DATA_TYPES": "uint8" + }, + "group": "precision", + "add_deps_tags": { + "dependent-model": { + "tags": "_int8" + } + } + } + }, + "print_env_at_the_end" : { + "CM_ML_MODEL_FILE_WITH_PATH": "Path to the ML model" + } +} diff --git a/script/get-ml-model-tiny-resnet/customize.py b/script/get-ml-model-tiny-resnet/customize.py new file mode 100644 index 0000000000..4e690eaf15 --- /dev/null +++ b/script/get-ml-model-tiny-resnet/customize.py @@ -0,0 +1,25 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if env.get("CM_TMP_ML_MODEL_TF2ONNX", "") == "yes": + outputfile = env.get('CM_ML_MODEL_OUTFILE', 'model_quant.onnx') + env['CM_RUN_CMD'] = env['CM_PYTHON_BIN_WITH_PATH'] + " -m tf2onnx.convert --tflite " + env['CM_ML_MODEL_FILE_WITH_PATH'] + " --output " + outputfile + " --inputs-as-nchw \"input_1_int8\"" + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(os.getcwd(), outputfile) + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env['CM_ML_MODEL_FILE'] = os.path.basename(env['CM_ML_MODEL_FILE_WITH_PATH']) + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + + return {'return':0} + diff --git a/script/get-ml-model-tiny-resnet/run.sh b/script/get-ml-model-tiny-resnet/run.sh new file mode 100644 index 0000000000..e935cf158c --- /dev/null +++ b/script/get-ml-model-tiny-resnet/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash +echo ${CM_RUN_CMD} +eval ${CM_RUN_CMD} +test $? 
-eq 0 || exit 1 diff --git a/script/get-ml-model-tiny-resnet/run_config.yml b/script/get-ml-model-tiny-resnet/run_config.yml new file mode 100644 index 0000000000..938e3b641b --- /dev/null +++ b/script/get-ml-model-tiny-resnet/run_config.yml @@ -0,0 +1,6 @@ +docker: + build: true + docker_os: ubuntu + docker_os_version: "22.04" + +run_with_default_inputs: true #if false the script won't run automatic tests diff --git a/script/get-ml-model-using-imagenet-from-model-zoo/README.md b/script/get-ml-model-using-imagenet-from-model-zoo/README.md new file mode 100644 index 0000000000..e92f5fdf52 --- /dev/null +++ b/script/get-ml-model-using-imagenet-from-model-zoo/README.md @@ -0,0 +1,149 @@ +Automatically generated README for this automation recipe: **get-ml-model-using-imagenet-from-model-zoo** + +Category: **AI/ML models** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-ml-model-using-imagenet-from-model-zoo,153e08828c4e45cc) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-using-imagenet-from-model-zoo)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,ml-model,model-zoo,zoo,imagenet,image-classification* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get ml-model model-zoo zoo imagenet image-classification" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,ml-model,model-zoo,zoo,imagenet,image-classification` + +`cm run script --tags=get,ml-model,model-zoo,zoo,imagenet,image-classification[,variations] ` + +*or* + +`cmr "get ml-model model-zoo zoo imagenet image-classification"` + +`cmr "get ml-model model-zoo zoo imagenet image-classification [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model,model-zoo,zoo,imagenet,image-classification',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,ml-model,model-zoo,zoo,imagenet,image-classification"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ml-model,model-zoo,zoo,imagenet,image-classification) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get ml-model model-zoo zoo imagenet image-classification[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**model-source**" +
+ Click here to expand this section. + + * `_model.#` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,ml-model,zoo,deepsparse,_model-stub.# + * CM names: `--adr.['neural-magic-zoo-downloader']...` + - CM script: [get-ml-model-neuralmagic-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-neuralmagic-zoo) + * `_model.resnet101-pytorch-base` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,ml-model,zoo,deepsparse,_model-stub.zoo:cv/classification/resnet_v1-101/pytorch/sparseml/imagenet/base-none + * CM names: `--adr.['neural-magic-zoo-downloader']...` + - CM script: [get-ml-model-neuralmagic-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-neuralmagic-zoo) + * `_model.resnet50-pruned95-uniform-quant` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,ml-model,zoo,deepsparse,_model-stub.zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned95_uniform_quant-none + * CM names: `--adr.['neural-magic-zoo-downloader']...` + - CM script: [get-ml-model-neuralmagic-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-neuralmagic-zoo) + +
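+
+*A minimal sketch of selecting a model stub through the wildcard `_model.#` variation from Python (the stub below corresponds to one of the documented variations):*
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,ml-model,model-zoo,zoo,imagenet,image-classification,_model.resnet50-pruned95-uniform-quant',
+                  'out': 'con'})
+if r['return'] > 0:
+    raise Exception(r['error'])
+```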
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-using-imagenet-from-model-zoo/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-using-imagenet-from-model-zoo/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-using-imagenet-from-model-zoo/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-using-imagenet-from-model-zoo/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-using-imagenet-from-model-zoo/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-ml-model-using-imagenet-from-model-zoo/_cm.json) + +___ +### Script output +`cmr "get ml-model model-zoo zoo imagenet image-classification [,variations]" -j` +#### New environment keys (filter) + +* `CM_ML_MODEL*` +#### New environment keys auto-detected from customize diff --git a/script/get-ml-model-using-imagenet-from-model-zoo/_cm.json b/script/get-ml-model-using-imagenet-from-model-zoo/_cm.json new file mode 100644 index 0000000000..e53d6160b7 --- /dev/null +++ b/script/get-ml-model-using-imagenet-from-model-zoo/_cm.json @@ -0,0 +1,58 @@ +{ + "alias": "get-ml-model-using-imagenet-from-model-zoo", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML models", + "env": { + "CM_ML_MODEL": "resnet", + "CM_ML_MODEL_DATASET": "imagenet" + }, + "new_env_keys": [ + "CM_ML_MODEL*" + ], + "tags": [ + "get", + "ml-model", + "model-zoo", + "zoo", + "imagenet", + "image-classification" + ], + "uid": "153e08828c4e45cc", + "variations": { + "model.#": { + "deps": [ + { + "names": [ + "neural-magic-zoo-downloader" + ], + "tags": "get,ml-model,zoo,deepsparse,_model-stub.#" + } + ], + "group": "model-source" + }, + "model.resnet101-pytorch-base": { + "deps": [ + { + "names": [ + "neural-magic-zoo-downloader" + ], + "tags": "get,ml-model,zoo,deepsparse,_model-stub.zoo:cv/classification/resnet_v1-101/pytorch/sparseml/imagenet/base-none" + } + ], + "group": "model-source" + }, + "model.resnet50-pruned95-uniform-quant": { + "deps": [ + { + "names": [ + "neural-magic-zoo-downloader" + ], + "tags": "get,ml-model,zoo,deepsparse,_model-stub.zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned95_uniform_quant-none" + } + ], + "group": "model-source" + } + } +} diff --git a/script/get-ml-model-using-imagenet-from-model-zoo/customize.py b/script/get-ml-model-using-imagenet-from-model-zoo/customize.py new file mode 100644 index 0000000000..4fba39521b --- /dev/null +++ b/script/get-ml-model-using-imagenet-from-model-zoo/customize.py @@ -0,0 +1,20 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + cm = automation.cmind + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/script/get-mlperf-inference-intel-scratch-space/README.md b/script/get-mlperf-inference-intel-scratch-space/README.md new file mode 100644 index 0000000000..19199506b2 --- /dev/null +++ 
b/script/get-mlperf-inference-intel-scratch-space/README.md @@ -0,0 +1,163 @@ +Automatically generated README for this automation recipe: **get-mlperf-inference-intel-scratch-space** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-inference-intel-scratch-space,e83fca30851f45ef) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-intel-scratch-space)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,mlperf,inference,intel,scratch,space* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get mlperf inference intel scratch space" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,mlperf,inference,intel,scratch,space` + +`cm run script --tags=get,mlperf,inference,intel,scratch,space[,variations] [--input_flags]` + +*or* + +`cmr "get mlperf inference intel scratch space"` + +`cmr "get mlperf inference intel scratch space [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,mlperf,inference,intel,scratch,space',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,mlperf,inference,intel,scratch,space"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,mlperf,inference,intel,scratch,space) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get mlperf inference intel scratch space[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * Group "**version**" +
+ Click here to expand this section. + + * `_version.#` + - Environment variables: + - *CM_INTEL_SCRATCH_SPACE_VERSION*: `#` + - Workflow: + * **`_version.4_0`** (default) + - Environment variables: + - *CM_INTEL_SCRATCH_SPACE_VERSION*: `4_0` + - Workflow: + +
+ + +#### Default variations + +`_version.4_0` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--scratch_path=value`  →  `MLPERF_INTEL_SCRATCH_PATH=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "scratch_path":...})
+```
+
+</details>
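+
+*For instance, a sketch of passing the documented `--scratch_path` flag from Python and reading back the scratch location; the path is a placeholder, and, as in the earlier sketches, the produced keys are assumed to be returned under `new_env`:*
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,mlperf,inference,intel,scratch,space,_version.4_0',
+                  'scratch_path': '/data/intel-scratch',  # placeholder path
+                  'out': 'con'})
+if r['return'] > 0:
+    raise Exception(r['error'])
+print(r.get('new_env', {}).get('CM_INTEL_MLPERF_SCRATCH_PATH', ''))
+```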
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-intel-scratch-space/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-intel-scratch-space/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-intel-scratch-space/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-intel-scratch-space/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-intel-scratch-space/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-intel-scratch-space/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-intel-scratch-space/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-intel-scratch-space/_cm.json) + +___ +### Script output +`cmr "get mlperf inference intel scratch space [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_INTEL_MLPERF_SCRATCH_PATH` +* `CM_INTEL_SCRATCH_SPACE_VERSION` +#### New environment keys auto-detected from customize + +* `CM_INTEL_MLPERF_SCRATCH_PATH` \ No newline at end of file diff --git a/script/get-mlperf-inference-intel-scratch-space/_cm.json b/script/get-mlperf-inference-intel-scratch-space/_cm.json new file mode 100644 index 0000000000..3b2b650973 --- /dev/null +++ b/script/get-mlperf-inference-intel-scratch-space/_cm.json @@ -0,0 +1,48 @@ +{ + "alias": "get-mlperf-inference-intel-scratch-space", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "deps": [], + "docker": { + "run": false + }, + "input_description": {}, + "input_mapping": { + "scratch_path": "MLPERF_INTEL_SCRATCH_PATH" + }, + "new_env_keys": [ + "CM_INTEL_MLPERF_SCRATCH_PATH", + "CM_INTEL_SCRATCH_SPACE_VERSION" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "get", + "mlperf", + "inference", + "intel", + "scratch", + "space" + ], + "uid": "e83fca30851f45ef", + "variations": { + "version.#": { + "group": "version", + "env": { + "CM_INTEL_SCRATCH_SPACE_VERSION": "#" + } + }, + "version.4_0": { + "group": "version", + "default": true, + "env": { + "CM_INTEL_SCRATCH_SPACE_VERSION": "4_0" + } + } + }, + "versions": {} +} diff --git a/script/get-mlperf-inference-intel-scratch-space/customize.py b/script/get-mlperf-inference-intel-scratch-space/customize.py new file mode 100644 index 0000000000..37d9f4a5ed --- /dev/null +++ b/script/get-mlperf-inference-intel-scratch-space/customize.py @@ -0,0 +1,27 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_INTEL_MLPERF_SCRATCH_PATH', '') == '': + env['CM_INTEL_MLPERF_SCRATCH_PATH'] = os.getcwd() + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = 
env['CM_INTEL_MLPERF_SCRATCH_PATH']
+
+    return {'return':0}
diff --git a/script/get-mlperf-inference-intel-scratch-space/run.bat b/script/get-mlperf-inference-intel-scratch-space/run.bat
new file mode 100644
index 0000000000..648302ca71
--- /dev/null
+++ b/script/get-mlperf-inference-intel-scratch-space/run.bat
@@ -0,0 +1 @@
+rem native script
diff --git a/script/get-mlperf-inference-intel-scratch-space/run.sh b/script/get-mlperf-inference-intel-scratch-space/run.sh
new file mode 100644
index 0000000000..eb5ce24565
--- /dev/null
+++ b/script/get-mlperf-inference-intel-scratch-space/run.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+
+
+function exit_if_error() {
+  test $? -eq 0 || exit $?
+}
+
+function run() {
+  echo "Running: "
+  echo "$1"
+  echo ""
+  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+    eval "$1"
+    exit_if_error
+  fi
+}
+
+#Add your run commands here...
+# run "$CM_RUN_CMD"
+
+scratch_path=${CM_INTEL_MLPERF_SCRATCH_PATH}
+mkdir -p ${scratch_path}/data
+mkdir -p ${scratch_path}/preprocessed_data
+mkdir -p ${scratch_path}/models
diff --git a/script/get-mlperf-inference-loadgen/README-extra.md b/script/get-mlperf-inference-loadgen/README-extra.md
new file mode 100644
index 0000000000..7af6a0e4ac
--- /dev/null
+++ b/script/get-mlperf-inference-loadgen/README-extra.md
@@ -0,0 +1,26 @@
+# Get MLCommons Inference Loadgen
+
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) builds and installs
+the Loadgen library from the [MLCommons Inference repository](https://github.com/mlcommons/inference).
+
+## Commands
+To install:
+```
+cm run script --tags=get,mlperf,inference,loadgen --version=[VERSION]
+```
+where [VERSION] is one of:
+* `master:` uses the master branch of the inference source repository to build loadgen
+* `r2.1:` uses the release branch used for the MLCommons inference 2.1 round to build loadgen
+
+## Exported Variables
+* `C_INCLUDE_PATH`
+* `CPLUS_INCLUDE_PATH`
+* `LD_LIBRARY_PATH`
+* `DYLD_FALLBACK_LIBRARY_PATH`
+* `PYTHONPATH`
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
+3.
Windows (installs into Python distro directly) diff --git a/script/get-mlperf-inference-loadgen/README.md b/script/get-mlperf-inference-loadgen/README.md new file mode 100644 index 0000000000..f404773aa8 --- /dev/null +++ b/script/get-mlperf-inference-loadgen/README.md @@ -0,0 +1,222 @@ +Automatically generated README for this automation recipe: **get-mlperf-inference-loadgen** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-inference-loadgen,64c3d98d0ba04950) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-loadgen)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *get,loadgen,inference,inference-loadgen,mlperf,mlcommons* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get loadgen inference inference-loadgen mlperf mlcommons" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,loadgen,inference,inference-loadgen,mlperf,mlcommons` + +`cm run script --tags=get,loadgen,inference,inference-loadgen,mlperf,mlcommons[,variations] ` + +*or* + +`cmr "get loadgen inference inference-loadgen mlperf mlcommons"` + +`cmr "get loadgen inference inference-loadgen mlperf mlcommons [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,loadgen,inference,inference-loadgen,mlperf,mlcommons' + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,loadgen,inference,inference-loadgen,mlperf,mlcommons"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,loadgen,inference,inference-loadgen,mlperf,mlcommons) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get loadgen inference inference-loadgen mlperf mlcommons[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_copy` + - Workflow: + * `_custom-python` + - Environment variables: + - *CM_TMP_USE_CUSTOM_PYTHON*: `on` + - Workflow: + * `_download` + - Environment variables: + - *CM_DOWNLOAD_CHECKSUM*: `af3f9525965b2c1acc348fb882a5bfd1` + - *CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD*: `YES` + - *CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL*: `https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0` + - *CM_MLPERF_INFERENCE_LOADGEN_VERSION*: `v3.1` + - *CM_VERIFY_SSL*: `False` + - Workflow: + * `_download_v3.1` + - Environment variables: + - *CM_DOWNLOAD_CHECKSUM*: `af3f9525965b2c1acc348fb882a5bfd1` + - *CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD*: `YES` + - *CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL*: `https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0` + - *CM_MLPERF_INFERENCE_LOADGEN_VERSION*: `v3.1` + - *CM_VERIFY_SSL*: `False` + - Workflow: + * `_download_v4.0` + - Environment variables: + - *CM_DOWNLOAD_CHECKSUM*: `b4d97525d9ad0539a64667f2a3ca20c5` + - *CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD*: `YES` + - *CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL*: `https://www.dropbox.com/scl/fi/gk5e9kziju5t56umxyzyx/loadgen.zip?rlkey=vsie4xnzml1inpjplm5cg7t54&dl=0` + - *CM_MLPERF_INFERENCE_LOADGEN_VERSION*: `v4.0` + - *CM_VERIFY_SSL*: `False` + - Workflow: + +
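+
+  For example, one possible invocation selecting the `_download_v4.0` variation listed above (a sketch; the download URL and checksum come from that variation's environment variables):
+
+```
+cm run script --tags=get,loadgen,inference,inference-loadgen,mlperf,mlcommons,_download_v4.0
+```
+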
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_SHARED_BUILD: `no` + +
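+
+For instance, the key shown above can be overridden on the command line (any other supported input key can be passed the same way):
+
+```
+cm run script --tags=get,loadgen,inference,inference-loadgen,mlperf,mlcommons --env.CM_SHARED_BUILD=yes
+```
+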
+ +#### Versions +Default version: `master` + +* `custom` +* `main` +* `master` +* `pybind_fix` +* `r2.1` +* `r3.0` +* `r3.1` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-loadgen/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,python3 + * CM names: `--adr.['python3', 'python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,mlcommons,inference,src + * `if (CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD != YES)` + * CM names: `--adr.['inference-src-loadgen']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + * download-and-extract,file,_wget,_extract + * `if (CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD == YES)` + * CM names: `--adr.['inference-src-loadgen-download']...` + - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract) + * get,compiler + * `if (CM_HOST_OS_TYPE != windows)` + * CM names: `--adr.['compiler']...` + - CM script: [get-cl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cl) + - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc) + - CM script: [get-llvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-llvm) + * get,cl + * `if (CM_HOST_OS_TYPE == windows)` + * CM names: `--adr.['compiler']...` + - CM script: [get-cl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cl) + * get,cmake + * CM names: `--adr.['cmake']...` + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * get,generic-python-lib,_package.wheel + * CM names: `--adr.['pip-package', 'wheel']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_pip + * CM names: `--adr.['pip-package', 'pip']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.pybind11 + * CM names: `--adr.['pip-package', 'pybind11']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.setuptools + * CM names: `--adr.['pip-package', 'setuputils']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-loadgen/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-loadgen/_cm.yaml) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-loadgen/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-loadgen/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-loadgen/_cm.yaml) + 1. 
***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-loadgen/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-loadgen/_cm.yaml) + +___ +### Script output +`cmr "get loadgen inference inference-loadgen mlperf mlcommons [,variations]" -j` +#### New environment keys (filter) + +* `+CPLUS_INCLUDE_PATH` +* `+C_INCLUDE_PATH` +* `+DYLD_FALLBACK_LIBRARY_PATH` +* `+LD_LIBRARY_PATH` +* `+PYTHONPATH` +* `CM_MLPERF_INFERENCE_LOADGEN_*` +#### New environment keys auto-detected from customize + +* `CM_MLPERF_INFERENCE_LOADGEN_INCLUDE_PATH` +* `CM_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH` +* `CM_MLPERF_INFERENCE_LOADGEN_LIBRARY_PATH` +* `CM_MLPERF_INFERENCE_LOADGEN_PYTHON_PATH` \ No newline at end of file diff --git a/script/get-mlperf-inference-loadgen/_cm.yaml b/script/get-mlperf-inference-loadgen/_cm.yaml new file mode 100644 index 0000000000..3d8b60f8c1 --- /dev/null +++ b/script/get-mlperf-inference-loadgen/_cm.yaml @@ -0,0 +1,169 @@ +alias: get-mlperf-inference-loadgen +uid: 64c3d98d0ba04950 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: MLPerf benchmark support + +default_env: + CM_SHARED_BUILD: 'no' + +default_version: master + +deps: +- tags: detect,os +- names: + - python3 + - python + tags: get,python3 +- force_env_keys: + - CM_GIT_URL + - CM_GIT_CHECKOUT + names: + - inference-src-loadgen + skip_if_env: + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: + - 'YES' + tags: get,mlcommons,inference,src +- enable_if_env: + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: + - 'YES' + force_cache: true + names: + - inference-src-loadgen-download + tags: download-and-extract,file,_wget,_extract + update_tags_from_env_with_prefix: + _url.: + - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL +- names: + - compiler + skip_if_env: + CM_HOST_OS_TYPE: + - windows + tags: get,compiler +- enable_if_env: + CM_HOST_OS_TYPE: + - windows + names: + - compiler + tags: get,cl +- names: + - cmake + tags: get,cmake + version_min: '3.12' +- names: + - pip-package + - wheel + tags: get,generic-python-lib,_package.wheel +- names: + - pip-package + - pip + tags: get,generic-python-lib,_pip +- names: + - pip-package + - pybind11 + tags: get,generic-python-lib,_package.pybind11 +- names: + - pip-package + - setuputils + tags: get,generic-python-lib,_package.setuptools + +extra_cache_tags_from_env: +- env: CM_PYTHON_CACHE_TAGS + prefix: python- +- env: CM_COMPILER_CACHE_TAGS + prefix: compiler- + +new_env_keys: +- +PYTHONPATH +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +- +LD_LIBRARY_PATH +- +DYLD_FALLBACK_LIBRARY_PATH +- CM_MLPERF_INFERENCE_LOADGEN_* + +tags: +- get +- loadgen +- inference +- inference-loadgen +- mlperf +- mlcommons + +variations: + copy: + add_deps: + inference-src-loadgen: + env: + CM_GIT_URL: https://github.com/cknowledge/mlperf-inference-loadgen-copy + # You still need to add --version=main since it's forced here to + custom-python: + ad: + pip-package: + tags: _custom-python + python3: + skip_if_env: + CM_TMP_USE_CUSTOM_PYTHON: + - 'on' + env: + CM_TMP_USE_CUSTOM_PYTHON: 'on' + download: + env: + CM_DOWNLOAD_CHECKSUM: af3f9525965b2c1acc348fb882a5bfd1 + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: 'YES' + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 + CM_MLPERF_INFERENCE_LOADGEN_VERSION: v3.1 + CM_VERIFY_SSL: false + download_v3.1: 
+ env: + CM_DOWNLOAD_CHECKSUM: af3f9525965b2c1acc348fb882a5bfd1 + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: 'YES' + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 + CM_MLPERF_INFERENCE_LOADGEN_VERSION: v3.1 + CM_VERIFY_SSL: false + download_v4.0: + env: + CM_DOWNLOAD_CHECKSUM: b4d97525d9ad0539a64667f2a3ca20c5 + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: 'YES' + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: https://www.dropbox.com/scl/fi/gk5e9kziju5t56umxyzyx/loadgen.zip?rlkey=vsie4xnzml1inpjplm5cg7t54&dl=0 + CM_MLPERF_INFERENCE_LOADGEN_VERSION: v4.0 + CM_VERIFY_SSL: false + +versions: + custom: + add_deps: + inference-src-loadgen: + version: custom + main: + add_deps: + inference-src-loadgen: + version: main + master: + add_deps: + inference-src-loadgen: + version: master + pybind_fix: + add_deps: + inference-src-loadgen: + version: pybind_fix + r2.1: + add_deps: + inference-src-loadgen: + tags: _pybind + version: r2.1 + r3.0: + add_deps: + inference-src-loadgen: + tags: _pybind + version: r3.0 + r3.1: + add_deps: + inference-src-loadgen: + tags: _pybind + version: r3.1 + +print_env_at_the_end: + CM_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH: "Path to the tool" diff --git a/script/get-mlperf-inference-loadgen/customize.py b/script/get-mlperf-inference-loadgen/customize.py new file mode 100644 index 0000000000..35108ed054 --- /dev/null +++ b/script/get-mlperf-inference-loadgen/customize.py @@ -0,0 +1,46 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + +# if os_info['platform'] == 'windows': +# return {'return':1, 'error': 'Windows is not supported in this script yet'} + + return {'return':0} + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + + for key in ['+PYTHONPATH', '+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: +# 20221024: we save and restore env in the main script and can clean env here for determinism +# if key not in env: + env[key] = [] + + # On Windows installs directly into Python distro for simplicity +# if os_info['platform'] != 'windows': + + cur_path = os.getcwd() + install_path = os.path.join(cur_path, 'install') + + env['CM_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH'] = install_path + + include_path = os.path.join(install_path, 'include') + lib_path = os.path.join(install_path, 'lib') + python_path = os.path.join(install_path, 'python') + + env['+C_INCLUDE_PATH'].append(include_path) + env['+CPLUS_INCLUDE_PATH'].append(include_path) + env['CM_MLPERF_INFERENCE_LOADGEN_INCLUDE_PATH'] = include_path + + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + env['CM_MLPERF_INFERENCE_LOADGEN_LIBRARY_PATH'] = lib_path + + env['+PYTHONPATH'].append(python_path) + env['CM_MLPERF_INFERENCE_LOADGEN_PYTHON_PATH'] = python_path + + return {'return':0} diff --git a/script/get-mlperf-inference-loadgen/run.bat b/script/get-mlperf-inference-loadgen/run.bat new file mode 100644 index 0000000000..6d97f12b4e --- /dev/null +++ b/script/get-mlperf-inference-loadgen/run.bat @@ -0,0 +1,39 @@ +@echo off + +echo ======================================================= + +set CUR_DIR=%cd% +echo Current path in CM script: %CUR_DIR% + +if "%CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD%" == "YES" ( + set CM_MLPERF_INFERENCE_SOURCE=%CM_EXTRACT_EXTRACTED_PATH% +) + +set INSTALL_DIR=%CUR_DIR%\install + +echo. 
+echo Switching to %CM_MLPERF_INFERENCE_SOURCE%\loadgen + +cd %CM_MLPERF_INFERENCE_SOURCE%\loadgen +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo. +echo Running %CM_PYTHON_BIN% setup.py develop + +%CM_PYTHON_BIN% setup.py develop +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo ======================================================= +cmake ^ + -DCMAKE_INSTALL_PREFIX=%INSTALL_DIR% ^ + %CM_MLPERF_INFERENCE_SOURCE%\loadgen ^ + -DPYTHON_EXECUTABLE:FILEPATH=%CM_PYTHON_BIN_WITH_PATH% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo ======================================================= +cmake --build . --target install +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +del /Q /S build + +echo ======================================================= diff --git a/script/get-mlperf-inference-loadgen/run.sh b/script/get-mlperf-inference-loadgen/run.sh new file mode 100644 index 0000000000..c35ce4bdd0 --- /dev/null +++ b/script/get-mlperf-inference-loadgen/run.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +CUR_DIR=$PWD + +mkdir -p install +mkdir -p build + +INSTALL_DIR="${CUR_DIR}/install" + +echo "******************************************************" + +cd build + +if [ "${CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD}" == "YES" ]; then + export CM_MLPERF_INFERENCE_SOURCE="${CM_EXTRACT_EXTRACTED_PATH}" +fi + + +if [ -z "${CM_MLPERF_INFERENCE_SOURCE}" ]; then + echo "Error: env CM_MLPERF_INFERENCE_SOURCE is not defined - something is wrong with script automation!" + exit 1 +fi + +cmake \ + -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \ + "${CM_MLPERF_INFERENCE_SOURCE}/loadgen" \ + -DPYTHON_EXECUTABLE:FILEPATH=${CM_PYTHON_BIN_WITH_PATH} +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" +CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} +CM_MAKE_CORES=${CM_MAKE_CORES:-2} + +cmake --build . --target install -j ${CM_MAKE_CORES} +if [ "${?}" != "0" ]; then exit 1; fi + +# Clean build directory (too large) +cd "${CUR_DIR}" +rm -rf build + +PYTHON_VERSION=`${CM_PYTHON_BIN_WITH_PATH} -V |cut -d' ' -f2` +PYTHON_SHORT_VERSION=${PYTHON_VERSION%.*} +PYTHON_MINOR_VERSION=${PYTHON_SHORT_VERSION#*.} +MLPERF_INFERENCE_PYTHON_SITE_BASE=${INSTALL_DIR}"/python" + +cd "${CM_MLPERF_INFERENCE_SOURCE}/loadgen" +CFLAGS="-std=c++14 -O3" ${CM_PYTHON_BIN_WITH_PATH} setup.py bdist_wheel +${CM_PYTHON_BIN_WITH_PATH} -m pip install --force-reinstall `ls dist/mlperf_loadgen-*cp3${PYTHON_MINOR_VERSION}*.whl` --target=${MLPERF_INFERENCE_PYTHON_SITE_BASE} +if [ "${?}" != "0" ]; then exit 1; fi + +# Clean the built wheel +find . -name 'mlperf_loadgen*.whl' | xargs rm + +echo "******************************************************" +echo "Loadgen is built and installed to ${INSTALL_DIR} ..." 
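+
+# Optional, commented-out sanity check (an illustrative suggestion, not part of the
+# verified flow): the wheel built above installs the 'mlperf_loadgen' module under
+# ${MLPERF_INFERENCE_PYTHON_SITE_BASE}, so an import test against that path can
+# confirm the build:
+#PYTHONPATH=${MLPERF_INFERENCE_PYTHON_SITE_BASE}:${PYTHONPATH} ${CM_PYTHON_BIN_WITH_PATH} -c "import mlperf_loadgen"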
diff --git a/script/get-mlperf-inference-loadgen/tests/download-and-install.bat b/script/get-mlperf-inference-loadgen/tests/download-and-install.bat
new file mode 100644
index 0000000000..868f0296c8
--- /dev/null
+++ b/script/get-mlperf-inference-loadgen/tests/download-and-install.bat
@@ -0,0 +1,2 @@
+cmr "get loadgen _download"
+
diff --git a/script/get-mlperf-inference-nvidia-common-code/README-extra.md b/script/get-mlperf-inference-nvidia-common-code/README-extra.md
new file mode 100644
index 0000000000..411a2248c5
--- /dev/null
+++ b/script/get-mlperf-inference-nvidia-common-code/README-extra.md
@@ -0,0 +1,9 @@
+# Get MLPerf Nvidia Common Code
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) exports the `PYTHONPATH` to the common code used by Nvidia for MLPerf submissions.
+
+## Exported Variables
+* `+PYTHONPATH`
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
diff --git a/script/get-mlperf-inference-nvidia-common-code/README.md b/script/get-mlperf-inference-nvidia-common-code/README.md
new file mode 100644
index 0000000000..1980c9f12c
--- /dev/null
+++ b/script/get-mlperf-inference-nvidia-common-code/README.md
@@ -0,0 +1,152 @@
+Automatically generated README for this automation recipe: **get-mlperf-inference-nvidia-common-code**
+
+Category: **MLPerf benchmark support**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-inference-nvidia-common-code,26b78bf3ffdc4926) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-common-code)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,nvidia,mlperf,inference,common-code*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get nvidia mlperf inference common-code" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,nvidia,mlperf,inference,common-code`
+
+`cm run script --tags=get,nvidia,mlperf,inference,common-code[,variations] `
+
+*or*
+
+`cmr "get nvidia mlperf inference common-code"`
+
+`cmr "get nvidia mlperf inference common-code [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,nvidia,mlperf,inference,common-code',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,nvidia,mlperf,inference,common-code"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,nvidia,mlperf,inference,common-code) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get nvidia mlperf inference common-code[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**repo-owner**" +
+ Click here to expand this section. + + * `_ctuning` + - Workflow: + * `_custom` + - Workflow: + * `_mlcommons` + - Workflow: + * `_nvidia-only` + - Workflow: + +
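+
+  For example, a possible invocation pinning the repository owner via the `_mlcommons` variation together with a supported version (both are listed in this README):
+
+```
+cm run script --tags=get,nvidia,mlperf,inference,common-code,_mlcommons --version=r3.1
+```
+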
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +#### Versions +Default version: `r3.1` + +* `r2.1` +* `r3.0` +* `r3.1` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-common-code/_cm.json)*** + * get,mlperf,inference,results + * CM names: `--adr.['mlperf-inference-results']...` + - CM script: [get-mlperf-inference-results](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-results) + - CM script: [get-mlperf-inference-results-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-results-dir) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-common-code/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-common-code/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-common-code/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-common-code/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-common-code/_cm.json) + +___ +### Script output +`cmr "get nvidia mlperf inference common-code [,variations]" -j` +#### New environment keys (filter) + +* `+PYTHONPATH` +* `CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH` +#### New environment keys auto-detected from customize + +* `CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH` \ No newline at end of file diff --git a/script/get-mlperf-inference-nvidia-common-code/_cm.json b/script/get-mlperf-inference-nvidia-common-code/_cm.json new file mode 100644 index 0000000000..46f12477e1 --- /dev/null +++ b/script/get-mlperf-inference-nvidia-common-code/_cm.json @@ -0,0 +1,67 @@ +{ + "alias": "get-mlperf-inference-nvidia-common-code", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "default_version": "r3.1", + "clean_files": [], + "deps": [ + { + "names": [ + "mlperf-inference-results" + ], + "tags": "get,mlperf,inference,results", + "inherit_variation_tags": true + } + ], + "new_env_keys": [ + "+PYTHONPATH", + "CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH" + ], + "tags": [ + "get", + "nvidia", + "mlperf", + "inference", + "common-code" + ], + "uid": "26b78bf3ffdc4926", + "variations": { + "mlcommons": { + "group": "repo-owner" + }, + "custom": { + "group": "repo-owner" + }, + "ctuning": { + "group": "repo-owner" + }, + "nvidia-only": { + "group": "repo-owner" + } + }, + "versions": { + "r2.1": { + "add_deps_recursive": { + "mlperf-inference-results": { + "version": "v2.1" + } + } + }, + "r3.0": { + "add_deps_recursive": { + "mlperf-inference-results": { + "version": "v3.0" + } + } + }, + "r3.1": { + "add_deps_recursive": { + "mlperf-inference-results": { + "version": "v3.1" + } + } + } + } +} diff --git a/script/get-mlperf-inference-nvidia-common-code/customize.py b/script/get-mlperf-inference-nvidia-common-code/customize.py new file mode 100644 index 0000000000..57e0ea43f4 --- /dev/null +++ b/script/get-mlperf-inference-nvidia-common-code/customize.py @@ -0,0 +1,19 @@ +from cmind import utils +import os + +def preprocess(i): 
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    return {'return':0}
+
+
+def postprocess(i):
+    env = i['env']
+
+    env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] = os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_PATH'], "closed", "NVIDIA")
+    env['+PYTHONPATH'] = [ env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] ]
+
+    return {'return':0}
diff --git a/script/get-mlperf-inference-nvidia-scratch-space/README-extra.md b/script/get-mlperf-inference-nvidia-scratch-space/README-extra.md
new file mode 100644
index 0000000000..582991f6d2
--- /dev/null
+++ b/script/get-mlperf-inference-nvidia-scratch-space/README-extra.md
@@ -0,0 +1 @@
+# CM script
diff --git a/script/get-mlperf-inference-nvidia-scratch-space/README.md b/script/get-mlperf-inference-nvidia-scratch-space/README.md
new file mode 100644
index 0000000000..867acacc1b
--- /dev/null
+++ b/script/get-mlperf-inference-nvidia-scratch-space/README.md
@@ -0,0 +1,164 @@
+Automatically generated README for this automation recipe: **get-mlperf-inference-nvidia-scratch-space**
+
+Category: **MLPerf benchmark support**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-inference-nvidia-scratch-space,0b2bec8b29fb4ab7) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-scratch-space)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,mlperf,inference,nvidia,scratch,space*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get mlperf inference nvidia scratch space" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,mlperf,inference,nvidia,scratch,space`
+
+`cm run script --tags=get,mlperf,inference,nvidia,scratch,space[,variations] [--input_flags]`
+
+*or*
+
+`cmr "get mlperf inference nvidia scratch space"`
+
+`cmr "get mlperf inference nvidia scratch space [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,mlperf,inference,nvidia,scratch,space',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,mlperf,inference,nvidia,scratch,space"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,mlperf,inference,nvidia,scratch,space) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get mlperf inference nvidia scratch space[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * Group "**version**" +
+ Click here to expand this section. + + * `_version.#` + - Environment variables: + - *CM_NVIDIA_SCRATCH_SPACE_VERSION*: `#` + - Workflow: + * **`_version.4_0`** (default) + - Environment variables: + - *CM_NVIDIA_SCRATCH_SPACE_VERSION*: `4_0` + - Workflow: + +
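+
+  As an illustration, the wildcard variation above can pin an arbitrary version string, e.g. (the `4_1` value here is hypothetical):
+
+```
+cm run script --tags=get,mlperf,inference,nvidia,scratch,space,_version.4_1
+```
+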
+ + +#### Default variations + +`_version.4_0` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--scratch_path=value` → `CM_NVIDIA_MLPERF_SCRATCH_PATH=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "scratch_path":...})
+```
+
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-scratch-space/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-scratch-space/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-scratch-space/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-scratch-space/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-scratch-space/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-scratch-space/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-scratch-space/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-nvidia-scratch-space/_cm.json) + +___ +### Script output +`cmr "get mlperf inference nvidia scratch space [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_NVIDIA_MLPERF_SCRATCH_PATH` +* `CM_NVIDIA_SCRATCH_SPACE_VERSION` +* `MLPERF_SCRATCH_PATH` +#### New environment keys auto-detected from customize + +* `CM_NVIDIA_MLPERF_SCRATCH_PATH` \ No newline at end of file diff --git a/script/get-mlperf-inference-nvidia-scratch-space/_cm.json b/script/get-mlperf-inference-nvidia-scratch-space/_cm.json new file mode 100644 index 0000000000..0ff47e4b8b --- /dev/null +++ b/script/get-mlperf-inference-nvidia-scratch-space/_cm.json @@ -0,0 +1,49 @@ +{ + "alias": "get-mlperf-inference-nvidia-scratch-space", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "deps": [], + "input_description": {}, + "input_mapping": { + "scratch_path": "CM_NVIDIA_MLPERF_SCRATCH_PATH" + }, + "new_env_keys": [ + "CM_NVIDIA_MLPERF_SCRATCH_PATH", + "MLPERF_SCRATCH_PATH", + "CM_NVIDIA_SCRATCH_SPACE_VERSION" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "get", + "mlperf", + "inference", + "nvidia", + "scratch", + "space" + ], + "uid": "0b2bec8b29fb4ab7", + "variations": { + "version.#": { + "group": "version", + "env": { + "CM_NVIDIA_SCRATCH_SPACE_VERSION": "#" + } + }, + "version.4_0": { + "group": "version", + "default": true, + "env": { + "CM_NVIDIA_SCRATCH_SPACE_VERSION": "4_0" + } + } + }, + "versions": {}, + "docker": { + "run": false + } +} diff --git a/script/get-mlperf-inference-nvidia-scratch-space/customize.py b/script/get-mlperf-inference-nvidia-scratch-space/customize.py new file mode 100644 index 0000000000..1bfa6c9580 --- /dev/null +++ b/script/get-mlperf-inference-nvidia-scratch-space/customize.py @@ -0,0 +1,31 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_NVIDIA_MLPERF_SCRATCH_PATH', '') == '': + if env.get('MLPERF_SCRATCH_PATH','') != '': + env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] = 
env['MLPERF_SCRATCH_PATH'] + else: + env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] = os.getcwd() + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env['MLPERF_SCRATCH_PATH'] = env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] + + return {'return':0} diff --git a/script/get-mlperf-inference-nvidia-scratch-space/run.bat b/script/get-mlperf-inference-nvidia-scratch-space/run.bat new file mode 100644 index 0000000000..648302ca71 --- /dev/null +++ b/script/get-mlperf-inference-nvidia-scratch-space/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/script/get-mlperf-inference-nvidia-scratch-space/run.sh b/script/get-mlperf-inference-nvidia-scratch-space/run.sh new file mode 100644 index 0000000000..eb5ce24565 --- /dev/null +++ b/script/get-mlperf-inference-nvidia-scratch-space/run.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" + +scratch_path=${CM_NVIDIA_MLPERF_SCRATCH_PATH} +mkdir -p ${scratch_path}/data +mkdir -p ${scratch_path}/preprocessed_data +mkdir -p ${scratch_path}/models diff --git a/script/get-mlperf-inference-results-dir/README.md b/script/get-mlperf-inference-results-dir/README.md new file mode 100644 index 0000000000..e12b811431 --- /dev/null +++ b/script/get-mlperf-inference-results-dir/README.md @@ -0,0 +1,161 @@ +Automatically generated README for this automation recipe: **get-mlperf-inference-results-dir** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-inference-results-dir,84f3c5aad5e1444b) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-results-dir)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,mlperf,inference,results,dir,directory* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get mlperf inference results dir directory" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,mlperf,inference,results,dir,directory`
+
+`cm run script --tags=get,mlperf,inference,results,dir,directory[,variations] [--input_flags]`
+
+*or*
+
+`cmr "get mlperf inference results dir directory"`
+
+`cmr "get mlperf inference results dir directory [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,mlperf,inference,results,dir,directory',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,mlperf,inference,results,dir,directory"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,mlperf,inference,results,dir,directory) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get mlperf inference results dir directory[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * Group "**version**" +
+ Click here to expand this section. + + * `_version.#` + - Environment variables: + - *CM_MLPERF_INFERENCE_RESULTS_VERSION*: `#` + - Workflow: + * **`_version.4_0`** (default) + - Environment variables: + - *CM_MLPERF_INFERENCE_RESULTS_VERSION*: `4_0` + - Workflow: + +
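+
+  For example, one way to combine the version variation with the `--results_dir` flag documented below (the path is only a placeholder):
+
+```
+cm run script --tags=get,mlperf,inference,results,dir,directory,_version.4_0 --results_dir=/path/to/results
+```
+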
+ + +#### Default variations + +`_version.4_0` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--results_dir=value` → `CM_MLPERF_INFERENCE_RESULTS_DIR=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "results_dir":...})
+```
+
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-results-dir/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-results-dir/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-results-dir/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-results-dir/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-results-dir/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-results-dir/_cm.json) + +___ +### Script output +`cmr "get mlperf inference results dir directory [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_MLPERF_INFERENCE_RESULTS_DIR` +* `CM_MLPERF_INFERENCE_RESULTS_VERSION` +#### New environment keys auto-detected from customize + +* `CM_MLPERF_INFERENCE_RESULTS_DIR` \ No newline at end of file diff --git a/script/get-mlperf-inference-results-dir/_cm.json b/script/get-mlperf-inference-results-dir/_cm.json new file mode 100644 index 0000000000..3e9eb912b5 --- /dev/null +++ b/script/get-mlperf-inference-results-dir/_cm.json @@ -0,0 +1,48 @@ +{ + "alias": "get-mlperf-inference-results-dir", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "deps": [], + "docker": { + "run": false + }, + "input_description": {}, + "input_mapping": { + "results_dir": "CM_MLPERF_INFERENCE_RESULTS_DIR" + }, + "new_env_keys": [ + "CM_MLPERF_INFERENCE_RESULTS_DIR", + "CM_MLPERF_INFERENCE_RESULTS_VERSION" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "get", + "mlperf", + "inference", + "results", + "dir", + "directory" + ], + "uid": "84f3c5aad5e1444b", + "variations": { + "version.#": { + "group": "version", + "env": { + "CM_MLPERF_INFERENCE_RESULTS_VERSION": "#" + } + }, + "version.4_0": { + "group": "version", + "default": true, + "env": { + "CM_MLPERF_INFERENCE_RESULTS_VERSION": "4_0" + } + } + }, + "versions": {} +} diff --git a/script/get-mlperf-inference-results-dir/customize.py b/script/get-mlperf-inference-results-dir/customize.py new file mode 100644 index 0000000000..8f013816a1 --- /dev/null +++ b/script/get-mlperf-inference-results-dir/customize.py @@ -0,0 +1,27 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_MLPERF_INFERENCE_RESULTS_DIR','') == '': + env['CM_MLPERF_INFERENCE_RESULTS_DIR'] = os.getcwd() + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_RESULTS_DIR'] + + return {'return':0} diff --git a/script/get-mlperf-inference-results/README-extra.md b/script/get-mlperf-inference-results/README-extra.md new file mode 100644 index 0000000000..8ed3bed399 --- /dev/null +++ b/script/get-mlperf-inference-results/README-extra.md @@ -0,0 +1,18 @@ 
+# Get MLCommons Inference Results
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones the [MLCommons Inference results repository](https://github.com/mlcommons/inference_results_v2.1).
+
+## Commands
+To install:
+```
+cm run script --tags=get,mlperf,inference,results --version=[VERSION]
+```
+
+[VERSION] is one of
+* `v2.1:` MLCommons inference 2.1 round results
+
+## Exported Variables
+* `CM_MLPERF_INFERENCE_RESULTS_PATH`: Directory path to the inference results repository
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
diff --git a/script/get-mlperf-inference-results/README.md b/script/get-mlperf-inference-results/README.md
new file mode 100644
index 0000000000..15e7dec33f
--- /dev/null
+++ b/script/get-mlperf-inference-results/README.md
@@ -0,0 +1,165 @@
+Automatically generated README for this automation recipe: **get-mlperf-inference-results**
+
+Category: **MLPerf benchmark support**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-inference-results,36bae5b25dbe41da) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-results)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,results,inference,inference-results,mlcommons,mlperf*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get results inference inference-results mlcommons mlperf" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,results,inference,inference-results,mlcommons,mlperf`
+
+`cm run script --tags=get,results,inference,inference-results,mlcommons,mlperf[,variations] `
+
+*or*
+
+`cmr "get results inference inference-results mlcommons mlperf"`
+
+`cmr "get results inference inference-results mlcommons mlperf [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,results,inference,inference-results,mlcommons,mlperf',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,results,inference,inference-results,mlcommons,mlperf"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,results,inference,inference-results,mlcommons,mlperf) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get results inference inference-results mlcommons mlperf[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**source-repo**" +
+ Click here to expand this section. + + * `_ctuning` + - Environment variables: + - *GITHUB_REPO_OWNER*: `ctuning` + - Workflow: + * `_custom` + - Environment variables: + - *GITHUB_REPO_OWNER*: `arjunsuresh` + - Workflow: + * **`_mlcommons`** (default) + - Environment variables: + - *GITHUB_REPO_OWNER*: `mlcommons` + - Workflow: + * `_nvidia-only` + - Environment variables: + - *GITHUB_REPO_OWNER*: `GATEOverflow` + - *NVIDIA_ONLY*: `yes` + - Workflow: + +
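+
+  For instance, a possible invocation that pulls the community fork instead of the default `_mlcommons` repository (per the `GITHUB_REPO_OWNER` values above):
+
+```
+cmr "get results inference inference-results mlcommons mlperf _ctuning"
+```
+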
+ + +#### Default variations + +`_mlcommons` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_GIT_CHECKOUT: `master` +* CM_GIT_DEPTH: `--depth 1` +* CM_GIT_PATCH: `no` + +
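+
+A sketch of overriding one of these defaults at run time (here removing the shallow-clone flag so the full history is fetched; the other keys work the same way):
+
+```
+cm run script --tags=get,results,inference,inference-results,mlcommons,mlperf --env.CM_GIT_DEPTH=""
+```
+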
+
+#### Versions
+Default version: `v3.1`
+
+* `v2.1`
+* `v3.0`
+* `v3.1`
+___
+### Dependencies on other CM scripts
+
+
+  1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-results/_cm.json)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-results/customize.py)***
+  1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-results/_cm.json)***
+    * get,git,repo
+      - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo)
+  1. ***Run native script if exists***
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-results/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-results/customize.py)***
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-results/_cm.json)
+
+___
+### Script output
+`cmr "get results inference inference-results mlcommons mlperf [,variations]" -j`
+#### New environment keys (filter)
+
+* `CM_MLPERF_INFERENCE_RESULTS_*`
+#### New environment keys auto-detected from customize
+
+* `CM_MLPERF_INFERENCE_RESULTS_PATH`
\ No newline at end of file
diff --git a/script/get-mlperf-inference-results/_cm.json b/script/get-mlperf-inference-results/_cm.json
new file mode 100644
index 0000000000..46feecd049
--- /dev/null
+++ b/script/get-mlperf-inference-results/_cm.json
@@ -0,0 +1,90 @@
+{
+  "alias": "get-mlperf-inference-results",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "cache": true,
+  "category": "MLPerf benchmark support",
+  "default_env": {
+    "CM_GIT_CHECKOUT": "master",
+    "CM_GIT_DEPTH": "--depth 1",
+    "CM_GIT_PATCH": "no"
+  },
+  "default_version": "v3.1",
+  "prehook_deps": [
+    {
+      "tags": "get,git,repo",
+      "update_tags_from_env_with_prefix": {
+        "_repo.": [ "CM_GIT_URL" ]
+      },
+      "force_env_keys": [
+        "CM_GIT_*"
+      ],
+      "env": {
+        "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_MLPERF_INFERENCE_RESULTS_PATH"
+      },
+      "extra_cache_tags": "mlperf,inference,results"
+    }
+  ],
+  "deps": [
+  ],
+  "new_env_keys": [
+    "CM_MLPERF_INFERENCE_RESULTS_*"
+  ],
+  "tags": [
+    "get",
+    "results",
+    "inference",
+    "inference-results",
+    "mlcommons",
+    "mlperf"
+  ],
+  "uid": "36bae5b25dbe41da",
+  "versions": {
+    "v2.1": {
+      "env": {
+        "CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME": "v2.1",
+        "CM_GIT_URL": "https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v2.1.git"
+      }
+    },
+    "v3.0": {
+      "env": {
+        "CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME": "v3.0",
+        "CM_GIT_URL": "https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v3.0.git"
+      }
+    },
+    "v3.1": {
+      "env": {
+        "CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME": "v3.1",
+        "CM_GIT_URL": "https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v3.1.git"
+      }
+    }
+  },
+  "variations": {
+    "mlcommons": {
+      "group": "source-repo",
+      "default": true,
+      "env": {
+        "GITHUB_REPO_OWNER": "mlcommons"
+      }
+    },
+    "custom": {
+      "group": "source-repo",
+      "env": {
+        "GITHUB_REPO_OWNER": "arjunsuresh"
+      }
+    },
+    "ctuning": {
+      "group": "source-repo",
+      "env": {
+        "GITHUB_REPO_OWNER": "ctuning"
+      }
+    },
+    "nvidia-only": {
+      "group": "source-repo",
+      "env": {
+        "GITHUB_REPO_OWNER": "GATEOverflow",
+        "NVIDIA_ONLY": "yes"
+      }
+    }
+  }
+}
diff --git a/script/get-mlperf-inference-results/customize.py b/script/get-mlperf-inference-results/customize.py
new file mode 100644
index 0000000000..747d99e52d
--- /dev/null
+++ b/script/get-mlperf-inference-results/customize.py
@@ -0,0 +1,46 @@
+from cmind import utils
+import os
+import shutil
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    if os_info['platform'] == 'windows':
+        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+
+    env = i['env']
+    meta = i['meta']
+
+    if env.get('NVIDIA_ONLY', '') == 'yes':
+        env['CM_GIT_URL'] = "https://github.com/GATEOverflow/nvidia-inference-code.git"
+
+    if 'GITHUB_REPO_OWNER' in env and '<<<GITHUB_REPO_OWNER>>>' in env['CM_GIT_URL']:
+        env['CM_GIT_URL'] = env['CM_GIT_URL'].replace('<<<GITHUB_REPO_OWNER>>>', env['GITHUB_REPO_OWNER'])
+
+    if 'CM_GIT_DEPTH' not in env:
+        env['CM_GIT_DEPTH'] = ''
+
+    if 'CM_GIT_RECURSE_SUBMODULES' not in env:
+        env['CM_GIT_RECURSE_SUBMODULES'] = ''
+
+    need_version = env.get('CM_VERSION','')
+    versions = meta['versions']
+
+    if need_version != '' and need_version not in versions:
+        env['CM_GIT_CHECKOUT'] = need_version
+
+    return {'return':0}
+
+
+def postprocess(i):
+
+    env = i['env']
+    state = i['state']
+
+    if env.get('CM_GIT_REPO_CURRENT_HASH', '') != '':
+        env['CM_VERSION'] += "-git-"+env['CM_GIT_REPO_CURRENT_HASH']
+
+#    env['CM_MLPERF_INFERENCE_RESULTS_PATH'] = os.path.join(os.getcwd(), "inference_results_"+env['CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME'])
+
+    return {'return':0}
diff --git a/script/get-mlperf-inference-src/README-extra.md b/script/get-mlperf-inference-src/README-extra.md
new file mode 100644
index 0000000000..a96611831c
--- /dev/null
+++ b/script/get-mlperf-inference-src/README-extra.md
@@ -0,0 +1,29 @@
+# Get MLCommons Inference Source
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones the [MLCommons Inference repository](https://github.com/mlcommons/inference).
+
+## Commands
+To install:
+```
+cm run script --tags=get,mlperf,inference,src,[VARIATION] --version=[VERSION]
+```
+where [VARIATION] is one of
+* `default:` Works with the official MLCommons inference repository. Uses the `short-history` variation
+* `patch:` Applies the `git.patch` to the cloned git repository
+* `octoml:` Works with the OctoML fork of the MLCommons inference repository. Uses the `short-history` variation
+* `short-history:` Uses a git depth of the last 10 commits (significantly reduces the download size)
+* `full-history:` Uses the full git history
+* `no-recurse-submodules:` Only downloads the main repository
+
+[VERSION] is one of
+* `master:` Uses the master branch
+* `r2.1:` Uses the release branch used for the MLCommons inference 2.1 round
+
+## Exported Variables
+* `CM_MLPERF_INFERENCE_SOURCE`: Directory path of the cloned inference repository
+* `CM_MLPERF_INFERENCE_VISION_PATH`: Directory path to the vision folder inside the inference repository
+* `PYTHONPATH`: Appended with the paths to the vision module and the submission tools module
+* `CM_MLPERF_INFERENCE_MODELS`: This `state` variable contains the configuration of the MLPerf models as per the selected version
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
diff --git a/script/get-mlperf-inference-src/README.md b/script/get-mlperf-inference-src/README.md
new file mode 100644
index 0000000000..6d854cedcf
--- /dev/null
+++ b/script/get-mlperf-inference-src/README.md
@@ -0,0 +1,268 @@
+Automatically generated README for this automation recipe: **get-mlperf-inference-src**
+
+Category: **MLPerf benchmark support**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-inference-src,4b57186581024797) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-src)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,src,source,inference,inference-src,inference-source,mlperf,mlcommons*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get src source inference inference-src inference-source mlperf mlcommons" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,src,source,inference,inference-src,inference-source,mlperf,mlcommons`
+
+`cm run script --tags=get,src,source,inference,inference-src,inference-source,mlperf,mlcommons[,variations] `
+
+*or*
+
+`cmr "get src source inference inference-src inference-source mlperf mlcommons"`
+
+`cmr "get src source inference inference-src inference-source mlperf mlcommons [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,src,source,inference,inference-src,inference-source,mlperf,mlcommons',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,src,source,inference,inference-src,inference-source,mlperf,mlcommons"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,src,source,inference,inference-src,inference-source,mlperf,mlcommons) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get src source inference inference-src inference-source mlperf mlcommons[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_3d-unet` + - Environment variables: + - *CM_SUBMODULE_3D_UNET*: `yes` + - Workflow: + * `_deeplearningexamples` + - Environment variables: + - *CM_SUBMODULE_DEEPLEARNINGEXAMPLES*: `yes` + - Workflow: + * `_deepsparse` + - Environment variables: + - *CM_GIT_CHECKOUT*: `deepsparse` + - *CM_GIT_URL*: `https://github.com/neuralmagic/inference` + - *CM_MLPERF_LAST_RELEASE*: `v4.0` + - Workflow: + * `_gn` + - Environment variables: + - *CM_SUBMODULE_GN*: `yes` + - Workflow: + * `_no-recurse-submodules` + - Environment variables: + - *CM_GIT_RECURSE_SUBMODULES*: `` + - Workflow: + * `_nvidia-pycocotools` + - Environment variables: + - *CM_GIT_PATCH_FILENAME*: `coco.patch` + - Workflow: + * `_octoml` + - Environment variables: + - *CM_GIT_URL*: `https://github.com/octoml/inference` + - Workflow: + * `_openimages-nvidia-pycocotools` + - Environment variables: + - *CM_GIT_PATCH_FILENAME*: `openimages-pycocotools.patch` + - Workflow: + * `_patch` + - Environment variables: + - *CM_GIT_PATCH*: `yes` + - Workflow: + * `_pybind` + - Environment variables: + - *CM_SUBMODULE_PYBIND*: `yes` + - Workflow: + * `_recurse-submodules` + - Environment variables: + - *CM_GIT_RECURSE_SUBMODULES*: ` --recurse-submodules` + - Workflow: + * `_repo.#` + - Environment variables: + - *CM_GIT_URL*: `#` + - Workflow: + * `_submodules.#` + - Environment variables: + - *CM_GIT_SUBMODULES*: `#` + - Workflow: + +
+ + + * Group "**checkout**" +
+ Click here to expand this section. + + * `_branch.#` + - Environment variables: + - *CM_GIT_CHECKOUT*: `#` + - Workflow: + * `_sha.#` + - Environment variables: + - *CM_GIT_SHA*: `#` + - Workflow: + +
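+
+  For example, to pin the sources to a specific branch (the suffix after `_branch.` or `_sha.` is passed through to git):
+
+  ```cmr "get src source inference inference-src inference-source mlperf mlcommons _branch.master"```
+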
+ + + * Group "**git-history**" +
+ Click here to expand this section. + + * `_full-history` + - Environment variables: + - *CM_GIT_DEPTH*: `` + - Workflow: + * **`_short-history`** (default) + - Environment variables: + - *CM_GIT_DEPTH*: `--depth 10` + - Workflow: + +
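+
+  For example, a full clone is typically required to check out an older commit via `_sha.#`, since the default `_short-history` variation fetches only the last 10 commits:
+
+  ```cmr "get src source inference inference-src inference-source mlperf mlcommons _full-history"```
+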
+ + +#### Default variations + +`_short-history` +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, through the `env` dictionary in `@input.json`, or using script flags.
+
+* CM_GIT_CHECKOUT_FOLDER: `inference`
+* CM_GIT_DEPTH: `--depth 4`
+* CM_GIT_PATCH: `no`
+* CM_GIT_RECURSE_SUBMODULES: ``
+* CM_GIT_URL: `https://github.com/mlcommons/inference.git`
+
+
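+
+For example, a hypothetical override that reduces the clone depth to a single commit:
+
+```cm run script --tags=get,src,source,inference,inference-src,inference-source,mlperf,mlcommons --env.CM_GIT_DEPTH="--depth 1"```
+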
+
+#### Versions
+Default version: `master`
+
+* `custom`
+* `deepsparse`
+* `main`
+* `master`
+* `pybind_fix`
+* `r2.1`
+* `r3.0`
+* `r3.1`
+* `tvm`
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-src/_cm.json)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * get,python3
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-src/customize.py)***
+  1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-src/_cm.json)***
+     * get,git,repo
+       * CM names: `--adr.['inference-git-repo']...`
+       - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo)
+  1. ***Run native script if exists***
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-src/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-src/customize.py)***
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-src/_cm.json)
+
+___
+### Script output
+`cmr "get src source inference inference-src inference-source mlperf mlcommons [,variations]" -j`
+#### New environment keys (filter)
+
+* `+PYTHONPATH`
+* `CM_MLPERF_INFERENCE_3DUNET_PATH`
+* `CM_MLPERF_INFERENCE_BERT_PATH`
+* `CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH`
+* `CM_MLPERF_INFERENCE_CONF_PATH`
+* `CM_MLPERF_INFERENCE_DLRM_PATH`
+* `CM_MLPERF_INFERENCE_DLRM_V2_PATH`
+* `CM_MLPERF_INFERENCE_GPTJ_PATH`
+* `CM_MLPERF_INFERENCE_RNNT_PATH`
+* `CM_MLPERF_INFERENCE_SOURCE`
+* `CM_MLPERF_INFERENCE_VERSION`
+* `CM_MLPERF_INFERENCE_VISION_PATH`
+* `CM_MLPERF_LAST_RELEASE`
+#### New environment keys auto-detected from customize
+
+* `CM_MLPERF_INFERENCE_3DUNET_PATH`
+* `CM_MLPERF_INFERENCE_BERT_PATH`
+* `CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH`
+* `CM_MLPERF_INFERENCE_CONF_PATH`
+* `CM_MLPERF_INFERENCE_DLRM_PATH`
+* `CM_MLPERF_INFERENCE_DLRM_V2_PATH`
+* `CM_MLPERF_INFERENCE_GPTJ_PATH`
+* `CM_MLPERF_INFERENCE_RNNT_PATH`
+* `CM_MLPERF_INFERENCE_VISION_PATH`
\ No newline at end of file
diff --git a/script/get-mlperf-inference-src/_cm.json b/script/get-mlperf-inference-src/_cm.json
new file mode 100644
index 0000000000..4e4c4806d6
--- /dev/null
+++ b/script/get-mlperf-inference-src/_cm.json
@@ -0,0 +1,262 @@
+{
+  "alias": "get-mlperf-inference-src",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "cache": true,
+  "category": "MLPerf benchmark support",
+  "default_env": {
+    "CM_GIT_CHECKOUT_FOLDER": "inference",
+    "CM_GIT_DEPTH": "--depth 4",
+    "CM_GIT_PATCH": "no",
+    "CM_GIT_RECURSE_SUBMODULES": "",
+    "CM_GIT_URL": "https://github.com/mlcommons/inference.git"
+  },
+  "default_version": "master",
+  "deps": [
+    {
+      "tags": "detect,os"
+    },
+    {
+      "names": [
+        "python",
+        "python3"
+      ],
+      "tags": "get,python3"
+    }
+  ],
+  "new_env_keys": [
+    "CM_MLPERF_INFERENCE_3DUNET_PATH",
+    "CM_MLPERF_INFERENCE_BERT_PATH",
+    
"CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH", + "CM_MLPERF_INFERENCE_CONF_PATH", + "CM_MLPERF_INFERENCE_DLRM_PATH", + "CM_MLPERF_INFERENCE_DLRM_V2_PATH", + "CM_MLPERF_INFERENCE_GPTJ_PATH", + "CM_MLPERF_INFERENCE_RNNT_PATH", + "CM_MLPERF_INFERENCE_SOURCE", + "CM_MLPERF_INFERENCE_VERSION", + "CM_MLPERF_INFERENCE_VISION_PATH", + "CM_MLPERF_LAST_RELEASE", + "+PYTHONPATH" + ], + "prehook_deps": [ + { + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_MLPERF_INFERENCE_SOURCE" + }, + "extra_cache_tags": "inference,src", + "force_env_keys": [ + "CM_GIT_*" + ], + "names": [ + "inference-git-repo" + ], + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_branch.": [ + "CM_GIT_CHECKOUT" + ], + "_repo.": [ + "CM_GIT_URL" + ], + "_sha.": [ + "CM_GIT_SHA" + ], + "_submodules.": [ + "CM_GIT_SUBMODULES" + ] + } + } + ], + "print_env_at_the_end": { + "CM_MLPERF_INFERENCE_CONF_PATH": "Path to the MLPerf inference benchmark configuration file", + "CM_MLPERF_INFERENCE_SOURCE": "Path to MLPerf inference benchmark sources" + }, + "tags": [ + "get", + "src", + "source", + "inference", + "inference-src", + "inference-source", + "mlperf", + "mlcommons" + ], + "uid": "4b57186581024797", + "variations": { + "3d-unet": { + "env": { + "CM_SUBMODULE_3D_UNET": "yes" + } + }, + "branch.#": { + "env": { + "CM_GIT_CHECKOUT": "#" + }, + "group": "checkout" + }, + "deeplearningexamples": { + "env": { + "CM_SUBMODULE_DEEPLEARNINGEXAMPLES": "yes" + } + }, + "deepsparse": { + "env": { + "CM_GIT_CHECKOUT": "deepsparse", + "CM_GIT_URL": "https://github.com/neuralmagic/inference", + "CM_MLPERF_LAST_RELEASE": "v4.0" + } + }, + "full-history": { + "env": { + "CM_GIT_DEPTH": "" + }, + "group": "git-history" + }, + "gn": { + "env": { + "CM_SUBMODULE_GN": "yes" + } + }, + "no-recurse-submodules": { + "env": { + "CM_GIT_RECURSE_SUBMODULES": "" + } + }, + "nvidia-pycocotools": { + "base": [ + "patch" + ], + "env": { + "CM_GIT_PATCH_FILENAME": "coco.patch" + } + }, + "octoml": { + "base": [ + "short-history" + ], + "env": { + "CM_GIT_URL": "https://github.com/octoml/inference" + } + }, + "openimages-nvidia-pycocotools": { + "base": [ + "patch" + ], + "env": { + "CM_GIT_PATCH_FILENAME": "openimages-pycocotools.patch" + } + }, + "patch": { + "ad": { + "inference-git-repo": { + "tags": "_patch" + } + }, + "env": { + "CM_GIT_PATCH": "yes" + } + }, + "pybind": { + "env": { + "CM_SUBMODULE_PYBIND": "yes" + } + }, + "recurse-submodules": { + "env": { + "CM_GIT_RECURSE_SUBMODULES": " --recurse-submodules" + } + }, + "repo.#": { + "env": { + "CM_GIT_URL": "#" + } + }, + "sha.#": { + "env": { + "CM_GIT_SHA": "#" + }, + "group": "checkout" + }, + "short-history": { + "default": true, + "env": { + "CM_GIT_DEPTH": "--depth 10" + }, + "group": "git-history" + }, + "submodules.#": { + "env": { + "CM_GIT_SUBMODULES": "#" + } + } + }, + "versions": { + "custom": { + "env": { + "CM_MLPERF_LAST_RELEASE": "v4.0" + } + }, + "deepsparse": { + "env": { + "CM_GIT_CHECKOUT": "deepsparse", + "CM_GIT_URL": "https://github.com/neuralmagic/inference", + "CM_MLPERF_LAST_RELEASE": "v4.0" + } + }, + "main": { + "env": { + "CM_GIT_CHECKOUT": "main", + "CM_MLPERF_LAST_RELEASE": "v4.0" + } + }, + "master": { + "env": { + "CM_GIT_CHECKOUT": "master", + "CM_MLPERF_LAST_RELEASE": "v4.0" + } + }, + "pybind_fix": { + "env": { + "CM_GIT_CHECKOUT": "pybind_update", + "CM_GIT_URL": "https://github.com/GATEOVerflow/inference", + "CM_MLPERF_LAST_RELEASE": "v3.1" + } + }, + "r2.1": { + "env": { + "CM_GIT_CHECKOUT": "v2.1", + "CM_MLPERF_LAST_RELEASE": "v2.1" 
+ } + }, + "r3.0": { + "adr": { + "inference-git-repo": { + "tags": "_tag.v3.0" + } + }, + "env": { + "CM_GIT_CHECKOUT": "", + "CM_MLPERF_LAST_RELEASE": "v3.0" + } + }, + "r3.1": { + "adr": { + "inference-git-repo": { + "tags": "_tag.v3.1" + } + }, + "env": { + "CM_GIT_CHECKOUT": "", + "CM_MLPERF_LAST_RELEASE": "v3.1" + } + }, + "tvm": { + "env": { + "CM_GIT_CHECKOUT": "tvm", + "CM_GIT_URL": "https://github.com/mlcommons/inference", + "CM_MLPERF_LAST_RELEASE": "v3.1" + } + } + } +} diff --git a/script/get-mlperf-inference-src/customize.py b/script/get-mlperf-inference-src/customize.py new file mode 100644 index 0000000000..7916a1bde2 --- /dev/null +++ b/script/get-mlperf-inference-src/customize.py @@ -0,0 +1,106 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + +# if os_info['platform'] == 'windows': +# return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + meta = i['meta'] + + script_path = i['run_script_input']['path'] + + if 'CM_GIT_DEPTH' not in env: + env['CM_GIT_DEPTH'] = '' + + if 'CM_GIT_RECURSE_SUBMODULES' not in env: + env['CM_GIT_RECURSE_SUBMODULES'] = '' + submodules = [] + possible_submodules = { + "gn": "third_party/gn", + "pybind": "third_party/pybind", + "deeplearningexamples":"language/bert/DeepLearningExamples", + "3d-unet":"vision/medical_imaging/3d-unet-brats19/nnUnet" + } + for submodule in possible_submodules: + env_name = submodule.upper().replace("-","_") + if env.get("CM_SUBMODULE_"+env_name) == "yes": + submodules.append(possible_submodules[submodule]) + + env['CM_GIT_SUBMODULES'] = ",".join(submodules) + + if env.get('CM_GIT_PATCH_FILENAME', '') != '': + patch_file_name = env['CM_GIT_PATCH_FILENAME'] + env['CM_GIT_PATCH_FILEPATHS'] = os.path.join(script_path, 'patch', patch_file_name) + + need_version = env.get('CM_VERSION','') + versions = meta['versions'] + + if need_version!='' and not need_version in versions: + env['CM_GIT_CHECKOUT'] = need_version + + return {'return':0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + inference_root = env['CM_MLPERF_INFERENCE_SOURCE'] + env['CM_MLPERF_INFERENCE_VISION_PATH'] = os.path.join(inference_root, 'inference', 'vision') + env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'] = os.path.join(inference_root, 'vision', 'classification_and_detection') + env['CM_MLPERF_INFERENCE_BERT_PATH'] = os.path.join(inference_root, 'language', 'bert') + env['CM_MLPERF_INFERENCE_GPTJ_PATH'] = os.path.join(inference_root, 'language', 'gpt-j') + env['CM_MLPERF_INFERENCE_RNNT_PATH'] = os.path.join(inference_root, 'speech_recognition', 'rnnt') + env['CM_MLPERF_INFERENCE_DLRM_PATH'] = os.path.join(inference_root, 'recommendation', 'dlrm') + env['CM_MLPERF_INFERENCE_DLRM_V2_PATH'] = os.path.join(inference_root, 'recommendation', 'dlrm_v2') + env['CM_MLPERF_INFERENCE_3DUNET_PATH'] = os.path.join(inference_root,'vision', 'medical_imaging', '3d-unet-kits19') + + env['CM_GET_DEPENDENT_CACHED_PATH'] = inference_root + +# 20221024: we save and restore env in the main script and can clean env here for determinism +# if '+PYTHONPATH' not in env: env['+PYTHONPATH'] = [] + env['+PYTHONPATH']=[] + env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], 'python')) + + if env.get('CM_GET_MLPERF_IMPLEMENTATION_ONLY', '') == "yes": + return {'return':0} + + env['CM_MLPERF_INFERENCE_CONF_PATH'] = os.path.join(inference_root, 'mlperf.conf') + 
env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], 'tools', 'submission')) + + valid_models = get_valid_models(env['CM_MLPERF_LAST_RELEASE'], env['CM_MLPERF_INFERENCE_SOURCE']) + + + state['CM_MLPERF_INFERENCE_MODELS'] = valid_models + + if env.get('CM_GIT_REPO_CURRENT_HASH', '') != '': + env['CM_VERSION'] += "-git-"+env['CM_GIT_REPO_CURRENT_HASH'] + + return {'return':0} + + +def get_valid_models(mlperf_version, mlperf_path): + + import sys + + submission_checker_dir = os.path.join(mlperf_path, "tools", "submission") + + sys.path.append(submission_checker_dir) + + if not os.path.exists(os.path.join(submission_checker_dir, "submission_checker.py")): + shutil.copy(os.path.join(submission_checker_dir,"submission-checker.py"), os.path.join(submission_checker_dir, + "submission_checker.py")) + + import submission_checker as checker + + config = checker.MODEL_CONFIG + + valid_models = config[mlperf_version]["models"] + + return valid_models diff --git a/script/get-mlperf-inference-src/patch/coco.patch b/script/get-mlperf-inference-src/patch/coco.patch new file mode 100644 index 0000000000..1911552067 --- /dev/null +++ b/script/get-mlperf-inference-src/patch/coco.patch @@ -0,0 +1,24 @@ +diff --git a/vision/classification_and_detection/tools/accuracy-openimages.py b/vision/classification_and_detection/tools/accuracy-openimages.py +index 0192dde..7fb0dd6 100644 +--- a/vision/classification_and_detection/tools/accuracy-openimages.py ++++ b/vision/classification_and_detection/tools/accuracy-openimages.py +@@ -34,7 +34,7 @@ def get_args(): + def main(): + args = get_args() + +- cocoGt = COCO(os.path.join(args.openimages_dir, "annotations/openimages-mlperf.json")) ++ cocoGt = COCO(os.path.join(args.openimages_dir, "annotations/openimages-mlperf.json"), use_ext=True) + + if args.use_inv_map: + inv_map = [0] + cocoGt.getCatIds() # First label in inv_map is not used +@@ -98,8 +98,8 @@ def main(): + with open(args.output_file, "w") as fp: + json.dump(detections, fp, sort_keys=True, indent=4) + +- cocoDt = cocoGt.loadRes(args.output_file) # Load from file to bypass error with Python3 +- cocoEval = COCOeval(cocoGt, cocoDt, iouType='bbox') ++ cocoDt = cocoGt.loadRes(args.output_file,use_ext=True) # Load from file to bypass error with Python3 ++ cocoEval = COCOeval(cocoGt, cocoDt, iouType='bbox',use_ext=True) + cocoEval.params.imgIds = list(image_ids) + cocoEval.evaluate() + cocoEval.accumulate() diff --git a/script/get-mlperf-inference-src/patch/git.patch b/script/get-mlperf-inference-src/patch/git.patch new file mode 100644 index 0000000000..b822563c9a --- /dev/null +++ b/script/get-mlperf-inference-src/patch/git.patch @@ -0,0 +1,1925 @@ +diff --git a/tools/submission/submission_checker.py b/tools/submission/submission_checker.py +new file mode 100755 +index 0000000..d28fb03 +--- /dev/null ++++ b/tools/submission/submission_checker.py +@@ -0,0 +1,1906 @@ ++""" ++A checker for mlperf inference submissions ++""" ++ ++from __future__ import division ++from __future__ import print_function ++from __future__ import unicode_literals ++ ++import argparse ++import datetime ++import json ++import logging ++import os ++import re ++import sys ++ ++from log_parser import MLPerfLog ++ ++# pylint: disable=missing-docstring ++ ++ ++logging.basicConfig(level=logging.INFO) ++log = logging.getLogger("main") ++ ++submission_checker_dir = os.path.dirname(os.path.realpath(__file__)) ++ ++MODEL_CONFIG = { ++ "v0.5": { ++ "models": ["ssd-small", "ssd-large", "mobilenet", "resnet", "gnmt"], ++ 
"required-scenarios-datacenter": { ++ # anything goes ++ }, ++ "optional-scenarios-datacenter": { ++ # anything goes ++ }, ++ "required-scenarios-edge": { ++ # anything goes ++ }, ++ "optional-scenarios-edge": { ++ # anything goes ++ }, ++ "accuracy-target": { ++ "mobilenet": ("acc", 71.68 * 0.98), ++ "resnet": ("acc", 76.46 * 0.99), ++ "ssd-small": ("mAP", 22 * 0.99), ++ "ssd-large": ("mAP", 20 * 0.99), ++ "gnmt": ("bleu", 23.9 * 0.99), ++ }, ++ "performance-sample-count": { ++ "mobilenet": 1024, ++ "resnet": 1024, ++ "ssd-small": 256, ++ "ssd-large": 64, ++ "gnmt": 3903900, ++ }, ++ "model_mapping": { ++ # map model names to the official mlperf model class ++ "ssd-mobilenet": "ssd-small", ++ "ssd-resnet34": "ssd-large", ++ "resnet50": "resnet", ++ }, ++ "seeds": { ++ "qsl_rng_seed": 3133965575612453542, ++ "sample_index_rng_seed": 665484352860916858, ++ "schedule_rng_seed": 3622009729038561421, ++ }, ++ "test05_seeds": { ++ "qsl_rng_seed" : 195, ++ "sample_index_rng_seed" : 235, ++ "schedule_rng_seed" : 634, ++ }, ++ "ignore_errors": [ ++ "check for ERROR in detailed", ++ "Loadgen built with uncommitted changes", ++ "Ran out of generated queries to issue before the minimum query count and test duration were reached", ++ "CAS failed", ++ ], ++ }, ++ "v0.7": { ++ "models": [ ++ "ssd-small", "ssd-large", "resnet", "rnnt", ++ "bert-99", "bert-99.9", ++ "dlrm-99", "dlrm-99.9", ++ "3d-unet-99", "3d-unet-99.9", ++ ], ++ "required-scenarios-datacenter": { ++ "resnet": ["Offline"], ++ "ssd-large": ["Offline"], ++ "rnnt": ["Offline"], ++ "bert-99": ["Offline"], ++ "bert-99.9": ["Offline"], ++ "dlrm-99": ["Offline"], ++ "dlrm-99.9": ["Offline"], ++ "3d-unet-99": ["Offline"], ++ "3d-unet-99.9": ["Offline"], ++ }, ++ "optional-scenarios-datacenter": { ++ "resnet": ["Server"], ++ "ssd-large": ["Server"], ++ "rnnt": ["Server"], ++ "bert-99": ["Server"], ++ "bert-99.9": ["Server"], ++ "dlrm-99": ["Server"], ++ "dlrm-99.9": ["Server"], ++ }, ++ "required-scenarios-edge": { ++ "resnet": ["SingleStream", "Offline"], ++ "ssd-small": ["SingleStream", "Offline"], ++ "ssd-large": ["SingleStream", "Offline"], ++ "rnnt": ["SingleStream", "Offline"], ++ "bert-99": ["SingleStream", "Offline"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-edge": { ++ "resnet": ["MultiStream"], ++ "ssd-small": ["MultiStream"], ++ "ssd-large": ["MultiStream"], ++ }, ++ "accuracy-target": { ++ "resnet": ("acc", 76.46 * 0.99), ++ "ssd-small": ("mAP", 22 * 0.99), ++ "ssd-large": ("mAP", 20 * 0.99), ++ "rnnt": ("WER", (100 - 7.452) * 0.99), ++ "bert-99": ("F1", 90.874 * 0.99), ++ "bert-99.9": ("F1", 90.874 * 0.999), ++ "dlrm-99": ("AUC", 80.25 * 0.99), ++ "dlrm-99.9": ("AUC", 80.25 * 0.999), ++ "3d-unet-99": ("DICE", 0.853 * 0.99), ++ "3d-unet-99.9": ("DICE", 0.853 * 0.999), ++ }, ++ "performance-sample-count": { ++ "ssd-small": 256, ++ "ssd-large": 64, ++ "resnet": 1024, ++ "rnnt": 2513, ++ "bert-99": 10833, ++ "bert-99.9": 10833, ++ "dlrm-99": 204800, ++ "dlrm-99.9": 204800, ++ "3d-unet-99": 16, ++ "3d-unet-99.9": 16, ++ }, ++ "model_mapping": { ++ # map model names to the official mlperf model class ++ "ssd-mobilenet": "ssd-small", ++ "ssd-resnet34": "ssd-large", ++ "mobilenet": "resnet", ++ "resnet50": "resnet", ++ }, ++ "seeds": { ++ "qsl_rng_seed": 12786827339337101903, ++ "sample_index_rng_seed": 12640797754436136668, ++ "schedule_rng_seed": 3135815929913719677, ++ }, ++ "test05_seeds": { ++ "qsl_rng_seed" : 313588358309856706, ++ "sample_index_rng_seed" 
: 471397156132239067, ++ "schedule_rng_seed" : 413914573387865862, ++ }, ++ "ignore_errors": [ ++ "CAS failed", ++ ], ++ "latency-constraint": { ++ "resnet": {"Server": 15000000, "MultiStream": 50000000}, ++ "ssd-small": {"MultiStream": 50000000}, ++ "ssd-large": {"Server": 100000000, "MultiStream": 66000000}, ++ "rnnt": {"Server": 1000000000}, ++ "bert-99": {"Server": 130000000}, ++ "bert-99.9": {"Server": 130000000}, ++ "dlrm-99": {"Server": 30000000}, ++ "dlrm-99.9": {"Server": 30000000}, ++ }, ++ "min-queries": { ++ "resnet": {"SingleStream": 1024, "Server": 270336, "MultiStream": 270336, "Offline": 1}, ++ "ssd-small": {"SingleStream": 1024, "MultiStream": 270336, "Offline": 1}, ++ "ssd-large": {"SingleStream": 1024, "Server": 270336, "MultiStream": 270336, "Offline": 1}, ++ "rnnt": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99.9": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "dlrm-99": {"Server": 270336, "Offline": 1}, ++ "dlrm-99.9": {"Server": 270336, "Offline": 1}, ++ "3d-unet-99": {"SingleStream": 1024, "Offline": 1}, ++ "3d-unet-99.9": {"SingleStream": 1024, "Offline": 1}, ++ }, ++ }, ++ "v1.0": { ++ "models": [ ++ "ssd-small", "ssd-large", "resnet", "rnnt", ++ "bert-99", "bert-99.9", ++ "dlrm-99", "dlrm-99.9", ++ "3d-unet-99", "3d-unet-99.9", ++ ], ++ "required-scenarios-datacenter": { ++ "resnet": ["Offline"], ++ "ssd-large": ["Offline"], ++ "rnnt": ["Offline"], ++ "bert-99": ["Offline"], ++ "bert-99.9": ["Offline"], ++ "dlrm-99": ["Offline"], ++ "dlrm-99.9": ["Offline"], ++ "3d-unet-99": ["Offline"], ++ "3d-unet-99.9": ["Offline"], ++ }, ++ "optional-scenarios-datacenter": { ++ "resnet": ["Server"], ++ "ssd-large": ["Server"], ++ "rnnt": ["Server"], ++ "bert-99": ["Server"], ++ "bert-99.9": ["Server"], ++ "dlrm-99": ["Server"], ++ "dlrm-99.9": ["Server"], ++ }, ++ "required-scenarios-edge": { ++ "resnet": ["SingleStream", "Offline"], ++ "ssd-small": ["SingleStream", "Offline"], ++ "ssd-large": ["SingleStream", "Offline"], ++ "rnnt": ["SingleStream", "Offline"], ++ "bert-99": ["SingleStream", "Offline"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-edge": { ++ "resnet": ["MultiStream"], ++ "ssd-small": ["MultiStream"], ++ "ssd-large": ["MultiStream"], ++ }, ++ "required-scenarios-datacenter-edge": { ++ "resnet": ["SingleStream", "Offline"], ++ "ssd-small": ["SingleStream", "Offline"], ++ "ssd-large": ["SingleStream", "Offline"], ++ "rnnt": ["SingleStream", "Offline"], ++ "bert-99": ["SingleStream", "Offline"], ++ "bert-99.9": ["Offline"], ++ "dlrm-99": ["Offline"], ++ "dlrm-99.9": ["Offline"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-datacenter-edge": { ++ "resnet": ["MultiStream", "Server"], ++ "ssd-small": ["MultiStream"], ++ "ssd-large": ["MultiStream", "Server"], ++ "rnnt": ["Server"], ++ "bert-99": ["Server"], ++ "bert-99.9": ["Server"], ++ "dlrm-99": ["Server"], ++ "dlrm-99.9": ["Server"], ++ }, ++ "accuracy-target": { ++ "resnet": ("acc", 76.46 * 0.99), ++ "ssd-small": ("mAP", 22 * 0.99), ++ "ssd-large": ("mAP", 20 * 0.99), ++ "rnnt": ("WER", (100 - 7.452) * 0.99), ++ "bert-99": ("F1", 90.874 * 0.99), ++ "bert-99.9": ("F1", 90.874 * 0.999), ++ "dlrm-99": ("AUC", 80.25 * 0.99), ++ "dlrm-99.9": ("AUC", 80.25 * 0.999), ++ "3d-unet-99": ("DICE", 0.853 * 0.99), ++ "3d-unet-99.9": ("DICE", 0.853 * 0.999), ++ }, ++ 
"performance-sample-count": { ++ "ssd-small": 256, ++ "ssd-large": 64, ++ "resnet": 1024, ++ "rnnt": 2513, ++ "bert-99": 10833, ++ "bert-99.9": 10833, ++ "dlrm-99": 204800, ++ "dlrm-99.9": 204800, ++ "3d-unet-99": 16, ++ "3d-unet-99.9": 16, ++ }, ++ "model_mapping": { ++ # map model names to the official mlperf model class ++ "ssd-mobilenet": "ssd-small", ++ "ssd-resnet34": "ssd-large", ++ "mobilenet": "resnet", ++ "resnet50": "resnet", ++ }, ++ "seeds": { ++ "qsl_rng_seed": 7322528924094909334, ++ "sample_index_rng_seed": 1570999273408051088, ++ "schedule_rng_seed": 3507442325620259414, ++ }, ++ "test05_seeds": { ++ "qsl_rng_seed" : 313588358309856706, ++ "sample_index_rng_seed" : 471397156132239067, ++ "schedule_rng_seed" : 413914573387865862, ++ }, ++ "ignore_errors": [ ++ ], ++ "latency-constraint": { ++ "resnet": {"Server": 15000000, "MultiStream": 50000000}, ++ "ssd-small": {"MultiStream": 50000000}, ++ "ssd-large": {"Server": 100000000, "MultiStream": 66000000}, ++ "rnnt": {"Server": 1000000000}, ++ "bert-99": {"Server": 130000000}, ++ "bert-99.9": {"Server": 130000000}, ++ "dlrm-99": {"Server": 30000000}, ++ "dlrm-99.9": {"Server": 30000000}, ++ }, ++ "min-queries": { ++ "resnet": {"SingleStream": 1024, "Server": 270336, "MultiStream": 270336, "Offline": 1}, ++ "ssd-small": {"SingleStream": 1024, "MultiStream": 270336, "Offline": 1}, ++ "ssd-large": {"SingleStream": 1024, "Server": 270336, "MultiStream": 270336, "Offline": 1}, ++ "rnnt": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99.9": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "dlrm-99": {"Server": 270336, "Offline": 1}, ++ "dlrm-99.9": {"Server": 270336, "Offline": 1}, ++ "3d-unet-99": {"SingleStream": 1024, "Offline": 1}, ++ "3d-unet-99.9": {"SingleStream": 1024, "Offline": 1}, ++ }, ++ }, ++ "v1.1": { ++ "models": [ ++ "ssd-small", "ssd-large", "resnet", "rnnt", ++ "bert-99", "bert-99.9", ++ "dlrm-99", "dlrm-99.9", ++ "3d-unet-99", "3d-unet-99.9", ++ ], ++ "required-scenarios-datacenter": { ++ "resnet": ["Offline"], ++ "ssd-large": ["Offline"], ++ "rnnt": ["Offline"], ++ "bert-99": ["Offline"], ++ "bert-99.9": ["Offline"], ++ "dlrm-99": ["Offline"], ++ "dlrm-99.9": ["Offline"], ++ "3d-unet-99": ["Offline"], ++ "3d-unet-99.9": ["Offline"], ++ }, ++ "optional-scenarios-datacenter": { ++ "resnet": ["Server"], ++ "ssd-large": ["Server"], ++ "rnnt": ["Server"], ++ "bert-99": ["Server"], ++ "bert-99.9": ["Server"], ++ "dlrm-99": ["Server"], ++ "dlrm-99.9": ["Server"], ++ }, ++ "required-scenarios-edge": { ++ "resnet": ["SingleStream", "Offline"], ++ "ssd-small": ["SingleStream", "Offline"], ++ "ssd-large": ["SingleStream", "Offline"], ++ "rnnt": ["SingleStream", "Offline"], ++ "bert-99": ["SingleStream", "Offline"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "required-scenarios-datacenter-edge": { ++ "resnet": ["SingleStream", "Offline"], ++ "ssd-small": ["SingleStream", "Offline"], ++ "ssd-large": ["SingleStream", "Offline"], ++ "rnnt": ["SingleStream", "Offline"], ++ "bert-99": ["SingleStream", "Offline"], ++ "bert-99.9": ["Offline"], ++ "dlrm-99": ["Offline"], ++ "dlrm-99.9": ["Offline"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-datacenter-edge": { ++ "resnet": ["Server"], ++ "ssd-large": ["Server"], ++ "rnnt": ["Server"], ++ "bert-99": ["Server"], ++ "bert-99.9": ["Server"], ++ 
"dlrm-99": ["Server"], ++ "dlrm-99.9": ["Server"], ++ }, ++ "accuracy-target": { ++ "resnet": ("acc", 76.46 * 0.99), ++ "ssd-small": ("mAP", 22 * 0.99), ++ "ssd-large": ("mAP", 20 * 0.99), ++ "rnnt": ("WER", (100 - 7.452) * 0.99), ++ "bert-99": ("F1", 90.874 * 0.99), ++ "bert-99.9": ("F1", 90.874 * 0.999), ++ "dlrm-99": ("AUC", 80.25 * 0.99), ++ "dlrm-99.9": ("AUC", 80.25 * 0.999), ++ "3d-unet-99": ("DICE", 0.853 * 0.99), ++ "3d-unet-99.9": ("DICE", 0.853 * 0.999), ++ }, ++ "performance-sample-count": { ++ "ssd-small": 256, ++ "ssd-large": 64, ++ "resnet": 1024, ++ "rnnt": 2513, ++ "bert-99": 10833, ++ "bert-99.9": 10833, ++ "dlrm-99": 204800, ++ "dlrm-99.9": 204800, ++ "3d-unet-99": 16, ++ "3d-unet-99.9": 16, ++ }, ++ "model_mapping": { ++ # map model names to the official mlperf model class ++ "ssd-mobilenet": "ssd-small", ++ "ssd-resnet34": "ssd-large", ++ "mobilenet": "resnet", ++ "resnet50": "resnet", ++ }, ++ "seeds": { ++ "qsl_rng_seed": 1624344308455410291, ++ "sample_index_rng_seed": 517984244576520566, ++ "schedule_rng_seed": 10051496985653635065, ++ }, ++ "test05_seeds": { ++ "qsl_rng_seed" : 313588358309856706, ++ "sample_index_rng_seed" : 471397156132239067, ++ "schedule_rng_seed" : 413914573387865862, ++ }, ++ "ignore_errors": [ ++ ], ++ "latency-constraint": { ++ "resnet": {"Server": 15000000, "MultiStream": 50000000}, ++ "ssd-small": {"MultiStream": 50000000}, ++ "ssd-large": {"Server": 100000000, "MultiStream": 66000000}, ++ "rnnt": {"Server": 1000000000}, ++ "bert-99": {"Server": 130000000}, ++ "bert-99.9": {"Server": 130000000}, ++ "dlrm-99": {"Server": 30000000}, ++ "dlrm-99.9": {"Server": 30000000}, ++ }, ++ "min-queries": { ++ "resnet": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "ssd-small": {"SingleStream": 1024, "Offline": 1}, ++ "ssd-large": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "rnnt": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99.9": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "dlrm-99": {"Server": 270336, "Offline": 1}, ++ "dlrm-99.9": {"Server": 270336, "Offline": 1}, ++ "3d-unet-99": {"SingleStream": 1024, "Offline": 1}, ++ "3d-unet-99.9": {"SingleStream": 1024, "Offline": 1}, ++ }, ++ }, ++ "v2.0": { ++ "models": [ ++ "ssd-small", "ssd-large", "resnet", "rnnt", ++ "bert-99", "bert-99.9", ++ "dlrm-99", "dlrm-99.9", ++ "3d-unet-99", "3d-unet-99.9", ++ ], ++ # FIXME: required/optional scenarios for v2.0 needs to be filled up correctly; below lists are temporary ++ "required-scenarios-datacenter": { ++ "resnet": ["Server", "Offline"], ++ "ssd-large": ["Server", "Offline"], ++ "rnnt": ["Server", "Offline"], ++ "bert-99": ["Server", "Offline"], ++ "bert-99.9": ["Server", "Offline"], ++ "dlrm-99": ["Server", "Offline"], ++ "dlrm-99.9": ["Server", "Offline"], ++ "3d-unet-99": ["Offline"], ++ "3d-unet-99.9": ["Offline"], ++ }, ++ "optional-scenarios-datacenter": { ++ }, ++ "required-scenarios-edge": { ++ "resnet": ["SingleStream", "MultiStream", "Offline"], ++ "ssd-small": ["SingleStream", "MultiStream", "Offline"], ++ "ssd-large": ["SingleStream", "MultiStream", "Offline"], ++ "rnnt": ["SingleStream", "Offline"], ++ "bert-99": ["SingleStream", "Offline"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-edge": { ++ }, ++ "required-scenarios-datacenter-edge": { ++ "resnet": ["SingleStream", "Offline", "MultiStream", "Server"], ++ "ssd-small": 
["SingleStream", "Offline", "MultiStream"], ++ "ssd-large": ["SingleStream", "Offline", "MultiStream", "Server"], ++ "rnnt": ["SingleStream", "Offline", "Server"], ++ "bert-99": ["SingleStream", "Offline", "Server"], ++ "bert-99.9": ["Offline", "Server"], ++ "dlrm-99": ["Offline", "Server"], ++ "dlrm-99.9": ["Offline", "Server"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-datacenter-edge": { ++ }, ++ "accuracy-target": { ++ "resnet": ("acc", 76.46 * 0.99), ++ "ssd-small": ("mAP", 22 * 0.99), ++ "ssd-large": ("mAP", 20 * 0.99), ++ "rnnt": ("WER", (100 - 7.452) * 0.99), ++ "bert-99": ("F1", 90.874 * 0.99), ++ "bert-99.9": ("F1", 90.874 * 0.999), ++ "dlrm-99": ("AUC", 80.25 * 0.99), ++ "dlrm-99.9": ("AUC", 80.25 * 0.999), ++ "3d-unet-99": ("DICE", 0.86331 * 0.99), ++ "3d-unet-99.9": ("DICE", 0.86331 * 0.999), ++ }, ++ "performance-sample-count": { ++ "ssd-small": 256, ++ "ssd-large": 64, ++ "resnet": 1024, ++ "rnnt": 2513, ++ "bert-99": 10833, ++ "bert-99.9": 10833, ++ "dlrm-99": 204800, ++ "dlrm-99.9": 204800, ++ "3d-unet-99": 42, ++ "3d-unet-99.9": 42, ++ }, ++ "model_mapping": { ++ # map model names to the official mlperf model class ++ "ssd-mobilenet": "ssd-small", ++ "ssd-resnet34": "ssd-large", ++ "mobilenet": "resnet", ++ "resnet50": "resnet", ++ "ssd_resnet101_v1_fpn_640x640": "ssd-small", ++ "ssd_resnet101_v1_fpn_1024x1024": "ssd-large", ++ "ssd_resnet152_v1_fpn_640x640": "ssd-small", ++ "ssd_resnet152_v1_fpn_1024x1024": "ssd-large", ++ "rcnn-resnet50-lowproposals-coco": "ssd-large", ++ "rcnn-inception-resnet-v2-lowproposals-coco": "ssd-large", ++ "rcnn-inception-v2-coco": "ssd-large", ++ "rcnn-nas-lowproposals-coco": "ssd-large", ++ "rcnn-resnet101-lowproposals-coco": "ssd-large", ++ "ssd_mobilenet_v1_coco": "ssd-small", ++ "ssd_mobilenet_v1_fpn_640x640": "ssd-small", ++ "ssd_mobilenet_v1_quantized_coco": "ssd-small", ++ "ssd_mobilenet_v2_320x320": "ssd-small", ++ "ssd_mobilenet_v2_fpnlite_320x320": "ssd-small", ++ "ssd_mobilenet_v2_fpnlite_640x640": "ssd-small", ++ "ssd_resnet50_v1_fpn_640x640": "ssd-small", ++ "ssd_resnet50_v1_fpn_1024x1024": "ssd-large", ++ }, ++ "seeds": { ++ "qsl_rng_seed": 6655344265603136530, ++ "sample_index_rng_seed": 15863379492028895792, ++ "schedule_rng_seed": 12662793979680847247, ++ }, ++ "test05_seeds": { ++ "qsl_rng_seed" : 313588358309856706, ++ "sample_index_rng_seed" : 471397156132239067, ++ "schedule_rng_seed" : 413914573387865862, ++ }, ++ "ignore_errors": [ ++ ], ++ "latency-constraint": { ++ "resnet": {"Server": 15000000}, ++ "ssd-large": {"Server": 100000000}, ++ "rnnt": {"Server": 1000000000}, ++ "bert-99": {"Server": 130000000}, ++ "bert-99.9": {"Server": 130000000}, ++ "dlrm-99": {"Server": 30000000}, ++ "dlrm-99.9": {"Server": 30000000}, ++ }, ++ "min-queries": { ++ "resnet": {"SingleStream": 1024, "MultiStream": 270336, "Server": 270336, "Offline": 1}, ++ "ssd-small": {"SingleStream": 1024, "MultiStream": 270336, "Offline": 1}, ++ "ssd-large": {"SingleStream": 1024, "MultiStream": 270336, "Server": 270336, "Offline": 1}, ++ "rnnt": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99.9": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "dlrm-99": {"Server": 270336, "Offline": 1}, ++ "dlrm-99.9": {"Server": 270336, "Offline": 1}, ++ "3d-unet-99": {"SingleStream": 1024, "Offline": 1}, ++ "3d-unet-99.9": {"SingleStream": 1024, "Offline": 1}, ++ }, ++ }, ++ "v2.1": { ++ 
"models": [ ++ "resnet", "retinanet", "rnnt", ++ "bert-99", "bert-99.9", ++ "dlrm-99", "dlrm-99.9", ++ "3d-unet-99", "3d-unet-99.9", ++ ], ++ "required-scenarios-datacenter": { ++ "resnet": ["Server", "Offline"], ++ "retinanet": ["Server", "Offline"], ++ "rnnt": ["Server", "Offline"], ++ "bert-99": ["Server", "Offline"], ++ "bert-99.9": ["Server", "Offline"], ++ "dlrm-99": ["Server", "Offline"], ++ "dlrm-99.9": ["Server", "Offline"], ++ "3d-unet-99": ["Offline"], ++ "3d-unet-99.9": ["Offline"], ++ }, ++ "optional-scenarios-datacenter": { ++ }, ++ "required-scenarios-edge": { ++ "resnet": ["SingleStream", "MultiStream", "Offline"], ++ "retinanet": ["SingleStream", "MultiStream", "Offline"], ++ "rnnt": ["SingleStream", "Offline"], ++ "bert-99": ["SingleStream", "Offline"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-edge": { ++ }, ++ "required-scenarios-datacenter-edge": { ++ "resnet": ["SingleStream", "Offline", "MultiStream", "Server"], ++ "retinanet": ["SingleStream", "Offline", "MultiStream", "Server"], ++ "rnnt": ["SingleStream", "Offline", "Server"], ++ "bert-99": ["SingleStream", "Offline", "Server"], ++ "bert-99.9": ["Offline", "Server"], ++ "dlrm-99": ["Offline", "Server"], ++ "dlrm-99.9": ["Offline", "Server"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-datacenter-edge": { ++ }, ++ "accuracy-target": { ++ "resnet": ("acc", 76.46 * 0.99), ++ "retinanet": ("mAP", 37.55 * 0.99), ++ "rnnt": ("WER", (100 - 7.452) * 0.99), ++ "bert-99": ("F1", 90.874 * 0.99), ++ "bert-99.9": ("F1", 90.874 * 0.999), ++ "dlrm-99": ("AUC", 80.25 * 0.99), ++ "dlrm-99.9": ("AUC", 80.25 * 0.999), ++ "3d-unet-99": ("DICE", 0.86170 * 0.99), ++ "3d-unet-99.9": ("DICE", 0.86170 * 0.999), ++ }, ++ "performance-sample-count": { ++ "resnet": 1024, ++ # TODO: Update perf sample count for retinanet ++ "retinanet": 64, ++ "rnnt": 2513, ++ "bert-99": 10833, ++ "bert-99.9": 10833, ++ "dlrm-99": 204800, ++ "dlrm-99.9": 204800, ++ "3d-unet-99": 42, ++ "3d-unet-99.9": 42, ++ }, ++ # TODO: Update this list. 
++ "model_mapping": { ++ # map model names to the official mlperf model class ++ "ssd-mobilenet": "ssd-small", ++ "ssd-resnet34": "ssd-large", ++ "mobilenet": "resnet", ++ "resnet50": "resnet", ++ "ssd_resnet101_v1_fpn_640x640": "ssd-small", ++ "ssd_resnet101_v1_fpn_1024x1024": "ssd-large", ++ "ssd_resnet152_v1_fpn_640x640": "ssd-small", ++ "ssd_resnet152_v1_fpn_1024x1024": "ssd-large", ++ "rcnn-resnet50-lowproposals-coco": "ssd-large", ++ "rcnn-inception-resnet-v2-lowproposals-coco": "ssd-large", ++ "rcnn-inception-v2-coco": "ssd-large", ++ "rcnn-nas-lowproposals-coco": "ssd-large", ++ "rcnn-resnet101-lowproposals-coco": "ssd-large", ++ "ssd_mobilenet_v1_coco": "ssd-small", ++ "ssd_mobilenet_v1_fpn_640x640": "ssd-small", ++ "ssd_mobilenet_v1_quantized_coco": "ssd-small", ++ "ssd_mobilenet_v2_320x320": "ssd-small", ++ "ssd_mobilenet_v2_fpnlite_320x320": "ssd-small", ++ "ssd_mobilenet_v2_fpnlite_640x640": "ssd-small", ++ "ssd_resnet50_v1_fpn_640x640": "ssd-small", ++ "ssd_resnet50_v1_fpn_1024x1024": "ssd-large", ++ }, ++ "seeds": { ++ "qsl_rng_seed": 14284205019438841327, ++ "sample_index_rng_seed": 4163916728725999944, ++ "schedule_rng_seed": 299063814864929621, ++ }, ++ "test05_seeds": { ++ "qsl_rng_seed" : 313588358309856706, ++ "sample_index_rng_seed" : 471397156132239067, ++ "schedule_rng_seed" : 413914573387865862, ++ }, ++ "ignore_errors": [ ++ ], ++ "latency-constraint": { ++ "resnet": {"Server": 15000000}, ++ "retinanet": {"Server": 100000000}, ++ "rnnt": {"Server": 1000000000}, ++ "bert-99": {"Server": 130000000}, ++ "bert-99.9": {"Server": 130000000}, ++ "dlrm-99": {"Server": 30000000}, ++ "dlrm-99.9": {"Server": 30000000}, ++ }, ++ "min-queries": { ++ "resnet": {"SingleStream": 1024, "MultiStream": 270336, "Server": 270336, "Offline": 1}, ++ "retinanet": {"SingleStream": 1024, "MultiStream": 270336, "Server": 270336, "Offline": 1}, ++ "rnnt": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99.9": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "dlrm-99": {"Server": 270336, "Offline": 1}, ++ "dlrm-99.9": {"Server": 270336, "Offline": 1}, ++ "3d-unet-99": {"SingleStream": 1024, "Offline": 1}, ++ "3d-unet-99.9": {"SingleStream": 1024, "Offline": 1}, ++ }, ++ }, ++} ++ ++VALID_DIVISIONS = ["open", "closed", "network"] ++VALID_AVAILABILITIES = ["available", "preview", "rdi"] ++REQUIRED_PERF_FILES = ["mlperf_log_summary.txt", "mlperf_log_detail.txt"] ++OPTIONAL_PERF_FILES = ["mlperf_log_accuracy.json"] ++REQUIRED_PERF_POWER_FILES = ["spl.txt"] ++REQUIRED_POWER_FILES = ["client.json", "client.log", "ptd_logs.txt", "server.json", "server.log"] ++REQUIRED_ACC_FILES = ["mlperf_log_summary.txt", "mlperf_log_detail.txt", "accuracy.txt", "mlperf_log_accuracy.json"] ++REQUIRED_MEASURE_FILES = ["mlperf.conf", "user.conf", "README.md"] ++MS_TO_NS = 1000 * 1000 ++S_TO_MS = 1000 ++MAX_ACCURACY_LOG_SIZE = 10 * 1024 ++OFFLINE_MIN_SPQ = 24576 ++TEST_DURATION_MS_PRE_1_0 = 60000 ++TEST_DURATION_MS = 600000 ++REQUIRED_COMP_PER_FILES = ["mlperf_log_summary.txt", "mlperf_log_detail.txt"] ++REQUIRED_TEST01_ACC_FILES_1 = ["mlperf_log_accuracy.json", "accuracy.txt"] ++REQUIRED_TEST01_ACC_FILES = REQUIRED_TEST01_ACC_FILES_1 + ["baseline_accuracy.txt", "compliance_accuracy.txt"] ++ ++SCENARIO_MAPPING = { ++ "singlestream": "SingleStream", ++ "multistream": "MultiStream", ++ "server": "Server", ++ "offline": "Offline", ++} ++ ++RESULT_FIELD = { ++ "Offline": "Samples per second", ++ "SingleStream": "90th 
percentile latency (ns)", ++ "MultiStream": "Samples per query", ++ "Server": "Scheduled samples per second" ++} ++ ++RESULT_FIELD_NEW = { ++ "v0.5": { ++ "Offline": "result_samples_per_second", ++ "SingleStream": "result_90.00_percentile_latency_ns", ++ "MultiStreamLegacy": "effective_samples_per_query", ++ "MultiStream": "result_99.00_percentile_per_query_latency_ns", ++ "Server": "result_scheduled_samples_per_sec" ++ }, ++ "v0.7": { ++ "Offline": "result_samples_per_second", ++ "SingleStream": "result_90.00_percentile_latency_ns", ++ "MultiStreamLegacy": "effective_samples_per_query", ++ "MultiStream": "result_99.00_percentile_per_query_latency_ns", ++ "Server": "result_scheduled_samples_per_sec" ++ }, ++ "v1.0": { ++ "Offline": "result_samples_per_second", ++ "SingleStream": "result_90.00_percentile_latency_ns", ++ "MultiStreamLegacy": "effective_samples_per_query", ++ "MultiStream": "result_99.00_percentile_per_query_latency_ns", ++ "Server": "result_scheduled_samples_per_sec" ++ }, ++ "v1.1": { ++ "Offline": "result_samples_per_second", ++ "SingleStream": "result_90.00_percentile_latency_ns", ++ "MultiStreamLegacy": "effective_samples_per_query", ++ "MultiStream": "result_99.00_percentile_per_query_latency_ns", ++ "Server": "result_scheduled_samples_per_sec" ++ }, ++ "v2.0": { ++ "Offline": "result_samples_per_second", ++ "SingleStream": "early_stopping_latency_ss", ++ "MultiStreamLegacy": "effective_samples_per_query", ++ "MultiStream": "early_stopping_latency_ms", ++ "Server": "result_scheduled_samples_per_sec" ++ }, ++ "v2.1": { ++ "Offline": "result_samples_per_second", ++ "SingleStream": "early_stopping_latency_ss", ++ "MultiStreamLegacy": "effective_samples_per_query", ++ "MultiStream": "early_stopping_latency_ms", ++ "Server": "result_scheduled_samples_per_sec" ++ }, ++} ++ ++ACC_PATTERN = { ++ "acc": ++ r"^accuracy=([\d\.]+).*", ++ "AUC": ++ r"^AUC=([\d\.]+).*", ++ "mAP": ++ r"^mAP=([\d\.]+).*", ++ "bleu": ++ r"^BLEU\:\s*([\d\.]+).*", ++ "F1": ++ r"^{[\"\']exact_match[\"\']\:\s*[\d\.]+,\s*[\"\']f1[\"\']\:\s*([\d\.]+)}", ++ "WER": ++ r"Word Error Rate\:.*, accuracy=([0-9\.]+)%", ++ "DICE": ++ r"Accuracy\:\s*mean\s*=\s*([\d\.]+).*", ++} ++ ++SYSTEM_DESC_REQUIRED_FIELDS = [ ++ "division", "submitter", "status", "system_name", "number_of_nodes", "host_processor_model_name", ++ "host_processors_per_node", "host_processor_core_count", "host_memory_capacity", "host_storage_capacity", ++ "host_storage_type", "accelerators_per_node", "accelerator_model_name", "accelerator_memory_capacity", ++ "framework", "operating_system" ++] ++ ++SYSTEM_DESC_REQUIED_FIELDS_SINCE_V1 = [ ++ "system_type", "other_software_stack", "host_processor_frequency", "host_processor_caches", ++ "host_memory_configuration", "host_processor_interconnect", "host_networking", "host_networking_topology", ++ "accelerator_frequency", "accelerator_host_interconnect", "accelerator_interconnect", ++ "accelerator_interconnect_topology", "accelerator_memory_configuration", ++ "accelerator_on-chip_memories", "cooling", "hw_notes", "sw_notes" ++] ++ ++SYSTEM_DESC_REQUIED_FIELDS_POWER = [ ++ "power_management", "filesystem", "boot_firmware_version", "management_firmware_version", "other_hardware", ++ "number_of_type_nics_installed", "nics_enabled_firmware", "nics_enabled_os", "nics_enabled_connected", ++ "network_speed_mbit", "power_supply_quantity_and_rating_watts", "power_supply_details", "disk_drives", ++ "disk_controllers" ++] ++ ++SYSTEM_DESC_IS_NETWORK_MODE = "is_network" ++SYSTEM_DESC_REQUIRED_FIELDS_NETWORK_MODE = [ 
++ SYSTEM_DESC_IS_NETWORK_MODE, "network_type", "network_media", "network_rate", "nic_loadgen", ++ "number_nic_loadgen", "net_software_stack_loadgen", "network_protocol", "number_connections", "nic_sut", ++ "number_nic_sut", "net_software_stack_sut", "network_topology" ++] ++NETWORK_MODE_REQUIRED_SUBSTRING_IN_SUT_NAME = "Network SUT" ++ ++SYSTEM_IMP_REQUIRED_FILES = [ ++ "input_data_types", "retraining", "starting_weights_filename", "weight_data_types", ++ "weight_transformations", ++] ++ ++ ++class Config(): ++ """Select config value by mlperf version and submission type.""" ++ def __init__(self, version, extra_model_benchmark_map, ignore_uncommited=False, more_power_check=False): ++ self.base = MODEL_CONFIG.get(version) ++ self.set_extra_model_benchmark_map(extra_model_benchmark_map) ++ self.version = version ++ self.models = self.base["models"] ++ self.seeds = self.base["seeds"] ++ self.test05_seeds = self.base["test05_seeds"] ++ self.accuracy_target = self.base["accuracy-target"] ++ self.performance_sample_count = self.base["performance-sample-count"] ++ self.latency_constraint = self.base.get("latency-constraint", {}) ++ self.min_queries = self.base.get("min-queries", {}) ++ self.required = None ++ self.optional = None ++ self.ignore_uncommited = ignore_uncommited ++ self.more_power_check = more_power_check ++ ++ def set_extra_model_benchmark_map(self, extra_model_benchmark_map): ++ if extra_model_benchmark_map: ++ for mapping in extra_model_benchmark_map.split(';'): ++ model_name, mlperf_model = mapping.split(':') ++ self.base['model_mapping'][model_name] = mlperf_model ++ ++ def set_type(self, submission_type): ++ if submission_type is None and self.version in ["v0.5"]: ++ return ++ elif submission_type == "datacenter": ++ self.required = self.base["required-scenarios-datacenter"] ++ self.optional = self.base["optional-scenarios-datacenter"] ++ elif submission_type == "edge": ++ self.required = self.base["required-scenarios-edge"] ++ self.optional = self.base["optional-scenarios-edge"] ++ elif submission_type == "datacenter,edge" or submission_type == "edge,datacenter": ++ self.required = self.base["required-scenarios-datacenter-edge"] ++ self.optional = self.base["optional-scenarios-datacenter-edge"] ++ else: ++ raise ValueError("invalid system type") ++ ++ def get_mlperf_model(self, model): ++ # preferred - user is already using the official name ++ if model in self.models: ++ return model ++ ++ # simple mapping, ie resnet50->resnet ? 
++ mlperf_model = self.base["model_mapping"].get(model) ++ if mlperf_model: ++ return mlperf_model ++ ++ # try to guess ++ if "ssdlite" in model or "ssd-inception" in model or "yolo" in model or \ ++ "ssd-mobilenet" in model or "ssd-resnet50" in model: ++ model = "ssd-small" ++ elif "mobilenet" in model: ++ model = "mobilenet" ++ elif "efficientnet" in model or "resnet50" in model: ++ model = "resnet" ++ elif "rcnn" in model: ++ model = "ssd-small" ++ elif "bert-99.9" in model: ++ model = "bert-99.9" ++ elif "bert-99" in model: ++ model = "bert-99" ++ # map again, for example v0.7 does not have mobilenet so it needs to be mapped to resnet ++ mlperf_model = self.base["model_mapping"].get(model, model) ++ return mlperf_model ++ ++ def get_required(self, model): ++ if self.version in ["v0.5"]: ++ return set() ++ model = self.get_mlperf_model(model) ++ if model not in self.required: ++ return None ++ return set(self.required[model]) ++ ++ def get_optional(self, model): ++ if self.version in ["v0.5"]: ++ return set(["SingleStream", "MultiStream", "Server", "Offline"]) ++ model = self.get_mlperf_model(model) ++ if model not in self.optional: ++ return set() ++ return set(self.optional[model]) ++ ++ def get_accuracy_target(self, model): ++ if model not in self.accuracy_target: ++ raise ValueError("model not known: " + model) ++ return self.accuracy_target[model] ++ ++ def get_performance_sample_count(self, model): ++ model = self.get_mlperf_model(model) ++ if model not in self.performance_sample_count: ++ raise ValueError("model not known: " + model) ++ return self.performance_sample_count[model] ++ ++ def ignore_errors(self, line): ++ for error in self.base["ignore_errors"]: ++ if error in line: ++ return True ++ if self.ignore_uncommited and "ERROR : Loadgen built with uncommitted changes!" 
in line: ++ return True ++ return False ++ ++ def get_min_query_count(self, model, scenario): ++ model = self.get_mlperf_model(model) ++ if model not in self.min_queries: ++ raise ValueError("model not known: " + model) ++ return self.min_queries[model].get(scenario) ++ ++ def has_new_logging_format(self): ++ return self.version not in ["v0.5", "v0.7"] ++ ++ def uses_legacy_multistream(self): ++ return self.version in ["v0.5", "v0.7", "v1.0", "v1.1"] ++ ++ ++ def uses_early_stopping(self, scenario): ++ return (self.version not in ["v0.5", "v0.7", "v1.0", "v1.1"]) and ( ++ scenario in ["Server", "SingleStream", "MultiStream"] ++ ) ++ ++ def has_query_count_in_log(self): ++ return self.version not in ["v0.5", "v0.7", "v1.0", "v1.1"] ++ ++ ++ def has_power_utc_timestamps(self): ++ return self.version not in ["v0.5", "v0.7", "v1.0"] ++ ++ ++ ++def get_args(): ++ """Parse commandline.""" ++ parser = argparse.ArgumentParser() ++ parser.add_argument("--input", required=True, help="submission directory") ++ parser.add_argument("--version", default="v2.1", choices=list(MODEL_CONFIG.keys()), help="mlperf version") ++ parser.add_argument("--submitter", help="filter to submitter") ++ parser.add_argument("--csv", default="summary.csv", help="csv file with results") ++ parser.add_argument("--skip_compliance", action="store_true", help="Pass this cmdline option to skip checking compliance/ dir") ++ parser.add_argument("--extra-model-benchmark-map", help="extra model name to benchmark mapping") ++ parser.add_argument("--debug", action="store_true", help="extra debug output") ++ parser.add_argument("--submission-exceptions", action="store_true", help="ignore certain errors for submission") ++ parser.add_argument("--more-power-check", action="store_true", help="apply Power WG's check.py script on each power submission. Requires Python 3.7+") ++ args = parser.parse_args() ++ return args ++ ++ ++def list_dir(*path): ++ path = os.path.join(*path) ++ return [f for f in os.listdir(path) if os.path.isdir(os.path.join(path, f))] ++ ++ ++def list_files(*path): ++ path = os.path.join(*path) ++ return [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))] ++ ++ ++def split_path(m): ++ return m.replace("\\", "/").split("/") ++ ++ ++def find_error_in_detail_log(config, fname): ++ is_valid = True ++ if not os.path.exists(fname): ++ log.error("%s is missing", fname) ++ is_valid = False ++ else: ++ if config.has_new_logging_format(): ++ mlperf_log = MLPerfLog(fname) ++ if mlperf_log.has_error(): ++ if config.ignore_uncommited: ++ has_other_errors = False ++ for error in mlperf_log.get_errors(): ++ if "Loadgen built with uncommitted changes!" not in error["value"]: ++ has_other_errors = True ++ ++ log.error("%s contains errors:", fname) ++ for error in mlperf_log.get_errors(): ++ log.error("%s", error["value"]) ++ ++ if not config.ignore_uncommited or has_other_errors: ++ is_valid = False ++ else: ++ with open(fname, "r") as f: ++ for line in f: ++ # look for: ERROR ++ if "ERROR" in line: ++ if config.ignore_errors(line): ++ if "ERROR : Loadgen built with uncommitted changes!" 
in line: ++ log.warning("%s contains error: %s", fname, line) ++ continue ++ log.error("%s contains error: %s", fname, line) ++ is_valid = False ++ return is_valid ++ ++ ++def check_accuracy_dir(config, model, path, verbose): ++ is_valid = False ++ acc = None ++ hash_val = None ++ acc_type, acc_target = config.get_accuracy_target(model) ++ pattern = ACC_PATTERN[acc_type] ++ with open(os.path.join(path, "accuracy.txt"), "r", encoding="utf-8") as f: ++ for line in f: ++ m = re.match(pattern, line) ++ if m: ++ acc = m.group(1) ++ m = re.match(r"^hash=([\w\d]+)$", line) ++ if m: ++ hash_val = m.group(1) ++ if hash_val and acc: ++ break ++ ++ if acc and float(acc) >= acc_target: ++ is_valid = True ++ elif verbose: ++ log.warning("%s accuracy not met: expected=%f, found=%s", path, acc_target, acc) ++ ++ if not hash_val: ++ log.error("%s not hash value for mlperf_log_accuracy.json", path) ++ is_valid = False ++ ++ # check mlperf_log_accuracy.json ++ fname = os.path.join(path, "mlperf_log_accuracy.json") ++ if not os.path.exists(fname): ++ log.error("%s is missing", fname) ++ is_valid = False ++ else: ++ if os.stat(fname).st_size > MAX_ACCURACY_LOG_SIZE: ++ log.error("%s is not truncated", fname) ++ is_valid = False ++ ++ # check if there are any errors in the detailed log ++ fname = os.path.join(path, "mlperf_log_detail.txt") ++ if not find_error_in_detail_log(config, fname): ++ is_valid = False ++ ++ return is_valid, acc ++ ++ ++def check_performance_dir(config, model, path, scenario_fixed, division, system_json): ++ is_valid = False ++ rt = {} ++ ++ # look for: Result is: VALID ++ if config.has_new_logging_format(): ++ fname = os.path.join(path, "mlperf_log_detail.txt") ++ mlperf_log = MLPerfLog(fname) ++ if "result_validity" in mlperf_log.get_keys() and mlperf_log["result_validity"] == "VALID": ++ is_valid = True ++ performance_sample_count = mlperf_log["effective_performance_sample_count"] ++ qsl_rng_seed = mlperf_log["effective_qsl_rng_seed"] ++ sample_index_rng_seed = mlperf_log["effective_sample_index_rng_seed"] ++ schedule_rng_seed = mlperf_log["effective_schedule_rng_seed"] ++ scenario = mlperf_log["effective_scenario"] ++ scenario_for_res = "MultiStreamLegacy" if scenario == "MultiStream" and config.uses_legacy_multistream() else\ ++ scenario ++ res = float(mlperf_log[RESULT_FIELD_NEW[config.version][scenario_for_res]]) ++ latency_99_percentile = mlperf_log["result_99.00_percentile_latency_ns"] ++ latency_mean = mlperf_log["result_mean_latency_ns"] ++ if scenario in ["MultiStream"]: ++ latency_99_percentile = mlperf_log["result_99.00_percentile_per_query_latency_ns"] ++ latency_mean = mlperf_log["result_mean_query_latency_ns"] ++ min_query_count = mlperf_log["effective_min_query_count"] ++ samples_per_query = mlperf_log["effective_samples_per_query"] ++ min_duration = mlperf_log["effective_min_duration_ms"] ++ if scenario == "SingleStream": ++ # qps_wo_loadgen_overhead is only used for inferring Offline from SingleStream; only for old submissions ++ qps_wo_loadgen_overhead = mlperf_log["result_qps_without_loadgen_overhead"] ++ sut_name = mlperf_log["sut_name"] ++ else: ++ fname = os.path.join(path, "mlperf_log_summary.txt") ++ with open(fname, "r") as f: ++ for line in f: ++ m = re.match(r"^Result\s+is\s*\:\s+VALID", line) ++ if m: ++ is_valid = True ++ m = re.match(r"^\s*([\w\s.\(\)\/]+)\s*\:\s*([\w\+\.][\w\+\.\s]*)", line) ++ if m: ++ rt[m.group(1).strip()] = m.group(2).strip() ++ performance_sample_count = int(rt['performance_sample_count']) ++ qsl_rng_seed = int(rt["qsl_rng_seed"]) 
++ sample_index_rng_seed = int(rt["sample_index_rng_seed"]) ++ schedule_rng_seed = int(rt["schedule_rng_seed"]) ++ scenario = rt["Scenario"].replace(" ","") ++ res = float(rt[RESULT_FIELD[scenario]]) ++ latency_99_percentile = int(rt['99.00 percentile latency (ns)']) ++ latency_mean = int(rt['Mean latency (ns)']) ++ min_query_count = int(rt['min_query_count']) ++ samples_per_query = int(rt['samples_per_query']) ++ min_duration = int(rt["min_duration (ms)"]) ++ if scenario == "SingleStream": ++ qps_wo_loadgen_overhead = float(rt["QPS w/o loadgen overhead"]) ++ sut_name = str(rt['System Under Test (SUT) name: ']) ++ ++ # check if there are any errors in the detailed log ++ fname = os.path.join(path, "mlperf_log_detail.txt") ++ if not find_error_in_detail_log(config, fname): ++ is_valid = False ++ ++ required_performance_sample_count = config.get_performance_sample_count(model) ++ if performance_sample_count < required_performance_sample_count: ++ log.error("%s performance_sample_count, found %d, needs to be >= %d", ++ fname, performance_sample_count, required_performance_sample_count) ++ is_valid = False ++ ++ config_seeds = config.seeds if "TEST05" not in fname else config.test05_seeds ++ if qsl_rng_seed != config_seeds["qsl_rng_seed"]: ++ log.error("%s qsl_rng_seed is wrong, expected=%s, found=%s", fname, config_seeds["qsl_rng_seed"], qsl_rng_seed) ++ if sample_index_rng_seed != config_seeds["sample_index_rng_seed"]: ++ log.error("%s sample_index_rng_seed is wrong, expected=%s, found=%s", fname, config_seeds["sample_index_rng_seed"], sample_index_rng_seed) ++ if schedule_rng_seed != config_seeds["schedule_rng_seed"]: ++ log.error("%s schedule_rng_seed is wrong, expected=%s, found=%s", fname, config_seeds["schedule_rng_seed"], schedule_rng_seed) ++ ++ if scenario == "SingleStream" or (scenario == "MultiStream" and not config.uses_legacy_multistream()): ++ res /= MS_TO_NS ++ ++ # Check if current scenario (and version) uses early stopping ++ uses_early_stopping = config.uses_early_stopping(scenario) ++ ++ if config.version != "v0.5": ++ # FIXME: for open we script this because open can submit in all scenarios ++ # not supported for v0.5 ++ ++ if uses_early_stopping: ++ # check if early_stopping condition was met ++ if not mlperf_log["early_stopping_met"]: ++ early_stopping_result = mlperf_log["early_stopping_result"] ++ log.error("Early stopping condition was not met, msg=%s", early_stopping_result) ++ ++ # If the scenario has a target latency (Server scenario), check ++ # that the target latency that was passed to the early stopping ++ # is less than the target latency. 
++ target_latency = config.latency_constraint.get(model, dict()).get(scenario) ++ if target_latency: ++ early_stopping_latency_ns = mlperf_log["effective_target_latency_ns"] ++ log.info("Target latency: %s, Early Stopping Latency: %s, Scenario: %s", ++ target_latency, early_stopping_latency_ns, scenario) ++ if early_stopping_latency_ns > target_latency: ++ log.error("%s Latency constraint with early stopping not met, expected=%s, found=%s", ++ fname, target_latency, early_stopping_latency_ns) ++ ++ else: ++ # check if the benchmark meets latency constraint ++ target_latency = config.latency_constraint.get(model, dict()).get(scenario) ++ log.info("Target latency: %s, Latency: %s, Scenario: %s", target_latency, latency_99_percentile, scenario) ++ if target_latency: ++ if latency_99_percentile > target_latency: ++ log.error("%s Latency constraint not met, expected=%s, found=%s", ++ fname, target_latency, latency_99_percentile) ++ ++ # Check Minimum queries were issued to meet test duration ++ # Check if this run uses early stopping. If it does, get the ++ # min_queries from the detail log, otherwise get this value ++ # from the config ++ if not uses_early_stopping: ++ required_min_query_count = config.get_min_query_count(model, scenario) ++ if required_min_query_count and min_query_count < required_min_query_count: ++ log.error("%s Required minimum Query Count not met by user config, Expected=%s, Found=%s", ++ fname, required_min_query_count, min_query_count) ++ ++ if scenario == "Offline" and (samples_per_query < OFFLINE_MIN_SPQ): ++ log.error("%s Required minimum samples per query not met by user config, Expected=%s, Found=%s", ++ fname, OFFLINE_MIN_SPQ, samples_per_query) ++ ++ # Test duration of 600s is met ++ required_min_duration = TEST_DURATION_MS_PRE_1_0 if config.version in ["v0.5", "v0.7"] else TEST_DURATION_MS ++ if min_duration < required_min_duration: ++ log.error("%s Test duration lesser than 600s in user config. expected=%s, found=%s", ++ fname, required_min_duration, min_duration) ++ ++ inferred = False ++ # special case for results inferred from different scenario ++ if scenario_fixed in ["Offline"] and scenario in ["SingleStream"]: ++ inferred = True ++ res = qps_wo_loadgen_overhead ++ ++ if (scenario_fixed in ["Offline"] and not config.uses_legacy_multistream()) and scenario in ["MultiStream"]: ++ inferred = True ++ res = samples_per_query * S_TO_MS / (latency_mean / MS_TO_NS) ++ ++ if (scenario_fixed in ["MultiStream"] and not config.uses_legacy_multistream()) and scenario in ["SingleStream"]: ++ inferred = True ++ # samples_per_query does not match with the one reported in the logs ++ # when inferring MultiStream from SingleStream ++ samples_per_query = 8 ++ if uses_early_stopping: ++ early_stopping_latency_ms = mlperf_log["early_stopping_latency_ms"] ++ if early_stopping_latency_ms == 0: ++ log.error("Not enough samples were processed for early stopping to make an estimate") ++ is_valid = False ++ res = (early_stopping_latency_ms * samples_per_query) / MS_TO_NS ++ else: ++ res = (latency_99_percentile * samples_per_query) / MS_TO_NS ++ ++ is_network_system, is_network_mode_valid = is_system_over_network(division, system_json, path) ++ is_valid &= is_network_mode_valid ++ if is_network_system: ++ # for network mode verify the SUT name is valid, accodring to the rules (must include "Network SUT" in name) ++ if NETWORK_MODE_REQUIRED_SUBSTRING_IN_SUT_NAME not in sut_name: ++ log.error( ++ f"{fname} invalid sut name for network mode. 
expecting the substring '{NETWORK_MODE_REQUIRED_SUBSTRING_IN_SUT_NAME}' got '{sut_name}'") ++ is_valid = False ++ ++ return is_valid, res, inferred ++ ++ ++def check_power_dir(power_path, ranging_path, testing_path, scenario_fixed, config): ++ ++ more_power_check = config.more_power_check ++ ++ is_valid = True ++ power_metric = 0 ++ ++ # check if all the required files are present ++ required_files = REQUIRED_PERF_FILES + REQUIRED_PERF_POWER_FILES ++ diff = files_diff(list_files(testing_path), required_files, OPTIONAL_PERF_FILES) ++ if diff: ++ log.error("%s has file list mismatch (%s)", testing_path, diff) ++ is_valid = False ++ diff = files_diff(list_files(ranging_path), required_files, OPTIONAL_PERF_FILES) ++ if diff: ++ log.error("%s has file list mismatch (%s)", ranging_path, diff) ++ is_valid = False ++ diff = files_diff(list_files(power_path), REQUIRED_POWER_FILES) ++ if diff: ++ log.error("%s has file list mismatch (%s)", power_path, diff) ++ is_valid = False ++ ++ # parse the power logs ++ if config.has_power_utc_timestamps(): ++ server_timezone = datetime.timedelta(0) ++ client_timezone = datetime.timedelta(0) ++ else: ++ server_json_fname = os.path.join(power_path, "server.json") ++ with open(server_json_fname) as f: ++ server_timezone = datetime.timedelta(seconds=json.load(f)["timezone"]) ++ client_json_fname = os.path.join(power_path, "client.json") ++ with open(client_json_fname) as f: ++ client_timezone = datetime.timedelta(seconds=json.load(f)["timezone"]) ++ detail_log_fname = os.path.join(testing_path, "mlperf_log_detail.txt") ++ mlperf_log = MLPerfLog(detail_log_fname) ++ datetime_format = '%m-%d-%Y %H:%M:%S.%f' ++ power_begin = datetime.datetime.strptime(mlperf_log["power_begin"], datetime_format) + client_timezone ++ power_end = datetime.datetime.strptime(mlperf_log["power_end"], datetime_format) + client_timezone ++ # Obtain the scenario also from logs to check if power is inferred ++ if config.has_new_logging_format(): ++ scenario = mlperf_log["effective_scenario"] ++ else: ++ rt = {} ++ fname = os.path.join(testing_path, "mlperf_log_summary.txt") ++ with open(fname, "r") as f: ++ for line in f: ++ m = re.match(r"^Result\s+is\s*\:\s+VALID", line) ++ if m: ++ is_valid = True ++ m = re.match(r"^\s*([\w\s.\(\)\/]+)\s*\:\s*([\w\+\.][\w\+\.\s]*)", line) ++ if m: ++ rt[m.group(1).strip()] = m.group(2).strip() ++ scenario = rt["Scenario"].replace(" ","") ++ spl_fname = os.path.join(testing_path, "spl.txt") ++ power_list = [] ++ with open(spl_fname) as f: ++ for line in f: ++ timestamp = datetime.datetime.strptime(line.split(",")[1], datetime_format) + server_timezone ++ if timestamp > power_begin and timestamp < power_end: ++ power_list.append(float(line.split(",")[3])) ++ if len(power_list) == 0: ++ log.error("%s has no power samples falling in power range: %s - %s", spl_fname, power_begin, power_end) ++ is_valid = False ++ else: ++ avg_power = sum(power_list) / len(power_list) ++ power_duration = (power_end - power_begin).total_seconds() ++ if scenario_fixed in ["Offline", "Server"]: ++ # In Offline and Server scenarios, the power metric is in W. ++ power_metric = avg_power ++ else: ++ # In SingleStream and MultiStream scenarios, the power metric is in J/query. ++ assert scenario_fixed in ["MultiStream", "SingleStream"], "Unknown scenario: {:}".format(scenario_fixed) ++ if not config.has_query_count_in_log(): ++ # Before v2.0, LoadGen does NOT print out the actual number of queries in detail logs. 
There is a ++ # "generated_query_count", but LoadGen exits early when the min_duration has been met, so it is not equal to ++ # the actual number of queries. To work around it, make use of "result_qps_with_loadgen_overhead", which is ++ # defined as: (sample_count - 1) / pr.final_query_issued_time, where final_query_issued_time can be ++ # approximated by power_duration (off by one query worth of latency, which is in general negligible compared ++ # to 600-sec total runtime and can be offset by removing the "+1" when reconstructing the sample_count). ++ # As for MultiStream, it always runs for 270336 queries, so using "generated_query_count" as above is fine. ++ if scenario_fixed in ["MultiStream"]: ++ num_queries = mlperf_log["generated_query_count"] * mlperf_log["generated_samples_per_query"] ++ elif scenario_fixed in ["SingleStream"]: ++ num_queries = mlperf_log["result_qps_with_loadgen_overhead"] * power_duration ++ else: ++ # Starting from v2.0, LoadGen logs the actual number of issued queries. ++ num_queries = int(mlperf_log["result_query_count"]) ++ power_metric = avg_power * power_duration / num_queries ++ ++ if (scenario_fixed in ["MultiStream"] and not config.uses_legacy_multistream()) and scenario in ["SingleStream"]: ++ samples_per_query = 8 ++ power_metric = avg_power * power_duration * samples_per_query / num_queries ++ ++ if more_power_check: ++ python_version_major = int(sys.version.split(" ")[0].split(".")[0]) ++ python_version_minor = int(sys.version.split(" ")[0].split(".")[1]) ++ assert python_version_major == 3 and python_version_minor >= 7, "The --more-power-check only supports Python 3.7+" ++ assert os.path.exists(os.path.join(submission_checker_dir, "power-dev", "compliance", "check.py")), \ ++ "Please run 'git submodule update --init tools/submission/power-dev' to get Power WG's check.py."
++ sys.path.insert(0, os.path.join(submission_checker_dir, "power-dev")) ++ from compliance.check import check as check_power_more ++ perf_path = os.path.dirname(power_path) ++ check_power_result = check_power_more(perf_path) ++ sys.stdout.flush() ++ sys.stderr.flush() ++ if check_power_result != 0: ++ log.error("Power WG check.py did not pass for: %s", perf_path) ++ is_valid = False ++ ++ return is_valid, power_metric ++ ++ ++ ++def files_diff(list1, list2, optional=None): ++ """returns the set of files that are missing or added.""" ++ if not optional: ++ optional = [] ++ optional = optional + ["mlperf_log_trace.json", "results.json", ".gitkeep"] ++ return set(list1).symmetric_difference(set(list2)) - set(optional) ++ ++def is_system_over_network(division, system_json, path): ++ """ ++ Verify whether the submitted system is over network and whether it is valid for the division ++ ++ for 'network' division, it is mandatory that the system is over-network ++ for 'closed' division, the system must not be over-network ++ for 'open' division, the system may be either local or over-network ++ """ ++ is_network_mode_sys_spec_str = system_json.get(SYSTEM_DESC_IS_NETWORK_MODE) ++ is_network_system = is_network_mode_sys_spec_str.lower()=="true" if is_network_mode_sys_spec_str is not None else False ++ # verify that the system corresponds to the division ++ is_valid = True ++ expected_state_by_division = {"network": True, "closed": False} ++ if division in expected_state_by_division: ++ is_valid = expected_state_by_division[division] is is_network_system ++ if not is_valid: ++ log.error(f"{path} incorrect network mode (={is_network_system}) for division '{division}'") ++ return is_network_system, is_valid ++ ++def check_results_dir(config, filter_submitter, skip_compliance, csv, debug=False): ++ """ ++ Walk the results directory and do the checking. ++ ++ We are called with the cwd at the root of the submission directory. ++ level1 division - closed|open|network ++ level2 submitter - for example mlperf_org ++ level3 - results, systems, measurements, code ++ ++ For results the structure from here is: ++ results/$system_desc/$benchmark_model/$scenario/performance/run_n ++ and ++ results/$system_desc/$benchmark_model/$scenario/accuracy ++ ++ We first walk into results/$system_desc ++ make sure there is a system_desc.json and it's good ++ Next we walk into the model ++ make sure the model is good, make sure all required scenarios are there.
++ Next we walk into each scenario ++ check the performance directory ++ check the accuracy directory ++ if all was good, add the result to the results directory ++ if there are errors write a None as result so we can report later what failed ++ """ ++ head = [ ++ "Organization", "Availability", "Division", "SystemType", "SystemName", ++ "Platform", "Model", "MlperfModel", "Scenario", "Result", "Accuracy", ++ "number_of_nodes", "host_processor_model_name", ++ "host_processors_per_node", "host_processor_core_count", ++ "accelerator_model_name", "accelerators_per_node", "Location", ++ "framework", "operating_system", "notes", "compliance", "errors", ++ "version", "inferred", "has_power", "Units" ++ ] ++ fmt = ",".join(["{}"] * len(head)) + "\n" ++ csv.write(",".join(head) + "\n") ++ results = {} ++ ++ def log_result(submitter, ++ available, ++ division, ++ system_type, ++ system_name, ++ system_desc, ++ model_name, ++ mlperf_model, ++ scenario_fixed, ++ r, ++ acc, ++ system_json, ++ name, ++ compliance, ++ errors, ++ config, ++ inferred=0, ++ power_metric=0): ++ ++ notes = system_json.get("hw_notes", "") ++ if system_json.get("sw_notes"): ++ notes = notes + ". " + system_json.get("sw_notes") ++ unit_dict = { ++ "SingleStream": "Latency (ms)", ++ "MultiStream": "Latency (ms)", ++ "Offline": "Samples/s", ++ "Server": "Queries/s", ++ } ++ power_unit_dict = { ++ "SingleStream": "Joules", ++ "MultiStream": "Joules", ++ "Offline": "Watts", ++ "Server": "Watts", ++ } ++ unit = unit_dict[scenario_fixed] ++ power_unit = power_unit_dict[scenario_fixed] ++ ++ csv.write( ++ fmt.format(submitter, available, division, '\"' + system_type + '\"', ++ '\"' + system_name + '\"', system_desc, model_name, ++ mlperf_model, scenario_fixed, r, acc, ++ system_json.get("number_of_nodes"), ++ '"' + system_json.get("host_processor_model_name") + '"', ++ system_json.get("host_processors_per_node"), ++ system_json.get("host_processor_core_count"), ++ '"' + system_json.get("accelerator_model_name") + '"', ++ system_json.get("accelerators_per_node"), ++ name.replace("\\", "/"), ++ '"' + system_json.get("framework", "") + '"', ++ '"' + system_json.get("operating_system", "") + '"', ++ '"' + notes + '"', compliance, errors, config.version, ++ inferred, power_metric > 0, unit)) ++ ++ if power_metric > 0: ++ csv.write( ++ fmt.format(submitter, available, division, '\"' + system_type + '\"', ++ '\"' + system_name + '\"', system_desc, model_name, ++ mlperf_model, scenario_fixed, power_metric, acc, ++ system_json.get("number_of_nodes"), ++ '"' + system_json.get("host_processor_model_name") + '"', ++ system_json.get("host_processors_per_node"), ++ system_json.get("host_processor_core_count"), ++ '"' + system_json.get("accelerator_model_name") + '"', ++ system_json.get("accelerators_per_node"), ++ name.replace("\\", "/"), ++ '"' + system_json.get("framework", "") + '"', ++ '"' + system_json.get("operating_system", "") + '"', ++ '"' + notes + '"', compliance, errors, config.version, ++ inferred, power_metric > 0, power_unit)) ++ ++ # we are at the top of the submission directory ++ for division in list_dir("."): ++ # we are looking at ./$division, ie ./closed ++ if division not in VALID_DIVISIONS: ++ if division not in [".git", ".github", "assets"]: ++ log.error("invalid division in input dir %s", division) ++ continue ++ is_closed_or_network = division in ["closed", "network"] ++ ++ for submitter in list_dir(division): ++ # we are looking at ./$division/$submitter, ie ./closed/mlperf_org ++ if filter_submitter and submitter !=
filter_submitter: ++ continue ++ results_path = os.path.join(division, submitter, "results") ++ if not os.path.exists(results_path): ++ continue ++ ++ for system_desc in list_dir(results_path): ++ # we are looking at ./$division/$submitter/results/$system_desc, ie ./closed/mlperf_org/results/t4-ort ++ ++ # ++ # check if system_id is good. ++ # ++ system_id_json = os.path.join(division, submitter, "systems", system_desc + ".json") ++ if not os.path.exists(system_id_json): ++ log.error("no system_desc for %s/%s/%s", division, submitter, system_desc) ++ results[os.path.join(results_path, system_desc)] = None ++ continue ++ ++ name = os.path.join(results_path, system_desc) ++ with open(system_id_json) as f: ++ system_json = json.load(f) ++ available = system_json.get("status").lower() ++ if available not in VALID_AVAILABILITIES: ++ log.error("%s has invalid status (%s)", system_id_json, available) ++ results[name] = None ++ continue ++ system_type = system_json.get("system_type") ++ if config.version not in ["v0.5"]: ++ valid_system_types = ["datacenter", "edge"] ++ if config.version not in ["v0.7"]: ++ valid_system_types += ["datacenter,edge", "edge,datacenter"] ++ if system_type not in valid_system_types: ++ log.error("%s has invalid system type (%s)", system_id_json, system_type) ++ results[name] = None ++ continue ++ config.set_type(system_type) ++ if not check_system_desc_id(name, system_json, submitter, division, config.version): ++ results[name] = None ++ continue ++ ++ # ++ # Look at each model ++ # ++ for model_name in list_dir(results_path, system_desc): ++ ++ # we are looking at ./$division/$submitter/results/$system_desc/$model, ++ # ie ./closed/mlperf_org/results/t4-ort/bert ++ name = os.path.join(results_path, system_desc, model_name) ++ mlperf_model = config.get_mlperf_model(model_name) ++ ++ if is_closed_or_network and mlperf_model not in config.models: ++ # for closed/network divisions we want the model name to match. ++ # for open division the model_name might be different than the task ++ log.error("%s has an invalid model %s for closed/network division", name, ++ model_name) ++ results[name] = None ++ continue ++ ++ # ++ # Look at each scenario ++ # ++ required_scenarios = config.get_required(mlperf_model) ++ if required_scenarios is None: ++ log.error("%s has an invalid model %s, system_type=%s", name, ++ mlperf_model, system_type) ++ results[name] = None ++ continue ++ ++ errors = 0 ++ all_scenarios = set(list(required_scenarios) + list(config.get_optional(mlperf_model))) ++ for scenario in list_dir(results_path, system_desc, model_name): ++ # some submissions in v0.5 use lower case scenarios - map them for now ++ scenario_fixed = SCENARIO_MAPPING.get(scenario, scenario) ++ ++ # we are looking at ./$division/$submitter/results/$system_desc/$model/$scenario, ++ # ie ./closed/mlperf_org/results/t4-ort/bert/Offline ++ name = os.path.join(results_path, system_desc, model_name, scenario) ++ results[name] = None ++ if is_closed_or_network and scenario_fixed not in all_scenarios: ++ log.warning("%s ignoring scenario %s (neither required nor optional)", name, scenario) ++ continue ++ ++ # check if measurement_dir is good. 
++ measurement_dir = os.path.join(division, submitter, "measurements", ++ system_desc, model_name, scenario) ++ if not os.path.exists(measurement_dir): ++ log.error("no measurement_dir for %s", measurement_dir) ++ results[measurement_dir] = None ++ errors += 1 ++ else: ++ if not check_measurement_dir(measurement_dir, name, system_desc, ++ os.path.join(division, submitter), model_name, scenario): ++ log.error("%s measurement_dir has issues", measurement_dir) ++ # results[measurement_dir] = None ++ errors += 1 ++ # FIXME: we should not accept this submission ++ # continue ++ ++ # check accuracy ++ accuracy_is_valid = False ++ acc_path = os.path.join(name, "accuracy") ++ if not os.path.exists(os.path.join(acc_path, "accuracy.txt")): ++ log.error( ++ "%s has no accuracy.txt. Generate it with accuracy-imagenet.py or accuracy-coco.py or " ++ "process_accuracy.py", acc_path) ++ else: ++ diff = files_diff(list_files(acc_path), REQUIRED_ACC_FILES) ++ if diff: ++ log.error("%s has file list mismatch (%s)", acc_path, diff) ++ accuracy_is_valid, acc = check_accuracy_dir(config, mlperf_model, acc_path, debug or is_closed_or_network) ++ if not accuracy_is_valid and not is_closed_or_network: ++ if debug: ++ log.warning("%s, accuracy not valid but taken for open", acc_path) ++ accuracy_is_valid = True ++ if not accuracy_is_valid: ++ # a little below we'll not copy this into the results csv ++ errors += 1 ++ log.error("%s, accuracy not valid", acc_path) ++ ++ inferred = 0 ++ if scenario in ["Server"] and config.version in ["v0.5", "v0.7"]: ++ n = ["run_1", "run_2", "run_3", "run_4", "run_5"] ++ else: ++ n = ["run_1"] ++ ++ # check if this submission has power logs ++ power_path = os.path.join(name, "performance", "power") ++ has_power = os.path.exists(power_path) ++ if has_power: ++ log.info("Detected power logs for %s", name) ++ ++ for i in n: ++ perf_path = os.path.join(name, "performance", i) ++ if not os.path.exists(perf_path): ++ log.error("%s is missing", perf_path) ++ continue ++ if has_power: ++ required_perf_files = REQUIRED_PERF_FILES + REQUIRED_PERF_POWER_FILES ++ else: ++ required_perf_files = REQUIRED_PERF_FILES ++ diff = files_diff(list_files(perf_path), required_perf_files, OPTIONAL_PERF_FILES) ++ if diff: ++ log.error("%s has file list mismatch (%s)", perf_path, diff) ++ ++ try: ++ is_valid, r, is_inferred = check_performance_dir(config, mlperf_model, perf_path, scenario_fixed, division, system_json) ++ if is_inferred: ++ inferred = 1 ++ log.info("%s has inferred results, qps=%s", perf_path, r) ++ except Exception as e: ++ log.error("%s caused exception in check_performance_dir: %s", perf_path, e) ++ is_valid, r = False, None ++ ++ power_metric = 0 ++ if has_power: ++ try: ++ ranging_path = os.path.join(name, "performance", "ranging") ++ power_is_valid, power_metric = check_power_dir(power_path, ranging_path, perf_path, scenario_fixed, ++ config) ++ if not power_is_valid: ++ is_valid = False ++ power_metric = 0 ++ except Exception as e: ++ log.error("%s caused exception in check_power_dir: %s", perf_path, e) ++ is_valid, r, power_metric = False, None, 0 ++ ++ if is_valid: ++ results[name] = r if r is None or power_metric == 0 else "{:f} with power_metric = {:f}".format(r, power_metric) ++ required_scenarios.discard(scenario_fixed) ++ else: ++ log.error("%s has issues", perf_path) ++ errors += 1 ++ ++ # check if compliance dir is good for CLOSED division ++ compliance = 0 if is_closed_or_network else 1 ++ if is_closed_or_network and not skip_compliance: ++ compliance_dir = 
os.path.join(division, submitter, "compliance", ++ system_desc, model_name, scenario) ++ if not os.path.exists(compliance_dir): ++ log.error("no compliance dir for %s", name) ++ results[name] = None ++ else: ++ if not check_compliance_dir(compliance_dir, mlperf_model, scenario_fixed, config, division, system_json): ++ log.error("compliance dir %s has issues", compliance_dir) ++ results[name] = None ++ else: ++ compliance = 1 ++ ++ if results.get(name): ++ if accuracy_is_valid: ++ log_result(submitter, available, division, system_type, system_json.get("system_name"), system_desc, model_name, mlperf_model, ++ scenario_fixed, r, acc, system_json, name, compliance, errors, config, inferred=inferred, power_metric=power_metric) ++ else: ++ results[name] = None ++ log.error("%s is OK but accuracy has issues", name) ++ ++ if required_scenarios: ++ name = os.path.join(results_path, system_desc, model_name) ++ if is_closed_or_network: ++ results[name] = None ++ log.error("%s does not have all required scenarios, missing %s", name, required_scenarios) ++ elif debug: ++ log.warning("%s ignoring missing scenarios in open division (%s)", name, required_scenarios) ++ ++ return results ++ ++ ++def check_system_desc_id(fname, systems_json, submitter, division, version): ++ is_valid = True ++ # check all required fields ++ if version in ["v0.5", "v0.7"]: ++ required_fields = SYSTEM_DESC_REQUIRED_FIELDS ++ else: ++ required_fields = SYSTEM_DESC_REQUIRED_FIELDS + SYSTEM_DESC_REQUIED_FIELDS_SINCE_V1 ++ ++ is_network_system, is_network_mode_valid = is_system_over_network(division, systems_json, fname) ++ is_valid &= is_network_mode_valid ++ if is_network_system: ++ required_fields += SYSTEM_DESC_REQUIRED_FIELDS_NETWORK_MODE ++ ++ for k in required_fields: ++ if k not in systems_json: ++ is_valid = False ++ log.error("%s, field %s is missing", fname, k) ++ ++ if version in ["v0.5", "v0.7"]: ++ all_fields = required_fields + SYSTEM_DESC_REQUIED_FIELDS_SINCE_V1 ++ else: ++ # TODO: SYSTEM_DESC_REQUIED_FIELDS_POWER should be mandatory when a submission has power logs, but since we ++ # check power submission in check_results_dir, the information is not available yet at this stage. 
++ all_fields = required_fields + SYSTEM_DESC_REQUIED_FIELDS_POWER ++ for k in systems_json.keys(): ++ if k not in all_fields: ++ log.warning("%s, field %s is unknown", fname, k) ++ ++ if systems_json.get("submitter").lower() != submitter.lower(): ++ log.error("%s has submitter %s, directory has %s", fname, systems_json.get("submitter"), submitter) ++ is_valid = False ++ if systems_json.get("division") != division: ++ log.error("%s has division %s, division has %s", fname, systems_json.get("division"), division) ++ is_valid = False ++ return is_valid ++ ++ ++def check_measurement_dir(measurement_dir, fname, system_desc, root, model, scenario): ++ files = list_files(measurement_dir) ++ system_file = None ++ is_valid = True ++ for i in REQUIRED_MEASURE_FILES: ++ if i not in files: ++ log.error("%s is missing %s", measurement_dir, i) ++ is_valid = False ++ for i in files: ++ if i.startswith(system_desc) and i.endswith("_" + scenario + ".json"): ++ system_file = i ++ end = len("_" + scenario + ".json") ++ break ++ elif i.startswith(system_desc) and i.endswith(".json"): ++ system_file = i ++ end = len(".json") ++ break ++ if system_file: ++ with open(os.path.join(measurement_dir, system_file), "r") as f: ++ j = json.load(f) ++ for k in SYSTEM_IMP_REQUIRED_FILES: ++ if k not in j: ++ is_valid = False ++ log.error("%s, field %s is missing", fname, k) ++ ++ impl = system_file[len(system_desc) + 1:-end] ++ code_dir = os.path.join(root, "code", model) ++ if os.path.isfile(code_dir): ++ with open(code_dir, "r") as f: ++ line = f.read() ++ code_dir = os.path.join(root, "code", line.strip(), impl) ++ else: ++ code_dir = os.path.join(root, "code", model, impl) ++ ++ if not os.path.exists(code_dir): ++ # see if the code dir is per model ++ if not os.path.exists(os.path.dirname(code_dir)): ++ log.error("%s is missing code_dir %s", fname, code_dir) ++ is_valid = False ++ else: ++ log.error("%s is missing %s*.json", fname, system_desc) ++ is_valid = False ++ ++ return is_valid ++ ++def check_compliance_perf_dir(test_dir): ++ is_valid = False ++ ++ fname = os.path.join(test_dir, "verify_performance.txt") ++ if not os.path.exists(fname): ++ log.error("%s is missing in %s", fname, test_dir) ++ is_valid = False ++ else: ++ with open(fname, "r") as f: ++ for line in f: ++ # look for: TEST PASS ++ if "TEST PASS" in line: ++ is_valid = True ++ break ++ if is_valid == False: ++ log.error("Compliance test performance check in %s failed", test_dir) ++ ++ # Check performance dir ++ test_perf_path = os.path.join(test_dir, "performance", "run_1") ++ if not os.path.exists(test_perf_path): ++ log.error("%s has no performance/run_1 directory", test_dir) ++ is_valid = False ++ else: ++ diff = files_diff( ++ list_files(test_perf_path), REQUIRED_COMP_PER_FILES, ++ ["mlperf_log_accuracy.json"]) ++ if diff: ++ log.error("%s has file list mismatch (%s)", test_perf_path, diff) ++ is_valid = False ++ ++ return is_valid ++ ++def check_compliance_acc_dir(test_dir): ++ is_valid = False ++ acc_passed = False ++ ++ fname = os.path.join(test_dir, "verify_accuracy.txt") ++ if not os.path.exists(fname): ++ log.error("%s is missing in %s", fname, test_dir) ++ else: ++ # Accuracy can fail for TEST01 ++ is_valid = True ++ with open(fname, "r") as f: ++ for line in f: ++ # look for: TEST PASS ++ if "TEST PASS" in line: ++ acc_passed = True ++ break ++ if acc_passed == False: ++ log.info("Compliance test accuracy check in %s failed", test_dir) ++ ++ # Check Accuracy dir ++ test_acc_path = os.path.join(test_dir, "accuracy") ++ if not 
os.path.exists(test_acc_path): ++ log.error("%s has no accuracy directory", test_dir) ++ is_valid = False ++ else: ++ diff = files_diff(list_files(test_acc_path), REQUIRED_TEST01_ACC_FILES_1 if acc_passed else REQUIRED_TEST01_ACC_FILES) ++ if diff: ++ log.error("%s has file list mismatch (%s)", test_acc_path, diff) ++ is_valid = False ++ ++ return is_valid ++ ++def check_compliance_dir(compliance_dir, model, scenario, config, division, system_json): ++ compliance_perf_pass = True ++ compliance_perf_dir_pass = True ++ compliance_acc_pass = True ++ test_list = ["TEST01", "TEST04", "TEST05"] ++ ++ if model in ["rnnt", "bert-99", "bert-99.9", "dlrm-99", "dlrm-99.9", "3d-unet-99", "3d-unet-99.9", "retinanet"]: ++ test_list.remove("TEST04") ++ ++ # Check performance of all tests ++ for test in test_list: ++ test_dir = os.path.join(compliance_dir, test) ++ if not os.path.exists(test_dir): ++ log.error("Missing %s in compliance dir %s", test, compliance_dir) ++ compliance_perf_dir_pass = False ++ else: ++ try: ++ compliance_perf_dir = os.path.join(compliance_dir, test, "performance","run_1") ++ compliance_perf_valid, r, is_inferred = check_performance_dir(config, model, compliance_perf_dir, scenario, division, system_json) ++ if is_inferred: ++ log.info("%s has inferred results, qps=%s", compliance_perf_dir, r) ++ except Exception as e: ++ log.error("%s caused exception in check_performance_dir: %s", compliance_perf_dir, e) ++ # mark this test's performance check as failed so the exception is not silently ignored ++ compliance_perf_valid, r = False, None ++ compliance_perf_pass = compliance_perf_pass and check_compliance_perf_dir(test_dir) and compliance_perf_valid ++ ++ ++ ++ # Check accuracy for TEST01 ++ compliance_acc_pass = check_compliance_acc_dir(os.path.join(compliance_dir, "TEST01")) ++ ++ return compliance_perf_pass and compliance_acc_pass and compliance_perf_dir_pass ++ ++def main(): ++ args = get_args() ++ ++ config = Config(args.version, args.extra_model_benchmark_map, ignore_uncommited=args.submission_exceptions, ++ more_power_check=args.more_power_check) ++ ++ with open(args.csv, "w") as csv: ++ os.chdir(args.input) ++ # check results directory ++ results = check_results_dir(config, args.submitter, args.skip_compliance, csv, args.debug) ++ ++ # log results ++ log.info("---") ++ with_results = 0 ++ for k, v in sorted(results.items()): ++ if v: ++ log.info("Results %s %s", k, v) ++ with_results += 1 ++ log.info("---") ++ for k, v in sorted(results.items()): ++ if v is None: ++ log.error("NoResults %s", k) ++ ++ # print summary ++ log.info("---") ++ log.info("Results=%d, NoResults=%d", with_results, len(results) - with_results) ++ if len(results) != with_results: ++ log.error("SUMMARY: submission has errors") ++ return 1 ++ else: ++ log.info("SUMMARY: submission looks OK") ++ return 0 ++ ++ ++if __name__ == "__main__": ++ sys.exit(main()) +diff --git a/vision/classification_and_detection/run_local.sh b/vision/classification_and_detection/run_local.sh +index e69e3b8..fa5c482 100755 +--- a/vision/classification_and_detection/run_local.sh ++++ b/vision/classification_and_detection/run_local.sh +@@ -4,7 +4,7 @@ source ./run_common.sh + + common_opt="--mlperf_conf ../../mlperf.conf" + dataset="--dataset-path $DATA_DIR" +-OUTPUT_DIR=`pwd`/output/$name ++OUTPUT_DIR=${OUTPUT_DIR:-`pwd`/output/$name} + if [ !
-d $OUTPUT_DIR ]; then + mkdir -p $OUTPUT_DIR + fi diff --git a/script/get-mlperf-inference-src/patch/openimages-pycocotools.patch b/script/get-mlperf-inference-src/patch/openimages-pycocotools.patch new file mode 100644 index 0000000000..7dc3126781 --- /dev/null +++ b/script/get-mlperf-inference-src/patch/openimages-pycocotools.patch @@ -0,0 +1,24 @@ +diff --git a/vision/classification_and_detection/tools/accuracy-openimages.py b/vision/classification_and_detection/tools/accuracy-openimages.py +index 655ae5c..497dcbd 100644 +--- a/vision/classification_and_detection/tools/accuracy-openimages.py ++++ b/vision/classification_and_detection/tools/accuracy-openimages.py +@@ -36,7 +36,7 @@ def main(): + annotations_file = os.environ.get('DATASET_ANNOTATIONS_FILE_PATH') + if not annotations_file: + annotations_file = os.path.join(args.openimages_dir, "annotations/openimages-mlperf.json") +- cocoGt = COCO(annotations_file) ++ cocoGt = COCO(annotations_file, use_ext=True) + + if args.use_inv_map: + inv_map = [0] + cocoGt.getCatIds() # First label in inv_map is not used +@@ -100,8 +100,8 @@ def main(): + with open(args.output_file, "w") as fp: + json.dump(detections, fp, sort_keys=True, indent=4) + +- cocoDt = cocoGt.loadRes(args.output_file) # Load from file to bypass error with Python3 +- cocoEval = COCOeval(cocoGt, cocoDt, iouType='bbox') ++ cocoDt = cocoGt.loadRes(args.output_file, use_ext=True) # Load from file to bypass error with Python3 ++ cocoEval = COCOeval(cocoGt, cocoDt, iouType='bbox', use_ext=True) + cocoEval.params.imgIds = list(image_ids) + cocoEval.evaluate() + cocoEval.accumulate() diff --git a/script/get-mlperf-inference-src/patch/windows-openimages.patch b/script/get-mlperf-inference-src/patch/windows-openimages.patch new file mode 100644 index 0000000000..5be282ac64 --- /dev/null +++ b/script/get-mlperf-inference-src/patch/windows-openimages.patch @@ -0,0 +1,64 @@ +--- a/vision/classification_and_detection/tools/openimages.py Thu May 11 12:46:00 2023 ++++ b/vision/classification_and_detection/tools/openimages.py Thu May 11 13:02:53 2023 +@@ -57,6 +57,44 @@ + MAP_CLASSES_URL = "https://storage.googleapis.com/openimages/v5/class-descriptions-boxable.csv" + MAP_CLASSES_FILE = "class-descriptions-boxable.csv" + CHUNK_SIZE = 1024 * 8 ++MLPERF_CLASSES=['Airplane', 'Antelope', 'Apple', 'Backpack', 'Balloon', 'Banana', ++'Barrel', 'Baseball bat', 'Baseball glove', 'Bee', 'Beer', 'Bench', 'Bicycle', ++'Bicycle helmet', 'Bicycle wheel', 'Billboard', 'Book', 'Bookcase', 'Boot', ++'Bottle', 'Bowl', 'Bowling equipment', 'Box', 'Boy', 'Brassiere', 'Bread', ++'Broccoli', 'Bronze sculpture', 'Bull', 'Bus', 'Bust', 'Butterfly', 'Cabinetry', ++'Cake', 'Camel', 'Camera', 'Candle', 'Candy', 'Cannon', 'Canoe', 'Carrot', 'Cart', ++'Castle', 'Cat', 'Cattle', 'Cello', 'Chair', 'Cheese', 'Chest of drawers', 'Chicken', ++'Christmas tree', 'Coat', 'Cocktail', 'Coffee', 'Coffee cup', 'Coffee table', 'Coin', ++'Common sunflower', 'Computer keyboard', 'Computer monitor', 'Convenience store', ++'Cookie', 'Countertop', 'Cowboy hat', 'Crab', 'Crocodile', 'Cucumber', 'Cupboard', ++'Curtain', 'Deer', 'Desk', 'Dinosaur', 'Dog', 'Doll', 'Dolphin', 'Door', 'Dragonfly', ++'Drawer', 'Dress', 'Drum', 'Duck', 'Eagle', 'Earrings', 'Egg (Food)', 'Elephant', ++'Falcon', 'Fedora', 'Flag', 'Flowerpot', 'Football', 'Football helmet', 'Fork', ++'Fountain', 'French fries', 'French horn', 'Frog', 'Giraffe', 'Girl', 'Glasses', ++'Goat', 'Goggles', 'Goldfish', 'Gondola', 'Goose', 'Grape', 'Grapefruit', 'Guitar', ++'Hamburger', 
'Handbag', 'Harbor seal', 'Headphones', 'Helicopter', 'High heels', ++'Hiking equipment', 'Horse', 'House', 'Houseplant', 'Human arm', 'Human beard', ++'Human body', 'Human ear', 'Human eye', 'Human face', 'Human foot', 'Human hair', ++'Human hand', 'Human head', 'Human leg', 'Human mouth', 'Human nose', 'Ice cream', ++'Jacket', 'Jeans', 'Jellyfish', 'Juice', 'Kitchen & dining room table', 'Kite', ++'Lamp', 'Lantern', 'Laptop', 'Lavender (Plant)', 'Lemon', 'Light bulb', 'Lighthouse', ++'Lily', 'Lion', 'Lipstick', 'Lizard', 'Man', 'Maple', 'Microphone', 'Mirror', ++'Mixing bowl', 'Mobile phone', 'Monkey', 'Motorcycle', 'Muffin', 'Mug', 'Mule', ++'Mushroom', 'Musical keyboard', 'Necklace', 'Nightstand', 'Office building', ++'Orange', 'Owl', 'Oyster', 'Paddle', 'Palm tree', 'Parachute', 'Parrot', 'Pen', ++'Penguin', 'Personal flotation device', 'Piano', 'Picture frame', 'Pig', 'Pillow', ++'Pizza', 'Plate', 'Platter', 'Porch', 'Poster', 'Pumpkin', 'Rabbit', 'Rifle', ++'Roller skates', 'Rose', 'Salad', 'Sandal', 'Saucer', 'Saxophone', 'Scarf', 'Sea lion', ++'Sea turtle', 'Sheep', 'Shelf', 'Shirt', 'Shorts', 'Shrimp', 'Sink', 'Skateboard', ++'Ski', 'Skull', 'Skyscraper', 'Snake', 'Sock', 'Sofa bed', 'Sparrow', 'Spider', 'Spoon', ++'Sports uniform', 'Squirrel', 'Stairs', 'Stool', 'Strawberry', 'Street light', ++'Studio couch', 'Suit', 'Sun hat', 'Sunglasses', 'Surfboard', 'Sushi', 'Swan', ++'Swimming pool', 'Swimwear', 'Tank', 'Tap', 'Taxi', 'Tea', 'Teddy bear', 'Television', ++'Tent', 'Tie', 'Tiger', 'Tin can', 'Tire', 'Toilet', 'Tomato', 'Tortoise', 'Tower', ++'Traffic light', 'Train', 'Tripod', 'Truck', 'Trumpet', 'Umbrella', 'Van', 'Vase', ++'Vehicle registration plate', 'Violin', 'Wall clock', 'Waste container', 'Watch', ++'Whale', 'Wheel', 'Wheelchair', 'Whiteboard', 'Window', 'Wine', 'Wine glass', 'Woman', ++'Zebra', 'Zucchini'] + + + def get_args(): +@@ -70,14 +108,14 @@ + ) + parser.add_argument( + "--classes", +- default=None, ++ default=MLPERF_CLASSES, + nargs="+", + type=str, + help="Classes to download. 
default to all classes", + ) + parser.add_argument( + "--output-labels", +- default="labels.json", ++ default="openimages-mlperf.json", + type=str, + help="Name of the file to output output the labels", + ) diff --git a/script/get-mlperf-inference-src/patch/windows-openimages2.patch b/script/get-mlperf-inference-src/patch/windows-openimages2.patch new file mode 100644 index 0000000000..fa0e43fcda --- /dev/null +++ b/script/get-mlperf-inference-src/patch/windows-openimages2.patch @@ -0,0 +1,11 @@ +--- a/vision/classification_and_detection/python/openimages.py Thu May 11 13:56:13 2023 ++++ b/vision/classification_and_detection/python/openimages.py Thu May 11 13:02:53 2023 +@@ -85,7 +85,7 @@ + not_found += 1 + continue + else: +- src = os.path.join(data_path, image_name) ++ src = os.path.join(data_path, "validation", "data", image_name) + if not os.path.exists(src): + # if the image does not exists ignore it + not_found += 1 diff --git a/script/get-mlperf-inference-submission-dir/README.md b/script/get-mlperf-inference-submission-dir/README.md new file mode 100644 index 0000000000..576db43f8c --- /dev/null +++ b/script/get-mlperf-inference-submission-dir/README.md @@ -0,0 +1,161 @@ +Automatically generated README for this automation recipe: **get-mlperf-inference-submission-dir** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-inference-submission-dir,ddf36a41d6934a7e) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-submission-dir)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,mlperf,inference,submission,dir,directory* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get mlperf inference submission dir directory" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,mlperf,inference,submission,dir,directory` + +`cm run script --tags=get,mlperf,inference,submission,dir,directory[,variations] [--input_flags]` + +*or* + +`cmr "get mlperf inference submission dir directory"` + +`cmr "get mlperf inference submission dir directory [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,mlperf,inference,submission,dir,directory', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
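+*Note: the `...` placeholders above are not valid Python. A minimal self-contained call, sketched here with only the documented `action`, `automation`, `tags` and `out` keys, could look like:*
+
+```python
+import cmind
+
+# Run the CM script with console output and report any error.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,mlperf,inference,submission,dir,directory',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```
+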
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,mlperf,inference,submission,dir,directory"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,mlperf,inference,submission,dir,directory) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get mlperf inference submission dir directory[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * Group "**version**" +
+ Click here to expand this section. + + * `_version.#` + - Environment variables: + - *CM_MLPERF_INFERENCE_SUBMISSION_VERSION*: `#` + - Workflow: + * **`_version.4_0`** (default) + - Environment variables: + - *CM_MLPERF_INFERENCE_SUBMISSION_VERSION*: `4_0` + - Workflow: + +
+ + +#### Default variations + +`_version.4_0` + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "submission_dir":...}) +``` + +
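+*For example, a complete call (an illustrative sketch; `cm` stands for the imported `cmind` module and the path is hypothetical) might be:*
+
+```python
+import cmind as cm
+
+# Pass the --submission_dir flag through its mapped input key.
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'get,mlperf,inference,submission,dir,directory',
+               'submission_dir': '/tmp/mlperf-inference-submission'})
+```
+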
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
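+For example, an environment key can be set on the command line, e.g. `cm run script --tags=get,mlperf,inference,submission,dir,directory --env.CM_MLPERF_INFERENCE_SUBMISSION_VERSION=4_0` (illustrative; this particular key is normally set via the `_version.#` variation above).
+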
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-submission-dir/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-submission-dir/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-submission-dir/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-submission-dir/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-submission-dir/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-submission-dir/_cm.json) + +___ +### Script output +`cmr "get mlperf inference submission dir directory [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_MLPERF_INFERENCE_SUBMISSION_DIR` +* `CM_MLPERF_INFERENCE_SUBMISSION_VERSION` +#### New environment keys auto-detected from customize + +* `CM_MLPERF_INFERENCE_SUBMISSION_DIR` \ No newline at end of file diff --git a/script/get-mlperf-inference-submission-dir/_cm.json b/script/get-mlperf-inference-submission-dir/_cm.json new file mode 100644 index 0000000000..3c2f648f06 --- /dev/null +++ b/script/get-mlperf-inference-submission-dir/_cm.json @@ -0,0 +1,48 @@ +{ + "alias": "get-mlperf-inference-submission-dir", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "deps": [], + "docker": { + "run": false + }, + "input_description": {}, + "input_mapping": { + "submission_dir": "CM_MLPERF_INFERENCE_SUBMISSION_DIR" + }, + "new_env_keys": [ + "CM_MLPERF_INFERENCE_SUBMISSION_DIR", + "CM_MLPERF_INFERENCE_SUBMISSION_VERSION" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "get", + "mlperf", + "inference", + "submission", + "dir", + "directory" + ], + "uid": "ddf36a41d6934a7e", + "variations": { + "version.#": { + "env": { + "CM_MLPERF_INFERENCE_SUBMISSION_VERSION": "#" + }, + "group": "version" + }, + "version.4_0": { + "default": true, + "env": { + "CM_MLPERF_INFERENCE_SUBMISSION_VERSION": "4_0" + }, + "group": "version" + } + }, + "versions": {} +} diff --git a/script/get-mlperf-inference-submission-dir/customize.py b/script/get-mlperf-inference-submission-dir/customize.py new file mode 100644 index 0000000000..92fb3735ce --- /dev/null +++ b/script/get-mlperf-inference-submission-dir/customize.py @@ -0,0 +1,29 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR','') == '': + if not os.path.exists("mlperf-inference-submission"): + os.mkdir("mlperf-inference-submission") + env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] = os.path.join(os.getcwd(), "mlperf-inference-submission") + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] + + return {'return':0} diff --git 
a/script/get-mlperf-inference-sut-configs/README-extra.md b/script/get-mlperf-inference-sut-configs/README-extra.md new file mode 100644 index 0000000000..41e6b8cc94 --- /dev/null +++ b/script/get-mlperf-inference-sut-configs/README-extra.md @@ -0,0 +1,6 @@ +# Get Config SUT MLPerf Inference +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) loads the MLPerf inference performance configuration of a given System Under Test (SUT). + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/script/get-mlperf-inference-sut-configs/README.md b/script/get-mlperf-inference-sut-configs/README.md new file mode 100644 index 0000000000..68eecdf33d --- /dev/null +++ b/script/get-mlperf-inference-sut-configs/README.md @@ -0,0 +1,163 @@ +Automatically generated README for this automation recipe: **get-mlperf-inference-sut-configs** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-inference-sut-configs,c2fbf72009e2445b) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-sut-configs)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,mlperf,inference,sut,configs,sut-configs* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get mlperf inference sut configs sut-configs" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,mlperf,inference,sut,configs,sut-configs` + +`cm run script --tags=get,mlperf,inference,sut,configs,sut-configs[,variations] [--input_flags]` + +*or* + +`cmr "get mlperf inference sut configs sut-configs"` + +`cmr "get mlperf inference sut configs sut-configs [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,mlperf,inference,sut,configs,sut-configs', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
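+*Note: the `...` placeholders above are not valid Python. A minimal self-contained call, sketched here with only the documented `action`, `automation`, `tags` and `out` keys, could look like:*
+
+```python
+import cmind
+
+# Run the CM script with console output and report any error.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,mlperf,inference,sut,configs,sut-configs',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```
+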
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,mlperf,inference,sut,configs,sut-configs"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,mlperf,inference,sut,configs,sut-configs) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get mlperf inference sut configs sut-configs[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_octoml` + - Environment variables: + - *CM_SUT_USE_EXTERNAL_CONFIG_REPO*: `yes` + - *CM_GIT_CHECKOUT_FOLDER*: `configs` + - *CM_GIT_URL*: `https://github.com/arjunsuresh/mlperf-inference-configs` + - Workflow: + 1. ***Read "prehook_deps" on other CM scripts*** + * get,git,repo,_repo.mlperf_inference_configs_octoml + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + +
+ + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--configs_git_url=value` → `CM_GIT_URL=value` +* `--repo_path=value` → `CM_SUT_CONFIGS_PATH=value` +* `--run_config=value` → `CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "configs_git_url":...}) +``` + +
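+*For example (a sketch; the repository path below is hypothetical):*
+
+```python
+import cmind as cm
+
+# Point the script at a local SUT configuration tree via the repo_path input.
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'get,mlperf,inference,sut,configs,sut-configs',
+               'repo_path': '/home/user/mlperf-inference-configs'})
+```
+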
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_SUT_CONFIGS_PATH: `` +* CM_GIT_URL: `` + +
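+For example, pointing `CM_SUT_CONFIGS_PATH` at a local checkout with `--repo_path=/path/to/configs` makes the script load `<hw_name>/<implementation>-implementation/<device>-device/<backend>-framework/.../<run_config>-config.yaml` from that tree instead of the bundled `configs` directory (path layout per the `customize.py` shown below).
+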
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-sut-configs/_cm.json) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-sut-configs/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-sut-configs/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-sut-configs/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-sut-configs/_cm.json) + +___ +### Script output +`cmr "get mlperf inference sut configs sut-configs [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_HW_*` +* `CM_SUT_*` +#### New environment keys auto-detected from customize + +* `CM_HW_NAME` +* `CM_SUT_NAME` \ No newline at end of file diff --git a/script/get-mlperf-inference-sut-configs/_cm.json b/script/get-mlperf-inference-sut-configs/_cm.json new file mode 100644 index 0000000000..7ad8376f3b --- /dev/null +++ b/script/get-mlperf-inference-sut-configs/_cm.json @@ -0,0 +1,50 @@ +{ + "alias": "get-mlperf-inference-sut-configs", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "new_env_keys": [ + "CM_HW_*", + "CM_SUT_*" + ], + "new_state_keys": [ + "CM_SUT_*" + ], + "default_env": { + "CM_SUT_CONFIGS_PATH": "", + "CM_GIT_URL": "" + }, + "input_mapping": { + "repo_path": "CM_SUT_CONFIGS_PATH", + "configs_git_url": "CM_GIT_URL", + "run_config": "CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX" + }, + "tags": [ + "get", + "mlperf", + "inference", + "sut", + "configs", + "sut-configs" + ], + "uid": "c2fbf72009e2445b", + "variations": { + "octoml": { + "env": { + "CM_SUT_USE_EXTERNAL_CONFIG_REPO": "yes", + "CM_GIT_CHECKOUT_FOLDER": "configs", + "CM_GIT_URL": "https://github.com/arjunsuresh/mlperf-inference-configs" + }, + "prehook_deps": [ + { + "force_env_keys": [ + "CM_GIT_URL", + "CM_GIT_CHECKOUT_*" + ], + "tags": "get,git,repo,_repo.mlperf_inference_configs_octoml" + } + ] + } + } +} diff --git a/script/get-mlperf-inference-sut-configs/configs/default/config.yaml b/script/get-mlperf-inference-sut-configs/configs/default/config.yaml new file mode 100644 index 0000000000..0a30a8cd2f --- /dev/null +++ b/script/get-mlperf-inference-sut-configs/configs/default/config.yaml @@ -0,0 +1,73 @@ +--- + resnet50: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 0.1 + MultiStream: + target_latency: 0.1 + retinanet: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 1 + MultiStream: + target_latency: 1 + bert-99: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 1 + bert-99.9: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + 3d-unet-99: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 500 + 3d-unet-99.9: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 500 + gpt-j: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 500 + 
sdxl: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 200 + llama2-70b-99: + Offline: + target_qps: 0.1 + Server: + target_qps: 0.1 + SingleStream: + target_latency: 2000 + llama2-70b-99.9: + Offline: + target_qps: 0.1 + Server: + target_qps: 0.1 + SingleStream: + target_latency: 2000 diff --git a/script/get-mlperf-inference-sut-configs/configs/default/default/default-config.yaml b/script/get-mlperf-inference-sut-configs/configs/default/default/default-config.yaml new file mode 100644 index 0000000000..8fdf44d7dc --- /dev/null +++ b/script/get-mlperf-inference-sut-configs/configs/default/default/default-config.yaml @@ -0,0 +1,55 @@ +--- + resnet50: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + retinanet: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + bert-99: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + bert-99.9: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + 3d-unet-99: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + 3d-unet-99.9: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 diff --git a/script/get-mlperf-inference-sut-configs/configs/phoenix/nvidia_original-implementation/gpu-device/tensorrt-framework/default-config.yaml b/script/get-mlperf-inference-sut-configs/configs/phoenix/nvidia_original-implementation/gpu-device/tensorrt-framework/default-config.yaml new file mode 100644 index 0000000000..8fdf44d7dc --- /dev/null +++ b/script/get-mlperf-inference-sut-configs/configs/phoenix/nvidia_original-implementation/gpu-device/tensorrt-framework/default-config.yaml @@ -0,0 +1,55 @@ +--- + resnet50: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + retinanet: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + bert-99: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + bert-99.9: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + 3d-unet-99: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + 3d-unet-99.9: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 diff --git a/script/get-mlperf-inference-sut-configs/configs/phoenix/nvidia_original-implementation/gpu-device/tensorrt-framework/framework-version-default/default-config.yaml b/script/get-mlperf-inference-sut-configs/configs/phoenix/nvidia_original-implementation/gpu-device/tensorrt-framework/framework-version-default/default-config.yaml new file mode 100644 index 0000000000..2e0f034ba4 --- /dev/null +++ b/script/get-mlperf-inference-sut-configs/configs/phoenix/nvidia_original-implementation/gpu-device/tensorrt-framework/framework-version-default/default-config.yaml @@ -0,0 +1,54 @@ +3d-unet-99: + MultiStream: + 
target_latency: 80 + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 +3d-unet-99.9: + MultiStream: + target_latency: 80 + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 +bert-99: + MultiStream: + target_latency: 80 + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 +bert-99.9: + MultiStream: + target_latency: 80 + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 +resnet50: + MultiStream: + target_latency: '432111' + Offline: + target_qps: '37959.4' + Server: + target_qps: 1.0 + SingleStream: + target_latency: '226895' +retinanet: + MultiStream: + target_latency: 80 + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 diff --git a/script/get-mlperf-inference-sut-configs/customize.py b/script/get-mlperf-inference-sut-configs/customize.py new file mode 100644 index 0000000000..f074ed30ba --- /dev/null +++ b/script/get-mlperf-inference-sut-configs/customize.py @@ -0,0 +1,63 @@ +from cmind import utils +import os +import yaml +import shutil + +def postprocess(i): + env = i['env'] + state = i['state'] + + if env.get('CM_HW_NAME', '') == '': + host_name = env.get('CM_HOST_SYSTEM_NAME', 'default').replace("-", "_") + env['CM_HW_NAME'] = host_name + + device = env.get('CM_MLPERF_DEVICE', 'cpu') + + backend = env.get('CM_MLPERF_BACKEND', 'default') + if env.get('CM_MLPERF_BACKEND_VERSION', '') != '': + backend_version = "v" + env.get('CM_MLPERF_BACKEND_VERSION') if not env.get('CM_MLPERF_BACKEND_VERSION').startswith("v") else env.get('CM_MLPERF_BACKEND_VERSION') + else: + backend_version = 'vdefault' + + if 'CM_SUT_CONFIG' not in state: + state['CM_SUT_CONFIG'] = {} + if 'CM_SUT_CONFIG_PATH' not in state: + state['CM_SUT_CONFIG_PATH'] = {} + + implementation_string = env['CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX'] if env.get('CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX', '') != '' else env.get('CM_MLPERF_IMPLEMENTATION', 'default') + + run_config = [] + for i in range(1,5): + if env.get(f'CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX{i}', '') != '': + run_config.append(env.get(f'CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX{i}')) + + run_config_string = "_".join(run_config) if run_config else 'default_config' + + if env.get('CM_SUT_NAME', '') == '': + env['CM_SUT_NAME'] = env['CM_HW_NAME'] + "-" + implementation_string + "-" + device + "-" + backend + "-" + backend_version + "-" + run_config_string + + if env.get('CM_SUT_CONFIGS_PATH',''): + path = env['CM_SUT_CONFIGS_PATH'] + elif env.get('CM_SUT_USE_EXTERNAL_CONFIG_REPO', '') == "yes": + path = env.get('CM_GIT_CHECKOUT_PATH') + else: + path = os.path.join(os.getcwd(), "configs") + + config_path = os.path.join(path, env['CM_HW_NAME'], implementation_string+"-implementation", device+"-device", backend+"-framework", "framework-version-"+backend_version, run_config_string + "-config.yaml") + if not os.path.exists(config_path): + os.makedirs(os.path.dirname(config_path), exist_ok=True) + config_path_default = os.path.join(path, env['CM_HW_NAME'], implementation_string+"-implementation", device+"-device", backend+"-framework", "default-config.yaml") + if os.path.exists(config_path_default): + shutil.copy(config_path_default, config_path) + else: + print(f"Config file missing for given hw_name: '{env['CM_HW_NAME']}', implementation: '{implementation_string}', device: '{device}, backend: '{backend}', copying from default") + src_config = 
os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "configs", "default", "config.yaml") + shutil.copy(src_config, config_path) + os.makedirs(os.path.dirname(config_path_default), exist_ok=True) + shutil.copy(src_config, config_path_default) + + state['CM_SUT_CONFIG'][env['CM_SUT_NAME']] = yaml.load(open(config_path), Loader=yaml.SafeLoader) + state['CM_SUT_CONFIG_NAME'] = env['CM_SUT_NAME'] + state['CM_SUT_CONFIG_PATH'][env['CM_SUT_NAME']] = config_path + + return {'return':0} diff --git a/script/get-mlperf-inference-sut-description/README.md b/script/get-mlperf-inference-sut-description/README.md new file mode 100644 index 0000000000..e5ebccc115 --- /dev/null +++ b/script/get-mlperf-inference-sut-description/README.md @@ -0,0 +1,159 @@ +Automatically generated README for this automation recipe: **get-mlperf-inference-sut-description** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-inference-sut-description,e49a3f758b2d4e7b) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-sut-description)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,mlperf,sut,description,system-under-test,system-description* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get mlperf sut description system-under-test system-description" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,mlperf,sut,description,system-under-test,system-description` + +`cm run script --tags=get,mlperf,sut,description,system-under-test,system-description [--input_flags]` + +*or* + +`cmr "get mlperf sut description system-under-test system-description"` + +`cmr "get mlperf sut description system-under-test system-description " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,mlperf,sut,description,system-under-test,system-description', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,mlperf,sut,description,system-under-test,system-description"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,mlperf,sut,description,system-under-test,system-description) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get mlperf sut description system-under-test system-description" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--name=value` → `CM_HW_NAME=value` +* `--submitter=value` → `CM_MLPERF_SUBMITTER=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "name":...}) +``` + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_SUT_DESC_CACHE: `no` + +
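For example, a minimal Python sketch that overrides the default above and sets the flag-mapped keys (all values here are placeholders; the `name`/`submitter` input keys mirror the CLI flags documented earlier):

```python
import cmind

# Reuse a previously generated SUT description (CM_SUT_DESC_CACHE=yes)
# and set hardware/submitter names; values are hypothetical.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,mlperf,sut,description,system-under-test,system-description',
                  'out': 'con',
                  'name': 'my_server',
                  'submitter': 'MyOrg',
                  'env': {'CM_SUT_DESC_CACHE': 'yes'}})
if r['return'] > 0:
    print(r['error'])
```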
+ + +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-sut-description/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,python3 + * CM names: `--adr.['python3', 'python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,compiler + * CM names: `--adr.['compiler']...` + - CM script: [get-cl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cl) + - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc) + - CM script: [get-llvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-llvm) + * get,cuda-devices + * `if (CM_MLPERF_DEVICE in ['gpu', 'cuda'])` + - CM script: [get-cuda-devices](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda-devices) + * detect,sudo + * `if (CM_DETERMINE_MEMORY_CONFIGURATION == yes AND CM_HOST_OS_TYPE == linux)` + - CM script: [detect-sudo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-sudo) + * get,generic-python-lib,_package.dmiparser + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-sut-description/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-sut-description/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-sut-description/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-sut-description/customize.py)*** + 1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-sut-description/_cm.json) + +___ +### Script output +`cmr "get mlperf sut description system-under-test system-description " [--input_flags] -j` +#### New environment keys (filter) + +* `CM_HW_*` +* `CM_SUT_*` +#### New environment keys auto-detected from customize + +* `CM_HW_NAME` \ No newline at end of file diff --git a/script/get-mlperf-inference-sut-description/_cm.json b/script/get-mlperf-inference-sut-description/_cm.json new file mode 100644 index 0000000000..4c3f998e57 --- /dev/null +++ b/script/get-mlperf-inference-sut-description/_cm.json @@ -0,0 +1,75 @@ +{ + "alias": "get-mlperf-inference-sut-description", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3" + }, + { + "names": [ + "compiler" + ], + "tags": "get,compiler" + }, + { + "tags": "get,cuda-devices", + "enable_if_env": { + "CM_MLPERF_DEVICE": [ + "gpu", + "cuda" + ] + } + }, + { + "tags": "detect,sudo", + "enable_if_env": { + "CM_DETERMINE_MEMORY_CONFIGURATION": [ + "yes" + ], + "CM_HOST_OS_TYPE": [ + "linux" + ] + } + }, + { + "tags": "get,generic-python-lib,_package.dmiparser" + } + ], + "default_env": { + "CM_SUT_DESC_CACHE": "no" + }, + "input_mapping": { + "name": "CM_HW_NAME", + "submitter": "CM_MLPERF_SUBMITTER" + }, + "new_env_keys": [ + "CM_HW_*", + "CM_SUT_*" + ], + "new_state_keys": [ + "CM_SUT_*", + "CM_HW_*" + ], + "tags": [ + "get", + "mlperf", + "sut", + "description", + "system-under-test", + "system-description" + ], + "uid": "e49a3f758b2d4e7b" +} diff --git a/script/get-mlperf-inference-sut-description/customize.py b/script/get-mlperf-inference-sut-description/customize.py new file mode 100644 index 0000000000..cd0c2f7544 --- /dev/null +++ b/script/get-mlperf-inference-sut-description/customize.py @@ -0,0 +1,158 @@ +from cmind import utils +import os +import json +import shutil + +def preprocess(i): + env = i['env'] + state = i['state'] + os_info = i['os_info'] + + submitter = env.get('CM_MLPERF_SUBMITTER', 'CTuning') + + auto_detected_hw_name = False + if env.get('CM_HW_NAME', '') == '': + host_name = env.get('CM_HOST_SYSTEM_NAME', 'default').replace("-", "_") + env['CM_HW_NAME'] = host_name + auto_detected_hw_name = True + + hw_name = env['CM_HW_NAME'] + + backend = env.get('CM_MLPERF_BACKEND', '') + backend_version = env.get('CM_MLPERF_BACKEND_VERSION', '') + sut_suffix = '' + backend_name = '' + backend_desc = '' + if backend: + backend_name = env.get('CM_MLPERF_BACKEND_NAME', backend) + sut_suffix = "-" + backend + backend_desc = backend_name + if backend_version: + sut_suffix += "-" + backend_version + backend_desc += ' v' + backend_version + + sut = hw_name + sut_suffix + script_path = i['run_script_input']['path'] + sut_path = os.path.join(os.getcwd(), "suts", sut + ".json") + if os.path.exists(sut_path) and env.get('CM_SUT_DESC_CACHE', '') == "yes": + print(f"Reusing SUT description file {sut}") + state['CM_SUT_META'] = json.load(open(sut_path)) + else: + if not os.path.exists(os.path.dirname(sut_path)): + os.makedirs(os.path.dirname(sut_path)) + + print("Generating SUT description file for " + sut) + hw_path = os.path.join(os.getcwd(), "hardware", hw_name + ".json") + if not os.path.exists(os.path.dirname(hw_path)): + 
os.makedirs(os.path.dirname(hw_path)) + if not os.path.exists(hw_path): + default_hw_path = os.path.join(script_path, "hardware", "default.json") + print("HW description file for " + hw_name + " not found. Copying from default!!!") + shutil.copy(default_hw_path, hw_path) + + state['CM_HW_META'] = json.load(open(hw_path)) + state['CM_SUT_META'] = state['CM_HW_META'] + state['CM_SUT_META']['framework'] = backend_desc + os_name = env.get('CM_HOST_OS_FLAVOR', '').capitalize() + os_version = env.get('CM_HOST_OS_VERSION', '') + if os_name and os_version: + os_name_string = os_name + " " + os_version + else: + os_name_string = '' + os_type = env.get('CM_HOST_OS_TYPE', '') + kernel = env.get('CM_HOST_OS_KERNEL_VERSION', '') + if os_type and kernel: + os_name_string += " (" + os_type + "-" + kernel + glibc_version = env.get('CM_HOST_OS_GLIBC_VERSION', '') + if glibc_version: + os_name_string += '-glibc' + glibc_version + os_name_string += ')' + python_version = env.get('CM_PYTHON_VERSION', '') + compiler = env.get('CM_COMPILER_FAMILY', '') + compiler_version = env.get('CM_COMPILER_VERSION', '') + state['CM_SUT_META']['submitter'] = submitter + + # If Windows and os_name_string is empty, rebuild it: + + if os_name_string=='' and os_info['platform'] == 'windows': + import platform + os_name_string = str(platform.platform()) + + state['CM_SUT_META']['operating_system'] = os_name_string + + state['CM_SUT_META']['other_software_stack'] = "Python: " + python_version + ", " + compiler + "-" + compiler_version + + if state['CM_SUT_META'].get('system_name','') == '': + system_name = env.get('CM_MLPERF_SYSTEM_NAME') + if not system_name: + system_name = env.get('CM_HW_NAME') + if system_name: + if auto_detected_hw_name: + system_name+=" (auto detected)" + else: + system_name = " (generic)" + state['CM_SUT_META']['system_name'] = system_name + + # Add GPU info + if 'cm_cuda_device_prop' in state: + state['CM_SUT_META']['accelerator_frequency'] = state['cm_cuda_device_prop']['Max clock rate'] + state['CM_SUT_META']['accelerator_memory_capacity'] = str(int(state['cm_cuda_device_prop']['Global memory'])/(1024*1024.0*1024)) + " GB" + state['CM_SUT_META']['accelerator_model_name'] = state['cm_cuda_device_prop']['GPU Name'] + state['CM_SUT_META']['accelerators_per_node'] = "1" + + if state['CM_SUT_META'].get('host_processor_core_count', '') == '': + physical_cores_per_node = env.get('CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET') + + if physical_cores_per_node == None or physical_cores_per_node == '': + if os_info['platform'] == 'windows': + physical_cores_per_node = '1' + + state['CM_SUT_META']['host_processor_core_count'] = physical_cores_per_node + + if state['CM_SUT_META'].get('host_processor_model_name', '') == '': + state['CM_SUT_META']['host_processor_model_name'] = env.get('CM_HOST_CPU_MODEL_NAME', 'undefined') + if state['CM_SUT_META'].get('host_processors_per_node', '') == '': + x = env.get('CM_HOST_CPU_SOCKETS', '') + if x == '' and os_info['platform'] == 'windows': + x = '1' + state['CM_SUT_META']['host_processors_per_node'] = x + + if state['CM_SUT_META'].get('host_processor_caches', '') == '': + state['CM_SUT_META']['host_processor_caches'] = "L1d cache: " + env.get('CM_HOST_CPU_L1D_CACHE_SIZE', ' ') + \ + ", L1i cache: " + env.get('CM_HOST_CPU_L1I_CACHE_SIZE', ' ') + ", L2 cache: " + \ + env.get('CM_HOST_CPU_L2_CACHE_SIZE', ' ') + \ + ", L3 cache: " + env.get('CM_HOST_CPU_L3_CACHE_SIZE', ' ') + + if state['CM_SUT_META'].get('host_processor_frequency', '') == '': + 
state['CM_SUT_META']['host_processor_frequency'] = env.get('CM_HOST_CPU_MAX_MHZ') if env.get('CM_HOST_CPU_MAX_MHZ', '') != '' else 'undefined' + if state['CM_SUT_META'].get('host_memory_capacity', '') == '': + state['CM_SUT_META']['host_memory_capacity'] = env.get('CM_HOST_MEMORY_CAPACITY') if env.get('CM_HOST_MEMORY_CAPACITY', '') != '' else 'undefined' + if state['CM_SUT_META'].get('host_storage_capacity', '') == '': + state['CM_SUT_META']['host_storage_capacity'] = env.get('CM_HOST_DISK_CAPACITY') if env.get('CM_HOST_DISK_CAPACITY', '') != '' else 'undefined' + if 'CM_SUT_SW_NOTES' in env: + sw_notes = env['CM_SUT_SW_NOTES'] + else: + sw_notes = '' + state['CM_SUT_META']['sw_notes'] = sw_notes + + if env.get('CM_SUDO_USER', '') == "yes" and env.get('CM_HOST_OS_TYPE', 'linux') == 'linux': + r = i['automation'].run_native_script({'run_script_input':i['run_script_input'], 'env':env, 'script_name':'detect_memory'}) + if r['return']>0: + return r + if env.get('CM_HOST_MEM_INFO', '') != '': + state['CM_SUT_META']['host_memory_configuration'] = env['CM_HOST_MEM_INFO'] + + + state['CM_SUT_META'] = dict(sorted(state['CM_SUT_META'].items())) + + sut_file = open(sut_path, "w") + json.dump(state['CM_SUT_META'], sut_file, indent = 4) + sut_file.close() + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/get-mlperf-inference-sut-description/detect_memory.sh b/script/get-mlperf-inference-sut-description/detect_memory.sh new file mode 100644 index 0000000000..edc338c799 --- /dev/null +++ b/script/get-mlperf-inference-sut-description/detect_memory.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +if [[ ${CM_SUDO_USER} == "yes" ]]; then + sudo dmidecode -t memory > meminfo.out + ${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/get_memory_info.py +fi +test $? -eq 0 || return $? 
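The shell step above hands its result back to CM through a `tmp-run-env.out` file written by `get_memory_info.py` (next file). A minimal sketch of the consuming side, assuming the file holds one `KEY=VALUE` pair per line:

```python
# Hypothetical reader for tmp-run-env.out; CM's script automation is
# assumed to perform an equivalent step after a native script runs.
def read_run_env(path='tmp-run-env.out'):
    env = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if '=' in line:
                key, value = line.split('=', 1)
                env[key] = value
    return env

# e.g. read_run_env().get('CM_HOST_MEM_INFO')
```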
diff --git a/script/get-mlperf-inference-sut-description/get_memory_info.py b/script/get-mlperf-inference-sut-description/get_memory_info.py new file mode 100644 index 0000000000..aeadde557b --- /dev/null +++ b/script/get-mlperf-inference-sut-description/get_memory_info.py @@ -0,0 +1,60 @@ +import os +import json +from dmiparser import DmiParser + +with open("meminfo.out", "r") as f: + text = f.read() + parser = DmiParser(text, sort_keys=True, indent=4) + + parsedStr = str(parser) + parsedObj = json.loads(str(parser)) + memory = [] + + ind = 0; + needed_global_keys = ['Speed', 'Configured Memory Speed', 'Type'] + added_global_keys = [] + needed_keys = ['Size', 'Rank'] + + for item in parsedObj: + if item['name'] == 'Physical Memory Array': + ecc_value = item['props']['Error Correction Type']['values'][0] + if not ecc_value or 'None' in ecc_value: + ecc_value = "No ECC" + memory.append({"info": ['Error Correction Type: ' + ecc_value ]}) + ind += 1 + continue + if item['name'] != 'Memory Device': + continue + memory.append({}) + memory[ind]['handle'] = item['handle'] + memory[ind]['info'] = [] + locator = item['props']['Locator']['values'][0] + bank_locator = item['props']['Bank Locator']['values'][0] + + if not "Not Specified" in locator: + memory[ind]['info'].append(locator) + if not "Not Specified" in bank_locator: + memory[ind]['info'].append(bank_locator) + + if item['props']['Size']['values'][0] == "No Module Installed": + memory[ind]['populated'] = False + memory[ind]['info'].append("Unpopulated") + else: + memory[ind]['populated'] = True + + for key in item['props']: + if key in needed_global_keys and key not in added_global_keys: + memory[0]['info'].append(f'{key}: {";".join(item["props"][key]["values"])}') + added_global_keys.append(key) + elif key in needed_keys: + memory[ind]['info'].append(f'{key}: {";".join(item["props"][key]["values"])}') + ind+=1 + + meminfo = [] + for item in memory: + meminfo.append( "; ".join(item['info'])) + + meminfo_string =", ".join(meminfo) + with open("tmp-run-env.out", "w") as f: + f.write(f"CM_HOST_MEM_INFO={meminfo_string}") + diff --git a/script/get-mlperf-inference-sut-description/hardware/default.json b/script/get-mlperf-inference-sut-description/hardware/default.json new file mode 100644 index 0000000000..b7cf960dbb --- /dev/null +++ b/script/get-mlperf-inference-sut-description/hardware/default.json @@ -0,0 +1,26 @@ +{ + "accelerator_frequency": "", + "accelerator_host_interconnect": "N/A", + "accelerator_interconnect": "N/A", + "accelerator_interconnect_topology": "", + "accelerator_memory_capacity": "N/A", + "accelerator_memory_configuration": "N/A", + "accelerator_model_name": "N/A", + "accelerator_on-chip_memories": "", + "accelerators_per_node": "0", + "cooling": "air", + "division": "open", + "host_memory_configuration": "undefined", + "host_networking": "Gig Ethernet", + "host_network_card_count": "1", + "host_networking_topology": "N/A", + "host_processor_interconnect": "", + "host_storage_type": "SSD", + "hw_notes": "", + "number_of_nodes": "1", + "status": "available", + "submitter": "cTuning", + "sw_notes": "Automated by MLCommons CM", + "system_type": "edge", + "system_type_detail": "edge server" +} diff --git a/script/get-mlperf-inference-utils/README.md b/script/get-mlperf-inference-utils/README.md new file mode 100644 index 0000000000..693d9926ed --- /dev/null +++ b/script/get-mlperf-inference-utils/README.md @@ -0,0 +1,120 @@ +Automatically generated README for this automation recipe: **get-mlperf-inference-utils** + +License: 
**Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-inference-utils,e341e5f86d8342e5) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-utils)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *get,mlperf,inference,util,utils,functions* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get mlperf inference util utils functions" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,mlperf,inference,util,utils,functions` + +`cm run script --tags=get,mlperf,inference,util,utils,functions ` + +*or* + +`cmr "get mlperf inference util utils functions"` + +`cmr "get mlperf inference util utils functions " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,mlperf,inference,util,utils,functions', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,mlperf,inference,util,utils,functions"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,mlperf,inference,util,utils,functions) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get mlperf inference util utils functions" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
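Since the recipe's only output is `+PYTHONPATH`, its helpers can be imported directly once that path is active. A sketch (paths are hypothetical; `get_result_from_log` is defined in `mlperf_utils.py` below and itself expects the MLPerf inference `submission_checker` to be importable):

```python
import sys

# Stand-ins for the paths this script exports via +PYTHONPATH.
sys.path.append('/path/to/script/get-mlperf-inference-utils')
sys.path.append('/path/to/inference/tools/submission')

import mlperf_utils

result, valid, power = mlperf_utils.get_result_from_log(
    version='v4.0',                      # illustrative MLPerf version
    model='resnet50', scenario='Offline',
    result_path='/path/to/results/performance/run_1',
    mode='performance')
print(result, valid)
```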
+ + +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-utils/_cm.yaml)*** + * get,mlperf,inference,src + * CM names: `--adr.['inference-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-utils/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-utils/_cm.yaml) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-utils/_cm.yaml) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-utils/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-inference-utils/_cm.yaml) + +___ +### Script output +`cmr "get mlperf inference util utils functions " -j` +#### New environment keys (filter) + +* `+PYTHONPATH` +#### New environment keys auto-detected from customize diff --git a/script/get-mlperf-inference-utils/_cm.yaml b/script/get-mlperf-inference-utils/_cm.yaml new file mode 100644 index 0000000000..bde11ac268 --- /dev/null +++ b/script/get-mlperf-inference-utils/_cm.yaml @@ -0,0 +1,18 @@ +alias: get-mlperf-inference-utils +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +tags: +- get +- mlperf +- inference +- util +- utils +- functions +uid: e341e5f86d8342e5 +deps: + - tags: get,mlperf,inference,src + names: + - inference-src +new_env_keys: + - '+PYTHONPATH' diff --git a/script/get-mlperf-inference-utils/customize.py b/script/get-mlperf-inference-utils/customize.py new file mode 100644 index 0000000000..8c4bbbbbf4 --- /dev/null +++ b/script/get-mlperf-inference-utils/customize.py @@ -0,0 +1,32 @@ +from cmind import utils +import os +import sys + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + utils_path = i['run_script_input']['path'] + + env['+PYTHONPATH'] = [ utils_path ] + + submission_checker_dir = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission") + + sys.path.append(submission_checker_dir) + sys.path.append(utils_path) + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/get-mlperf-inference-utils/mlperf_utils.py b/script/get-mlperf-inference-utils/mlperf_utils.py new file mode 100644 index 0000000000..8b5b967a3a --- /dev/null +++ b/script/get-mlperf-inference-utils/mlperf_utils.py @@ -0,0 +1,301 @@ +import sys +import os +import submission_checker as checker +from log_parser import MLPerfLog + + +def get_result_from_log(version, model, scenario, result_path, mode): + + config = checker.Config( + version, + None, + ignore_uncommited=False, + skip_power_check=False, + ) + mlperf_model = config.get_mlperf_model(model) + #scenario = checker.SCENARIO_MAPPING[scenario] + + result = '' + power_result = None + valid = {} + if mode == "performance": + has_power = os.path.exists(os.path.join(result_path, "..", "power")) + result_ = 
checker.get_performance_metric(config, mlperf_model, result_path, scenario, None, None, has_power) + mlperf_log = MLPerfLog(os.path.join(result_path, "mlperf_log_detail.txt")) + if ( + "result_validity" not in mlperf_log.get_keys() + or mlperf_log["result_validity"] != "VALID" + ): + valid['performance'] = False + else: + valid['performance'] = True + + if "stream" in scenario.lower(): + result = result_ / 1000000 #convert to milliseconds + else: + result = result_ + result = str(round(result, 3)) + + if has_power: + power_valid, power_metric, scenario, avg_power_efficiency = checker.get_power_metric(config, scenario, result_path, True, result_) + power_result = f"{round(power_metric,3)},{round(avg_power_efficiency,3)}" + valid['power'] = power_valid + + + elif mode == "accuracy" and os.path.exists(os.path.join(result_path, 'accuracy.txt')): + + acc_valid, acc_results, acc_targets, acc_limits = get_accuracy_metric(config, mlperf_model, result_path) + valid['accuracy'] = acc_valid + + if len(acc_results) == 1: + for acc in acc_results: + result = str(round(float(acc_results[acc]), 5)) + else: + result = '(' + result_list = [] + for i, acc in enumerate(acc_results): + result_list.append(str(round(float(acc_results[acc]), 5))) + result += ", ".join(result_list) + ")" + + return result, valid, power_result + +def get_accuracy_metric(config, model, path): + + import re + is_valid = False + all_accuracy_valid = True + acc = None + result_acc = None + target = config.get_accuracy_target(model) + acc_upper_limit = config.get_accuracy_upper_limit(model) + patterns = [] + acc_targets = [] + acc_limits = [] + up_patterns = [] + acc_types = [] + + if acc_upper_limit is not None: + acc_limit_check = True + for i in range(0, len(acc_upper_limit), 2): + acc_type, acc_target = acc_upper_limit[i:i+2] + acc_limits.append(acc_target) + up_patterns.append(checker.ACC_PATTERN[acc_type]) + + for i in range(0, len(target), 2): + acc_type, acc_target = target[i:i+2] + acc_types.append(acc_type) + patterns.append(checker.ACC_PATTERN[acc_type]) + acc_targets.append(acc_target) + + acc_seen = [False for _ in acc_targets] + acc_results = {} + with open(os.path.join(path, "accuracy.txt"), "r", encoding="utf-8") as f: + for line in f: + for i, (pattern, acc_target, acc_type) in enumerate(zip(patterns, acc_targets, acc_types)): + m = re.match(pattern, line) + if m: + acc = m.group(1) + + acc_results[acc_type] = acc + + if acc is not None and float(acc) >= acc_target: + all_accuracy_valid &= True + acc_seen[i] = True + elif acc is not None: + all_accuracy_valid = False + #log.warning("%s accuracy not met: expected=%f, found=%s", path, acc_target, acc) + if i == 0 and acc: + result_acc = acc + acc = None + if acc_upper_limit is not None: + for i, (pattern, acc_limit) in enumerate(zip(up_patterns, acc_limits)): + m = re.match(pattern, line) + if m: + acc = m.group(1) + if acc is not None and acc_upper_limit is not None and float(acc) > acc_limit: + acc_limit_check = False + #log.warning("%s accuracy not met: upper limit=%f, found=%s", path, acc_limit, acc) + acc = None + if all(acc_seen): + break; + is_valid = all_accuracy_valid & all(acc_seen) + if acc_upper_limit is not None: + is_valid &= acc_limit_check + + + return is_valid, acc_results, acc_targets, acc_limits + +def get_result_string(version, model, scenario, result_path, has_power, sub_res, division="open", system_json=None): + + config = checker.Config( + version, + None, + ignore_uncommited=False, + skip_power_check=False, + ) + mlperf_model = 
config.get_mlperf_model(model) + performance_path = os.path.join(result_path, "performance", "run_1") + accuracy_path = os.path.join(result_path, "accuracy") + scenario = checker.SCENARIO_MAPPING[scenario] + + fname = os.path.join(performance_path, "mlperf_log_detail.txt") + mlperf_log = MLPerfLog(fname) + effective_scenario = mlperf_log["effective_scenario"] + inferred = False + result = {} + + + performance_result = checker.get_performance_metric(config, mlperf_model, performance_path, scenario, None, None, has_power) + if "stream" in scenario.lower(): + performance_result_ = performance_result / 1000000 #convert to milliseconds + else: + performance_result_ = performance_result + result['performance'] = round(performance_result_, 3) + + if scenario != effective_scenario: + inferred, inferred_result = checker.get_inferred_result(scenario, effective_scenario, performance_result, mlperf_log, config, False) + + if has_power: + is_valid, power_metric, scenario, avg_power_efficiency = checker.get_power_metric(config, scenario, performance_path, True, performance_result) + if "stream" in scenario.lower(): + power_metric_unit = "milliJoules" + else: + power_metric_unit = "Watts" + power_result_string = f"`Power consumed`: `{round(power_metric, 3)} {power_metric_unit}`, `Power efficiency`: `{round(avg_power_efficiency * 1000, 3)} samples per Joule`" + + power_result = round(power_metric, 3) + power_efficiency_result = round(avg_power_efficiency, 3) + result['power'] = power_result + result['power_efficiency'] = power_efficiency_result + + compliance_list = [ "TEST01", "TEST05", "TEST04" ] + if division == "closed": + for test in compliance_list: + test_path = os.path.join(result_path, test) + if os.path.exists(test_path): #We dont consider missing test folders now - submission checker will do that + #test_pass = checker.check_compliance_dir(test_path, mlperf_model, scenario, config, "closed", system_json, sub_res) + test_pass = checker.check_compliance_perf_dir(test_path) + if test_pass and test in [ "TEST01", "TEST06" ]: + #test_pass = checker.check_compliance_acc_dir(test_path, mlperf_model, config) + pass # accuracy truncation script is done after submission generation. 
We assume here that it'll pass + if test_pass: + result[test] = "passed" + else: + result[test] = "failed" + + acc_valid, acc_results, acc_targets, acc_limits = get_accuracy_metric(config, mlperf_model, accuracy_path) + + result_field = checker.RESULT_FIELD[effective_scenario] + + performance_result_string = f"`{result_field}`: `{performance_result}`\n" + if inferred: + inferred_result_field = checker.RESULT_FIELD[scenario] + performance_result_string += f"Inferred result: `{inferred_result_field}`: `{inferred_result}` \n" + + accuracy_result_string = '' + accuracy_results = [] + for i, acc in enumerate(acc_results): + accuracy_results.append(str(round(float(acc_results[acc]), 5))) + accuracy_result_string += f"`{acc}`: `{round(float(acc_results[acc]), 5)}`" + if not acc_limits: + accuracy_result_string += f", Required accuracy for closed division `>= {round(acc_targets[i], 5)}`" + else: + accuracy_result_string += f", Required accuracy for closed division `>= {round(acc_targets[i], 5)}` and `<= {round(acc_limits[i], 5)}`" + accuracy_result_string += "\n" + + if len(accuracy_results) == 1: + accuracy_result = accuracy_results[0] + else: + accuracy_result = "(" + ", ".join(accuracy_results)+")" + result['accuracy'] = accuracy_result + + result_string = f"\n\n## Results\n" + result_string += f"\nPlatform: {sub_res}\n" + result_string += "\n### Accuracy Results \n" + accuracy_result_string + result_string += "\n### Performance Results \n" + performance_result_string + if has_power: + result_string += "\n### Power Results \n" + power_result_string + + + return result_string, result + +def get_result_table(results): + + headers = ["Model", "Scenario", "Accuracy", "QPS", "Latency (in ms)", "Power Efficiency (in samples/J)", "TEST01", "TEST05", "TEST04"] + table = [] + for model in results: + for scenario in results[model]: + row = [] + row.append(model) + row.append(scenario) + if results[model][scenario].get('accuracy'): + val = str(results[model][scenario]['accuracy']) + if not results[model][scenario].get('accuracy_valid', True): + val = "X "+val + row.append(val) + else: + row.append("-") + + if results[model][scenario].get('performance'): + + if "stream" in scenario.lower(): + if float(results[model][scenario]['performance']) == 0: + row.append("-") + elif scenario.lower() == "singlestream": + val_qps = str(round(1000/float(results[model][scenario]['performance']), 3)) + if not results[model][scenario].get('performance_valid', True): # we explicitly mark invalid results + val_qps = "X "+val_qps + row.append(val_qps) + elif scenario.lower() == "multistream": + val_qps = str(round(8000/float(results[model][scenario]['performance']), 3)) + if not results[model][scenario].get('performance_valid', True): + val_qps = "X "+val_qps + row.append(val_qps) + val = str(results[model][scenario]['performance']) + if not results[model][scenario].get('performance_valid', True): + val = "X "+val + row.append(val) + else: + val = str(results[model][scenario]['performance']) + if not results[model][scenario].get('performance_valid', True): + val = "X "+val + row.append(val) + row.append("-") + + #if results[model][scenario].get('power','') != '': + # row.append(results[model][scenario]['power']) + if results[model][scenario].get('power_efficiency','') != '': + val = str(results[model][scenario]['power_efficiency']) + if not results[model][scenario].get('power_valid', True): + val = "X "+val + row.append(val) + else: + row.append(None) + + val1 = results[model][scenario].get('TEST01') + val2 = 
results[model][scenario].get('TEST05') + val3 = results[model][scenario].get('TEST04') + if val1: + row.append(val1) + if val2: + row.append(val2) + if val3: + row.append(val3) + elif val3: + row.append("missing") + row.append(val3) + + else: + if val2: + row.append("missing") + row.append(val2) + if val3: + row.append(val3) + elif val3: + row.append("missing") + row.append("missing") + row.append(val3) + + table.append(row) + + return table, headers diff --git a/script/get-mlperf-logging/README-extra.md b/script/get-mlperf-logging/README-extra.md new file mode 100644 index 0000000000..32392035fc --- /dev/null +++ b/script/get-mlperf-logging/README-extra.md @@ -0,0 +1,16 @@ +# Get MLCommons Training Source + +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) +git clones and installs the [MLCommons Logging library]( https://github.com/mlcommons/logging ). + +## Commands + +To install +``` +cm run script --tags=get,mlperf,logging +``` +or + +``` +cmr "get mlperf logging" +``` diff --git a/script/get-mlperf-logging/README.md b/script/get-mlperf-logging/README.md new file mode 100644 index 0000000000..e0c1199071 --- /dev/null +++ b/script/get-mlperf-logging/README.md @@ -0,0 +1,129 @@ +Automatically generated README for this automation recipe: **get-mlperf-logging** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-logging,c9830dc6f87b4dc6) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-logging)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,mlperf,logging,mlperf-logging* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get mlperf logging mlperf-logging" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,mlperf,logging,mlperf-logging` + +`cm run script --tags=get,mlperf,logging,mlperf-logging ` + +*or* + +`cmr "get mlperf logging mlperf-logging"` + +`cmr "get mlperf logging mlperf-logging " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,mlperf,logging,mlperf-logging', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,mlperf,logging,mlperf-logging"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,mlperf,logging,mlperf-logging) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get mlperf logging mlperf-logging" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
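After this recipe runs, `CM_MLPERF_LOGGING_SRC_PATH` is added to `+PYTHONPATH`, so the library can be imported without a pip install. A minimal sketch, assuming the upstream package layout (`mlperf_logging.mllog`):

```python
from mlperf_logging import mllog  # resolvable once +PYTHONPATH is set

mllogger = mllog.get_mllogger()
# Emit one structured MLPerf log event; key/value are illustrative.
mllogger.event(key='submission_benchmark', value='resnet')
```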
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-logging/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,git,repo,_repo.https://github.com/mlcommons/logging + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-logging/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-logging/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-logging/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-logging/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-logging/_cm.json) + +___ +### Script output +`cmr "get mlperf logging mlperf-logging " -j` +#### New environment keys (filter) + +* `+PYTHONPATH` +* `CM_MLPERF_LOGGING_*` +#### New environment keys auto-detected from customize + +* `CM_MLPERF_LOGGING_SRC_PATH` \ No newline at end of file diff --git a/script/get-mlperf-logging/_cm.json b/script/get-mlperf-logging/_cm.json new file mode 100644 index 0000000000..a81fc231c9 --- /dev/null +++ b/script/get-mlperf-logging/_cm.json @@ -0,0 +1,38 @@ +{ + "alias": "get-mlperf-logging", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "default_env": { + }, + "deps": [ + { + "tags": "detect,os" + }, + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "tags": "get,git,repo,_repo.https://github.com/mlcommons/logging", + "env": { + "CM_GIT_CHECKOUT": "master" + } + } + ], + "new_env_keys": [ + "CM_MLPERF_LOGGING_*", + "+PYTHONPATH" + ], + "tags": [ + "get", + "mlperf", + "logging", + "mlperf-logging" + ], + "uid": "c9830dc6f87b4dc6" +} diff --git a/script/get-mlperf-logging/customize.py b/script/get-mlperf-logging/customize.py new file mode 100644 index 0000000000..ac1a2a641e --- /dev/null +++ b/script/get-mlperf-logging/customize.py @@ -0,0 +1,21 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + meta = i['meta'] + + env['CM_MLPERF_LOGGING_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] + + return {'return':0} + +def postprocess(i): + env = i['env'] + + env['+PYTHONPATH'] = [ env['CM_MLPERF_LOGGING_SRC_PATH'] ] + + return {'return':0} diff --git a/script/get-mlperf-power-dev/README.md b/script/get-mlperf-power-dev/README.md new file mode 100644 index 0000000000..7f755f413c --- /dev/null +++ b/script/get-mlperf-power-dev/README.md @@ -0,0 +1,173 @@ +Automatically generated README for this automation recipe: **get-mlperf-power-dev** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and 
Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-power-dev,72aa56768c994bcf) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-power-dev)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,src,source,power,power-dev,mlperf,mlcommons* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get src source power power-dev mlperf mlcommons" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,src,source,power,power-dev,mlperf,mlcommons` + +`cm run script --tags=get,src,source,power,power-dev,mlperf,mlcommons[,variations] ` + +*or* + +`cmr "get src source power power-dev mlperf mlcommons"` + +`cmr "get src source power power-dev mlperf mlcommons [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,src,source,power,power-dev,mlperf,mlcommons', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,src,source,power,power-dev,mlperf,mlcommons"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,src,source,power,power-dev,mlperf,mlcommons) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get src source power power-dev mlperf mlcommons[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**checkout**" +
+ Click here to expand this section. + + * `_branch.#` + - Environment variables: + - *CM_GIT_CHECKOUT*: `#` + - Workflow: + * `_sha.#` + - Environment variables: + - *CM_GIT_SHA*: `#` + - Workflow: + * `_tag.#` + - Environment variables: + - *CM_GIT_CHECKOUT_TAG*: `#` + - Workflow: + +
+ + + * Group "**repo**" +
+ Click here to expand this section. + + * **`_mlcommons`** (default) + - Environment variables: + - *CM_GIT_URL*: `https://github.com/mlcommons/power-dev.git` + - Workflow: + * `_octoml` + - Environment variables: + - *CM_GIT_URL*: `https://github.com/octoml/power-dev.git` + - Workflow: + * `_repo.#` + - Environment variables: + - *CM_GIT_URL*: `#` + - Workflow: + +
+ + +#### Default variations + +`_mlcommons` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_GIT_DEPTH: `--depth 1` +* CM_GIT_PATCH: `no` +* CM_GIT_CHECKOUT_FOLDER: `power-dev` + +
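The checkout and repo groups above compose into a single tag string. A minimal Python sketch selecting the default repo pinned to a branch (the branch name is a placeholder):

```python
import cmind

# _mlcommons picks the repo-group default; _branch.master pins the checkout.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,src,source,power,power-dev,mlperf,mlcommons,_mlcommons,_branch.master',
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```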
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-power-dev/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-power-dev/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-power-dev/_cm.json)*** + * get,git,repo + * CM names: `--adr.['mlperf-power-dev-git-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-power-dev/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-power-dev/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-power-dev/_cm.json) + +___ +### Script output +`cmr "get src source power power-dev mlperf mlcommons [,variations]" -j` +#### New environment keys (filter) + +* `CM_MLPERF_POWER_SOURCE` +#### New environment keys auto-detected from customize diff --git a/script/get-mlperf-power-dev/_cm.json b/script/get-mlperf-power-dev/_cm.json new file mode 100644 index 0000000000..3adb7b3dbf --- /dev/null +++ b/script/get-mlperf-power-dev/_cm.json @@ -0,0 +1,87 @@ +{ + "alias": "get-mlperf-power-dev", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "default_env": { + "CM_GIT_DEPTH": "--depth 1", + "CM_GIT_PATCH": "no", + "CM_GIT_CHECKOUT_FOLDER": "power-dev" + }, + "deps": [ + ], + "new_env_keys": [ + "CM_MLPERF_POWER_SOURCE" + ], + "prehook_deps": [ + { + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_repo.": [ "CM_GIT_URL" ], + "_branch.": [ "CM_GIT_CHECKOUT" ], + "_tag.": [ "CM_GIT_CHECKOUT_TAG" ], + "_sha.": [ "CM_GIT_SHA" ] + }, + "force_env_keys": [ + "CM_GIT_*" + ], + "names": [ + "mlperf-power-dev-git-repo" + ], + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_MLPERF_POWER_SOURCE" + }, + "extra_cache_tags": "mlperf,power,power-dev,src" + } + ], + "tags": [ + "get", + "src", + "source", + "power", + "power-dev", + "mlperf", + "mlcommons" + ], + "uid": "72aa56768c994bcf", + "variations": { + "octoml": { + "group": "repo", + "env": { + "CM_GIT_URL": "https://github.com/octoml/power-dev.git" + } + }, + "mlcommons": { + "group": "repo", + "default": true, + "env": { + "CM_GIT_URL": "https://github.com/mlcommons/power-dev.git" + } + }, + "repo.#": { + "group": "repo", + "env": { + "CM_GIT_URL" : "#" + } + }, + "branch.#": { + "group": "checkout", + "env": { + "CM_GIT_CHECKOUT": "#" + } + }, + "sha.#": { + "group": "checkout", + "env": { + "CM_GIT_SHA": "#" + } + }, + "tag.#": { + "group": "checkout", + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + } + } + } +} diff --git a/script/get-mlperf-power-dev/customize.py b/script/get-mlperf-power-dev/customize.py new file mode 100644 index 0000000000..2af085d740 --- /dev/null +++ b/script/get-mlperf-power-dev/customize.py @@ -0,0 +1,21 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + + return {'return':0} + + +def postprocess(i): + + env = i['env'] + if env.get('CM_VERSION', '') == '': + env['CM_VERSION'] = 
"master" + + if env.get('CM_GIT_REPO_CURRENT_HASH', '') != '': + env['CM_VERSION'] += "-git-"+env['CM_GIT_REPO_CURRENT_HASH'] + + return {'return':0} diff --git a/script/get-mlperf-tiny-eembc-energy-runner-src/README.md b/script/get-mlperf-tiny-eembc-energy-runner-src/README.md new file mode 100644 index 0000000000..ec13f5a826 --- /dev/null +++ b/script/get-mlperf-tiny-eembc-energy-runner-src/README.md @@ -0,0 +1,131 @@ +Automatically generated README for this automation recipe: **get-mlperf-tiny-eembc-energy-runner-src** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-tiny-eembc-energy-runner-src,c7da8d1ce4164a4b) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-eembc-energy-runner-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,src,source,eembc,energyrunner,energy-runner,eembc-energy-runner,tinymlperf-energy-runner* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,src,source,eembc,energyrunner,energy-runner,eembc-energy-runner,tinymlperf-energy-runner` + +`cm run script --tags=get,src,source,eembc,energyrunner,energy-runner,eembc-energy-runner,tinymlperf-energy-runner ` + +*or* + +`cmr "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner"` + +`cmr "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,src,source,eembc,energyrunner,energy-runner,eembc-energy-runner,tinymlperf-energy-runner', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,src,source,eembc,energyrunner,energy-runner,eembc-energy-runner,tinymlperf-energy-runner"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,src,source,eembc,energyrunner,energy-runner,eembc-energy-runner,tinymlperf-energy-runner) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_GIT_CHECKOUT: `main` +* CM_GIT_PATCH: `no` +* CM_GIT_RECURSE_SUBMODULES: `` +* CM_GIT_URL: `https://github.com/eembc/energyrunner` + +
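For orientation, this is the user-space layout the recipe prepares before copying datasets (it mirrors `customize.py` below; the EEMBC EnergyRunner's home-directory convention is assumed):

```python
import os

home = os.path.expanduser('~')
# Session logs and benchmark datasets expected by the EEMBC EnergyRunner.
sessions = os.path.join(home, 'eembc', 'runner', 'sessions')
datasets = os.path.join(home, 'eembc', 'runner', 'benchmarks', 'ulp-mlperf', 'datasets')
for path in (sessions, datasets):
    os.makedirs(path, exist_ok=True)
```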
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-eembc-energy-runner-src/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-eembc-energy-runner-src/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-eembc-energy-runner-src/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-eembc-energy-runner-src/_cm.json) + +___ +### Script output +`cmr "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner " -j` +#### New environment keys (filter) + +* `+PYTHONPATH` +* `CM_EEMBC_ENERGY_RUNNER_*` +#### New environment keys auto-detected from customize + +* `CM_EEMBC_ENERGY_RUNNER_DATASETS` +* `CM_EEMBC_ENERGY_RUNNER_SESSIONS` +* `CM_EEMBC_ENERGY_RUNNER_SRC` +* `CM_EEMBC_ENERGY_RUNNER_SRC_DATASETS` \ No newline at end of file diff --git a/script/get-mlperf-tiny-eembc-energy-runner-src/_cm.json b/script/get-mlperf-tiny-eembc-energy-runner-src/_cm.json new file mode 100644 index 0000000000..67579d7792 --- /dev/null +++ b/script/get-mlperf-tiny-eembc-energy-runner-src/_cm.json @@ -0,0 +1,28 @@ +{ + "alias": "get-mlperf-tiny-eembc-energy-runner-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "default_env": { + "CM_GIT_CHECKOUT": "main", + "CM_GIT_PATCH": "no", + "CM_GIT_RECURSE_SUBMODULES": "", + "CM_GIT_URL": "https://github.com/eembc/energyrunner" + }, + "new_env_keys": [ + "CM_EEMBC_ENERGY_RUNNER_*", + "+PYTHONPATH" + ], + "tags": [ + "get", + "src", + "source", + "eembc", + "energyrunner", + "energy-runner", + "eembc-energy-runner", + "tinymlperf-energy-runner" + ], + "uid": "c7da8d1ce4164a4b" +} diff --git a/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py b/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py new file mode 100644 index 0000000000..93a162b980 --- /dev/null +++ b/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py @@ -0,0 +1,58 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + +# if os_info['platform'] == 'windows': +# return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + meta = i['meta'] + + if 'CM_GIT_DEPTH' not in env: + env['CM_GIT_DEPTH'] = '' + + return {'return':0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + env['CM_EEMBC_ENERGY_RUNNER_SRC'] = os.path.join(os.getcwd(), 'src') + datasets_src_path = os.path.join(os.getcwd(), 'src', 'datasets') + env['CM_EEMBC_ENERGY_RUNNER_SRC_DATASETS'] = datasets_src_path + + # Get user 
directory for EEMBC runner path + home_directory = os.path.expanduser( '~' ) + + sessions_path = os.path.join(home_directory, 'eembc', 'runner', 'sessions') + + print ('') + print ('Path to EEMBC runner sessions: {}'.format(sessions_path)) + + env['CM_EEMBC_ENERGY_RUNNER_SESSIONS'] = sessions_path + + if not os.path.isdir(sessions_path): + os.makedirs(sessions_path) + + datasets_path = os.path.join(home_directory, 'eembc', 'runner', 'benchmarks', 'ulp-mlperf', 'datasets') + + print ('') + print ('Path to EEMBC runner datasets: {}'.format(datasets_path)) + + if not os.path.isdir(datasets_path): + os.makedirs(datasets_path) + + env['CM_EEMBC_ENERGY_RUNNER_DATASETS'] = datasets_path + + print ('') + print ('Copying datasets to EEMBC user space ...') + + shutil.copytree(datasets_src_path, datasets_path, dirs_exist_ok=True) + + return {'return':0} diff --git a/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat b/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat new file mode 100644 index 0000000000..799902b4d9 --- /dev/null +++ b/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat @@ -0,0 +1,72 @@ +@echo off + +set CUR_DIR=%cd% +set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% + +echo ****************************************************** +echo Cloning EEMBC Energy Runner from %CM_GIT_URL% with branch %CM_GIT_CHECKOUT% %CM_GIT_DEPTH% %CM_GIT_RECURSE_SUBMODULES% ... + +set folder=src + +if not exist %folder% ( + + if not "%CM_GIT_SHA%" == "" ( + git clone %CM_GIT_RECURSE_SUBMODULES% -b "%CM_GIT_CHECKOUT%" %CM_GIT_URL% %CM_GIT_DEPTH% %folder% + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + cd %folder% + ) else ( + git clone %CM_GIT_RECURSE_SUBMODULES% %CM_GIT_URL% %CM_GIT_DEPTH% %folder% + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + cd %folder% + + git checkout "%CM_GIT_CHECKOUT%" + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + ) +) else ( + + cd %folder% + +) + + +if not "%CM_GIT_SUBMODULES%" == "" ( + for /F %%s in ("%CM_GIT_SUBMODULES%") do ( + echo. + echo Initializing submodule %%s + git submodule update --init %%s + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + ) +) + + +if "%CM_GIT_PATCH%" == "yes" ( + echo Git patching is not yet implemented in CM script "get-mlperf-tiny-src" - please add it! + pause + + rem set patch_filename=%CM_GIT_PATCH_FILENAME% + rem if [ ! -n ${CM_GIT_PATCH_FILENAMES} ]; then + rem patchfile=${CM_GIT_PATCH_FILENAME:-"git.patch"} + rem CM_GIT_PATCH_FILENAMES=$patchfile + rem fi + rem + rem IFS=', ' read -r -a patch_files <<< ${CM_GIT_PATCH_FILENAMES} + rem + rem for patch_filename in "${patch_files[@]}" + rem do + rem echo "Applying patch ${SCRIPT_DIR}/patch/$patch_filename" + rem git apply ${SCRIPT_DIR}/patch/"$patch_filename" + rem if [ "${?}" != "0" ]; then exit 1; fi + rem done + +) + +rem Based on https://github.com/mwangistan/inference +for %%f in (%SCRIPT_DIR%\patch\windows-*) do ( + echo %%f + patch -p1 < %%f +) + + +cd %CUR_DIR% + +exit /b 0 diff --git a/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh b/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh new file mode 100644 index 0000000000..ea2645f7e5 --- /dev/null +++ b/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +CUR_DIR=$PWD +SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} + +echo "******************************************************" +echo "Cloning EEMBC Energy Runner from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES} ..." + +if [ ! 
-d "src" ]; then + if [ -z ${CM_GIT_SHA} ]; then + git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} src + cd src + else + git clone ${CM_GIT_RECURSE_SUBMODULES} ${CM_GIT_URL} ${CM_GIT_DEPTH} src + cd src + git checkout -b "${CM_GIT_CHECKOUT}" + fi + if [ "${?}" != "0" ]; then exit 1; fi +else + cd src +fi + +IFS=',' read -r -a submodules <<< "${CM_GIT_SUBMODULES}" + +for submodule in "${submodules[@]}" +do + echo "Initializing submodule ${submodule}" + git submodule update --init "${submodule}" + if [ "${?}" != "0" ]; then exit 1; fi +done + +if [ ${CM_GIT_PATCH} == "yes" ]; then + patch_filename=${CM_GIT_PATCH_FILENAME:-git.patch} + echo "Applying patch ${SCRIPT_DIR}/patch/$patch_filename" + git apply ${SCRIPT_DIR}/patch/"$patch_filename" + if [ "${?}" != "0" ]; then exit 1; fi +fi + +cd "$CUR_DIR" diff --git a/script/get-mlperf-tiny-src/README.md b/script/get-mlperf-tiny-src/README.md new file mode 100644 index 0000000000..dab294c761 --- /dev/null +++ b/script/get-mlperf-tiny-src/README.md @@ -0,0 +1,145 @@ +Automatically generated README for this automation recipe: **get-mlperf-tiny-src** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-tiny-src,777843a0bb034524) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,src,source,tiny,tiny-src,tiny-source,tinymlperf,tinymlperf-src,mlperf,mlcommons* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,src,source,tiny,tiny-src,tiny-source,tinymlperf,tinymlperf-src,mlperf,mlcommons` + +`cm run script --tags=get,src,source,tiny,tiny-src,tiny-source,tinymlperf,tinymlperf-src,mlperf,mlcommons ` + +*or* + +`cmr "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons"` + +`cmr "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons " ` + + +#### Run this script from Python + +
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,src,source,tiny,tiny-src,tiny-source,tinymlperf,tinymlperf-src,mlperf,mlcommons',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+   print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,src,source,tiny,tiny-src,tiny-source,tinymlperf,tinymlperf-src,mlperf,mlcommons"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,src,source,tiny,tiny-src,tiny-source,tinymlperf,tinymlperf-src,mlperf,mlcommons) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_GIT_CHECKOUT: `master` +* CM_GIT_PATCH: `no` +* CM_GIT_RECURSE_SUBMODULES: `` +* CM_GIT_URL: `https://github.com/mlcommons/tiny.git` + +
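+For example, a minimal sketch (assuming the `env` input key, which mirrors `--env.KEY=VALUE`) that pins a different branch:
+
+```python
+import cmind
+
+# 'env' entries override the default_env values listed above; the branch name is illustrative
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,src,source,tiny,tiny-src,tiny-source,tinymlperf,tinymlperf-src,mlperf,mlcommons',
+                  'env':{'CM_GIT_CHECKOUT':'master'},
+                  'out':'con'})
+if r['return']>0:
+   print (r['error'])
+```
+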
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-src/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-src/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-src/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-src/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-src/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-tiny-src/_cm.json) + +___ +### Script output +`cmr "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons " -j` +#### New environment keys (filter) + +* `+PYTHONPATH` +* `CM_MLPERF_TINY_*` +#### New environment keys auto-detected from customize + +* `CM_MLPERF_TINY_BENCHMARK` +* `CM_MLPERF_TINY_DATASETS` +* `CM_MLPERF_TINY_DATASETS_AD` +* `CM_MLPERF_TINY_DATASETS_IC` +* `CM_MLPERF_TINY_DATASETS_KWS` +* `CM_MLPERF_TINY_DATASETS_KWS_OPEN` +* `CM_MLPERF_TINY_DATASETS_VWW` +* `CM_MLPERF_TINY_SRC` +* `CM_MLPERF_TINY_TRAINING` +* `CM_MLPERF_TINY_TRAINING_AD` +* `CM_MLPERF_TINY_TRAINING_IC` +* `CM_MLPERF_TINY_TRAINING_KWS` +* `CM_MLPERF_TINY_TRAINING_VWW` \ No newline at end of file diff --git a/script/get-mlperf-tiny-src/_cm.json b/script/get-mlperf-tiny-src/_cm.json new file mode 100644 index 0000000000..4a73973593 --- /dev/null +++ b/script/get-mlperf-tiny-src/_cm.json @@ -0,0 +1,42 @@ +{ + "alias": "get-mlperf-tiny-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "default_env": { + "CM_GIT_CHECKOUT": "master", + "CM_GIT_PATCH": "no", + "CM_GIT_RECURSE_SUBMODULES": "", + "CM_GIT_URL": "https://github.com/mlcommons/tiny.git" + }, + "deps": [ + { + "tags": "detect,os" + }, + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + } + ], + "new_env_keys": [ + "CM_MLPERF_TINY_*", + "+PYTHONPATH" + ], + "tags": [ + "get", + "src", + "source", + "tiny", + "tiny-src", + "tiny-source", + "tinymlperf", + "tinymlperf-src", + "mlperf", + "mlcommons" + ], + "uid": "777843a0bb034524" +} diff --git a/script/get-mlperf-tiny-src/customize.py b/script/get-mlperf-tiny-src/customize.py new file mode 100644 index 0000000000..f361f009f8 --- /dev/null +++ b/script/get-mlperf-tiny-src/customize.py @@ -0,0 +1,46 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + +# if os_info['platform'] == 'windows': +# return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + meta = i['meta'] + + if 'CM_GIT_DEPTH' not in 
env: + env['CM_GIT_DEPTH'] = '' + + return {'return':0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + env['CM_MLPERF_TINY_SRC'] = os.path.join(os.getcwd(), 'src') + env['CM_MLPERF_TINY_BENCHMARK'] = os.path.join(os.getcwd(), 'src', 'benchmark') + env['CM_MLPERF_TINY_DATASETS'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets') + env['CM_MLPERF_TINY_DATASETS_AD'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'ad01') + env['CM_MLPERF_TINY_DATASETS_IC'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'ic01') + env['CM_MLPERF_TINY_DATASETS_KWS'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'kws01') + env['CM_MLPERF_TINY_DATASETS_KWS_OPEN'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'kws01-open') + env['CM_MLPERF_TINY_DATASETS_VWW'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'vww01') + env['CM_MLPERF_TINY_TRAINING'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'training') + env['CM_MLPERF_TINY_TRAINING_AD'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'training', 'anomaly_detection') + env['CM_MLPERF_TINY_TRAINING_IC'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'training', 'image_classification') + env['CM_MLPERF_TINY_TRAINING_KWS'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'training', 'keyword_spotting') + env['CM_MLPERF_TINY_TRAINING_VWW'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'training', 'visual_wake_words') + +# 20221024: we save and restore env in the main script and can clean env here for determinism +# if '+PYTHONPATH' not in env: env['+PYTHONPATH'] = [] +# env['+PYTHONPATH']=[] +# env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], 'python')) +# env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], 'tools', 'submission')) + + return {'return':0} diff --git a/script/get-mlperf-tiny-src/run.bat b/script/get-mlperf-tiny-src/run.bat new file mode 100644 index 0000000000..e94998ad76 --- /dev/null +++ b/script/get-mlperf-tiny-src/run.bat @@ -0,0 +1,72 @@ +@echo off + +set CUR_DIR=%cd% +set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% + +echo ****************************************************** +echo Cloning MLCommons from %CM_GIT_URL% with branch %CM_GIT_CHECKOUT% %CM_GIT_DEPTH% %CM_GIT_RECURSE_SUBMODULES% ... + +set folder=src + +if not exist %folder% ( + + if not "%CM_GIT_SHA%" == "" ( + git clone %CM_GIT_RECURSE_SUBMODULES% -b "%CM_GIT_CHECKOUT%" %CM_GIT_URL% %CM_GIT_DEPTH% %folder% + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + cd %folder% + ) else ( + git clone %CM_GIT_RECURSE_SUBMODULES% %CM_GIT_URL% %CM_GIT_DEPTH% %folder% + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + cd %folder% + + git checkout "%CM_GIT_CHECKOUT%" + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + ) +) else ( + + cd %folder% + +) + + +if not "%CM_GIT_SUBMODULES%" == "" ( + for /F %%s in ("%CM_GIT_SUBMODULES%") do ( + echo. + echo Initializing submodule %%s + git submodule update --init %%s + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + ) +) + + +if "%CM_GIT_PATCH%" == "yes" ( + echo Git patching is not yet implemented in CM script "get-mlperf-tiny-src" - please add it! + pause + + rem set patch_filename=%CM_GIT_PATCH_FILENAME% + rem if [ ! 
-n ${CM_GIT_PATCH_FILENAMES} ]; then
+  rem     patchfile=${CM_GIT_PATCH_FILENAME:-"git.patch"}
+  rem     CM_GIT_PATCH_FILENAMES=$patchfile
+  rem fi
+  rem
+  rem IFS=', ' read -r -a patch_files <<< ${CM_GIT_PATCH_FILENAMES}
+  rem
+  rem for patch_filename in "${patch_files[@]}"
+  rem do
+  rem    echo "Applying patch ${SCRIPT_DIR}/patch/$patch_filename"
+  rem    git apply ${SCRIPT_DIR}/patch/"$patch_filename"
+  rem    if [ "${?}" != "0" ]; then exit 1; fi
+  rem done
+
+)
+
+rem Based on https://github.com/mwangistan/inference
+for %%f in (%SCRIPT_DIR%\patch\windows-*) do (
+  echo %%f
+  patch -p1 < %%f
+)
+
+
+cd %CUR_DIR%
+
+exit /b 0
diff --git a/script/get-mlperf-tiny-src/run.sh b/script/get-mlperf-tiny-src/run.sh
new file mode 100644
index 0000000000..e625891acf
--- /dev/null
+++ b/script/get-mlperf-tiny-src/run.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+CUR_DIR=$PWD
+SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}
+
+echo "******************************************************"
+echo "Cloning MLCommons from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES} ..."
+
+if [ ! -d "src" ]; then
+  if [ -z "${CM_GIT_SHA}" ]; then
+    git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} src
+    cd src
+  else
+    git clone ${CM_GIT_RECURSE_SUBMODULES} ${CM_GIT_URL} ${CM_GIT_DEPTH} src
+    cd src
+    git checkout -b "${CM_GIT_CHECKOUT}" "${CM_GIT_SHA}"
+  fi
+  if [ "${?}" != "0" ]; then exit 1; fi
+else
+  cd src
+fi
+
+IFS=',' read -r -a submodules <<< "${CM_GIT_SUBMODULES}"
+
+for submodule in "${submodules[@]}"
+do
+  echo "Initializing submodule ${submodule}"
+  git submodule update --init "${submodule}"
+  if [ "${?}" != "0" ]; then exit 1; fi
+done
+
+if [ "${CM_GIT_PATCH}" == "yes" ]; then
+  patch_filename=${CM_GIT_PATCH_FILENAME:-git.patch}
+  echo "Applying patch ${SCRIPT_DIR}/patch/$patch_filename"
+  git apply ${SCRIPT_DIR}/patch/"$patch_filename"
+  if [ "${?}" != "0" ]; then exit 1; fi
+fi
+
+cd "$CUR_DIR"
diff --git a/script/get-mlperf-training-nvidia-code/README.md b/script/get-mlperf-training-nvidia-code/README.md
new file mode 100644
index 0000000000..433b472372
--- /dev/null
+++ b/script/get-mlperf-training-nvidia-code/README.md
@@ -0,0 +1,172 @@
+Automatically generated README for this automation recipe: **get-mlperf-training-nvidia-code**
+
+Category: **MLPerf benchmark support**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-training-nvidia-code,fdc630b1d41743c5) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-training-nvidia-code)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,nvidia,mlperf,training,code,training-code*
+* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get nvidia mlperf training code training-code" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,nvidia,mlperf,training,code,training-code`
+
+`cm run script --tags=get,nvidia,mlperf,training,code,training-code[,variations] `
+
+*or*
+
+`cmr "get nvidia mlperf training code training-code"`
+
+`cmr "get nvidia mlperf training code training-code [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,nvidia,mlperf,training,code,training-code',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+   print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,nvidia,mlperf,training,code,training-code"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,nvidia,mlperf,training,code,training-code) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get nvidia mlperf training code training-code[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**repo-owner**" +
+ Click here to expand this section. + + * `_ctuning` + - Environment variables: + - *CM_TMP_TRAINING_SRC*: `ctuning` + - Workflow: + * `_custom` + - Workflow: + * **`_mlcommons`** (default) + - Environment variables: + - *CM_TMP_TRAINING_SRC*: `mlcommons` + - Workflow: + * `_nvidia-only` + - Environment variables: + - *CM_TMP_TRAINING_SRC*: `GATEOverflow` + - Workflow: + +
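+
+  For instance, a sketch of selecting the `_nvidia-only` variation from Python; the variation name is simply appended to the tags with a `_` prefix:
+
+```python
+import cmind
+
+# '_nvidia-only' switches CM_TMP_TRAINING_SRC to the GATEOverflow fork, as listed above
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,nvidia,mlperf,training,code,training-code,_nvidia-only',
+                  'out':'con'})
+```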
+ + +#### Default variations + +`_mlcommons` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +#### Versions +Default version: `r3.0` + +* `r2.1` +* `r3.0` +* `r3.1` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-training-nvidia-code/_cm.json)*** + * get,git,repo + * CM names: `--adr.['mlperf-training-results']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-training-nvidia-code/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-training-nvidia-code/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-training-nvidia-code/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-training-nvidia-code/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-training-nvidia-code/_cm.json) + +___ +### Script output +`cmr "get nvidia mlperf training code training-code [,variations]" -j` +#### New environment keys (filter) + +* `CM_MLPERF_TRAINING_NVIDIA_CODE_PATH` +#### New environment keys auto-detected from customize + +* `CM_MLPERF_TRAINING_NVIDIA_CODE_PATH` \ No newline at end of file diff --git a/script/get-mlperf-training-nvidia-code/_cm.json b/script/get-mlperf-training-nvidia-code/_cm.json new file mode 100644 index 0000000000..457e2cd289 --- /dev/null +++ b/script/get-mlperf-training-nvidia-code/_cm.json @@ -0,0 +1,79 @@ +{ + "alias": "get-mlperf-training-nvidia-code", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "clean_files": [], + "default_version": "r3.0", + "deps": [ + { + "names": [ + "mlperf-training-results" + ], + "extra_cache_tags": "mlperf,training,results", + "tags": "get,git,repo", + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_MLPERF_TRAINING_RESULTS_PATH" + }, + "update_tags_from_env_with_prefix": { + "_repo.": [ + "CM_NVIDIA_CODE_DOWNLOAD_URL" + ] + } + } + ], + "new_env_keys": [ + "CM_MLPERF_TRAINING_NVIDIA_CODE_PATH" + ], + "tags": [ + "get", + "nvidia", + "mlperf", + "training", + "code", + "training-code" + ], + "uid": "fdc630b1d41743c5", + "variations": { + "ctuning": { + "group": "repo-owner", + "env": { + "CM_TMP_TRAINING_SRC": "ctuning" + } + }, + "custom": { + "group": "repo-owner" + }, + "mlcommons": { + "default": true, + "group": "repo-owner", + "env": { + "CM_TMP_TRAINING_SRC": "mlcommons" + } + }, + "nvidia-only": { + "group": "repo-owner", + "env": { + "CM_TMP_TRAINING_SRC": "GATEOverflow" + } + } + }, + "versions": { + "r3.1": { + "env": { + "CM_NVIDIA_CODE_DOWNLOAD_URL": "https://github.com/<<>>/training_results_v3.1" + } + }, + "r3.0": { + "env": { + "CM_NVIDIA_CODE_DOWNLOAD_URL": "https://github.com/<<>>/training_results_v3.0" + } + }, + "r2.1": { + "env": { + "CM_NVIDIA_CODE_DOWNLOAD_URL": "https://github.com/<<>>/training_results_v2.1" + } + } + } +} diff --git a/script/get-mlperf-training-nvidia-code/customize.py b/script/get-mlperf-training-nvidia-code/customize.py new file mode 100644 index 0000000000..a58acfbad3 --- /dev/null +++ b/script/get-mlperf-training-nvidia-code/customize.py @@ 
-0,0 +1,22 @@
+from cmind import utils
+import os
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    return {'return':0}
+
+
+def postprocess(i):
+    env = i['env']
+
+    env['CM_MLPERF_TRAINING_NVIDIA_CODE_PATH'] = os.path.join(env['CM_MLPERF_TRAINING_RESULTS_PATH'], "NVIDIA")
+    if not os.path.exists(env['CM_MLPERF_TRAINING_NVIDIA_CODE_PATH']):
+        return {'return': 1, 'error': f'NVIDIA code path not found in the repository: {env["CM_MLPERF_TRAINING_RESULTS_PATH"]}'}
+
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_TRAINING_NVIDIA_CODE_PATH']
+
+    return {'return':0}
diff --git a/script/get-mlperf-training-src/README-extra.md b/script/get-mlperf-training-src/README-extra.md
new file mode 100644
index 0000000000..08293c98bb
--- /dev/null
+++ b/script/get-mlperf-training-src/README-extra.md
@@ -0,0 +1,27 @@
+# Get MLCommons Training Source
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones the [MLCommons Training repository](https://github.com/mlcommons/training).
+
+## Commands
+To install
+```
+cm run script --tags=get,mlperf,training,src,[VARIATION] --version=[VERSION]
+```
+where [VARIATION] is one of
+* `default:` Works with the official MLCommons training repository. Uses the `short-history` variation
+* `patch:` Applies `git.patch` to the cloned git repository
+* `octoml:` Works with the OctoML fork of the MLCommons training repository. Uses the `short-history` variation
+* `short-history:` Uses a git depth of 5 commits (significantly reduces the download size)
+* `full-history:` Uses the full git history
+* `no-recurse-submodules:` Only downloads the main repository
+
+[VERSION] is one of
+* `master:` Uses the master branch
+* `r2.1:` Uses the release branch used for the MLCommons training 2.1 round
+
+## Exported Variables
+* `CM_MLPERF_TRAINING_SOURCE`: Directory path of the cloned training repository
+* `PYTHONPATH`: Appended with the paths to the vision module and the submission tools module
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
diff --git a/script/get-mlperf-training-src/README.md b/script/get-mlperf-training-src/README.md
new file mode 100644
index 0000000000..df0104bb64
--- /dev/null
+++ b/script/get-mlperf-training-src/README.md
@@ -0,0 +1,260 @@
+Automatically generated README for this automation recipe: **get-mlperf-training-src**
+
+Category: **MLPerf benchmark support**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-mlperf-training-src,dc440bd88e794a28) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-training-src)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,src,source,training,training-src,training-source,mlperf,mlcommons*
+* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get src source training training-src training-source mlperf mlcommons" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,src,source,training,training-src,training-source,mlperf,mlcommons`
+
+`cm run script --tags=get,src,source,training,training-src,training-source,mlperf,mlcommons[,variations] `
+
+*or*
+
+`cmr "get src source training training-src training-source mlperf mlcommons"`
+
+`cmr "get src source training training-src training-source mlperf mlcommons [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,src,source,training,training-src,training-source,mlperf,mlcommons',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+   print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,src,source,training,training-src,training-source,mlperf,mlcommons"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,src,source,training,training-src,training-source,mlperf,mlcommons) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get src source training training-src training-source mlperf mlcommons[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_no-recurse-submodules` + - Environment variables: + - *CM_GIT_RECURSE_SUBMODULES*: `` + - Workflow: + * `_nvidia-retinanet` + - Environment variables: + - *CM_GIT_PATCH_FILENAMES*: `nvidia-retinanet.patch,cpu_load.patch` + - Workflow: + * `_patch` + - Environment variables: + - *CM_GIT_PATCH*: `yes` + - Workflow: + +
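+
+  The `_nvidia-retinanet` variation builds on `_patch`; `customize.py` expands the comma-separated names in `CM_GIT_PATCH_FILENAMES` into absolute paths under the script's `patch/` directory, roughly as follows (the script path is hypothetical):
+
+```python
+import os
+
+script_path = '/path/to/script/get-mlperf-training-src'
+patch_files = 'nvidia-retinanet.patch,cpu_load.patch'.split(',')
+print(','.join(os.path.join(script_path, 'patch', p) for p in patch_files))
+```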
+ + + * Group "**checkout**" +
+ Click here to expand this section. + + * `_branch.#` + - Environment variables: + - *CM_GIT_CHECKOUT*: `#` + - Workflow: + * `_sha.#` + - Environment variables: + - *CM_GIT_SHA*: `#` + - Workflow: + * `_tag.#` + - Environment variables: + - *CM_GIT_CHECKOUT_TAG*: `#` + - Workflow: + +
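+
+  Here `#` is a placeholder that is filled in on the command line or in the tags; a sketch with a hypothetical commit hash:
+
+```python
+import cmind
+
+# '_sha.<hash>' is a dynamic variation; it sets CM_GIT_SHA to the part after the dot
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,src,source,training,training-src,training-source,mlperf,mlcommons,_sha.a1b2c3d',
+                  'out':'con'})
+```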
+ + + * Group "**git-history**" +
+ Click here to expand this section. + + * `_full-history` + - Environment variables: + - *CM_GIT_DEPTH*: `` + - Workflow: + * **`_short-history`** (default) + - Environment variables: + - *CM_GIT_DEPTH*: `--depth 5` + - Workflow: + +
+ + + * Group "**repo**" +
+ Click here to expand this section. + + * `_repo.#` + - Environment variables: + - *CM_GIT_URL*: `#` + - Workflow: + +
+ + + * Group "**src**" +
+ Click here to expand this section. + + * **`_cknowledge`** (default) + - Environment variables: + - *CM_GIT_URL*: `https://github.com/cknowledge/training.git` + - Workflow: + * `_mlcommons` + - Environment variables: + - *CM_GIT_URL*: `https://github.com/mlcommons/training.git` + - Workflow: + +
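+
+  To clone the upstream MLCommons repository instead of the default `cknowledge` fork, a sketch:
+
+```python
+import cmind
+
+# '_mlcommons' points CM_GIT_URL at https://github.com/mlcommons/training.git, as listed above
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,src,source,training,training-src,training-source,mlperf,mlcommons,_mlcommons',
+                  'out':'con'})
+```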
+ + +#### Default variations + +`_cknowledge,_short-history` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_GIT_CHECKOUT: `master` +* CM_GIT_DEPTH: `--depth 4` +* CM_GIT_PATCH: `no` +* CM_GIT_RECURSE_SUBMODULES: ` --recurse-submodules` +* CM_GIT_CHECKOUT_FOLDER: `training` + +
+ +#### Versions +Default version: `master` + +* `custom` +* `master` +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-training-src/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-training-src/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-training-src/_cm.json)*** + * get,git,repo + * CM names: `--adr.['mlperf-training-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-training-src/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-training-src/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-mlperf-training-src/_cm.json) + +___ +### Script output +`cmr "get src source training training-src training-source mlperf mlcommons [,variations]" -j` +#### New environment keys (filter) + +* `+PYTHONPATH` +* `CM_MLPERF_TRAINING_*` +* `CM_MLPERF_TRAINING_LAST_RELEASE` +#### New environment keys auto-detected from customize diff --git a/script/get-mlperf-training-src/_cm.json b/script/get-mlperf-training-src/_cm.json new file mode 100644 index 0000000000..194ab8d736 --- /dev/null +++ b/script/get-mlperf-training-src/_cm.json @@ -0,0 +1,134 @@ +{ + "alias": "get-mlperf-training-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "default_env": { + "CM_GIT_CHECKOUT": "master", + "CM_GIT_DEPTH": "--depth 4", + "CM_GIT_PATCH": "no", + "CM_GIT_RECURSE_SUBMODULES": " --recurse-submodules", + "CM_GIT_CHECKOUT_FOLDER": "training" + }, + "default_version": "master", + "new_env_keys": [ + "CM_MLPERF_TRAINING_*", + "CM_MLPERF_TRAINING_LAST_RELEASE", + "+PYTHONPATH" + ], + "prehook_deps": [ + { + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_repo.": [ "CM_GIT_URL" ], + "_branch.": [ "CM_GIT_CHECKOUT" ], + "_tag.": [ "CM_GIT_CHECKOUT_TAG" ], + "_sha.": [ "CM_GIT_SHA" ] + }, + "force_env_keys": [ + "CM_GIT_*" + ], + "names": [ + "mlperf-training-repo" + ], + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_MLPERF_TRAINING_SOURCE" + }, + "extra_cache_tags": "mlperf,training,src" + } + ], + "tags": [ + "get", + "src", + "source", + "training", + "training-src", + "training-source", + "mlperf", + "mlcommons" + ], + "uid": "dc440bd88e794a28", + "variations": { + "mlcommons": { + "group": "src", + "env": { + "CM_GIT_URL": "https://github.com/mlcommons/training.git" + } + }, + "repo.#": { + "group": "repo", + "env": { + "CM_GIT_URL" : "#" + } + }, + "branch.#": { + "group": "checkout", + "env": { + "CM_GIT_CHECKOUT": "#" + } + }, + "sha.#": { + "group": "checkout", + "env": { + "CM_GIT_SHA": "#" + } + }, + "tag.#": { + "group": "checkout", + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + } + }, + "full-history": { + "group": "git-history", + "env": { + "CM_GIT_DEPTH": "" + } + }, + "no-recurse-submodules": { + "env": { + "CM_GIT_RECURSE_SUBMODULES": "" + } + }, + "patch": { + "env": { + "CM_GIT_PATCH": "yes" + } + }, + "short-history": { + 
"group": "git-history", + "default": true, + "env": { + "CM_GIT_DEPTH": "--depth 5" + } + }, + "nvidia-retinanet": { + "base": [ + "patch" + ], + "env": { + "CM_GIT_PATCH_FILENAMES": "nvidia-retinanet.patch,cpu_load.patch" + } + }, + "cknowledge": { + "group": "src", + "default": true, + "env": { + "CM_GIT_URL": "https://github.com/cknowledge/training.git" + } + } + }, + "versions": { + "custom": { + "env": { + "CM_MLPERF_LAST_RELEASE": "custom" + } + }, + "master": { + "env": { + "CM_MLPERF_LAST_RELEASE": "v3.1" + } + } + } +} diff --git a/script/get-mlperf-training-src/customize.py b/script/get-mlperf-training-src/customize.py new file mode 100644 index 0000000000..d2de607ed2 --- /dev/null +++ b/script/get-mlperf-training-src/customize.py @@ -0,0 +1,27 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + env = i['env'] + + script_path = i['run_script_input']['path'] + + if env.get('CM_GIT_PATCH_FILENAMES', '') != '': + patch_files = env['CM_GIT_PATCH_FILENAMES'].split(",") + patch_files_full_paths = [] + for patch_file in patch_files: + patch_file_full_path = os.path.join(script_path, "patch", patch_file) + patch_files_full_paths.append(patch_file_full_path) + env['CM_GIT_PATCH_FILEPATHS'] = ",".join(patch_files_full_paths) + + return {'return':0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return':0} diff --git a/script/get-mlperf-training-src/patch/cpu_load.patch b/script/get-mlperf-training-src/patch/cpu_load.patch new file mode 100644 index 0000000000..b72537696e --- /dev/null +++ b/script/get-mlperf-training-src/patch/cpu_load.patch @@ -0,0 +1,16 @@ +diff --git a/single_stage_detector/ssd/pth_to_onnx.py b/single_stage_detector/ssd/pth_to_onnx.py +index 93679cd..6146d49 100755 +--- a/single_stage_detector/ssd/pth_to_onnx.py ++++ b/single_stage_detector/ssd/pth_to_onnx.py +@@ -54,7 +54,10 @@ def main(args): + model.to(device) + + print("Loading model") +- checkpoint = torch.load(args.input) ++ if args.device == "cpu": ++ checkpoint = torch.load(args.input, map_location=torch.device('cpu')) ++ else: ++ checkpoint = torch.load(args.input) + + # For some reason the batchnorms in the checkpoint do not have the same sizes as the module object. The checkpoint + # batchnorms have a size of [1, N, 1, 1], while the model batchnorms just have a size of [N]. 
diff --git a/script/get-mlperf-training-src/patch/nvidia-retinanet.patch b/script/get-mlperf-training-src/patch/nvidia-retinanet.patch new file mode 100644 index 0000000000..7256a1accf --- /dev/null +++ b/script/get-mlperf-training-src/patch/nvidia-retinanet.patch @@ -0,0 +1,170 @@ +diff --git a/single_stage_detector/ssd/model/retinanet.py b/single_stage_detector/ssd/model/retinanet.py +index 2f10d96..cdba3be 100644 +--- a/single_stage_detector/ssd/model/retinanet.py ++++ b/single_stage_detector/ssd/model/retinanet.py +@@ -12,6 +12,7 @@ from model.transform import GeneralizedRCNNTransform + from model.backbone_utils import resnet_fpn_backbone, _validate_trainable_layers + from model.feature_pyramid_network import LastLevelP6P7 + from model.focal_loss import sigmoid_focal_loss ++from model.image_list import ImageList + from model.boxes import box_iou, clip_boxes_to_image, batched_nms + from model.utils import Matcher, overwrite_eps, BoxCoder + +@@ -510,7 +511,13 @@ class RetinaNet(nn.Module): + original_image_sizes.append((val[0], val[1])) + + # transform the input +- images, targets = self.transform(images, targets) ++ # images, targets = self.transform(images, targets) ++ _image_sizes = [img.shape[-2:] for img in images] ++ for _size in _image_sizes: ++ assert len(_size) == 2 and _size[0] == 800 and _size[1] == 800 ++ # print(type(images)) ++ # images = ImageList(torch.stack(images), _image_sizes) ++ images = ImageList(images, _image_sizes) + + # Check for degenerate boxes + # TODO: Move this to a function +@@ -539,7 +546,11 @@ class RetinaNet(nn.Module): + + # compute the retinanet heads outputs using the features + head_outputs = self.head(features) ++ for k, v in head_outputs.items(): ++ print(f"{k}: {v.size()}") ++ return head_outputs + ++ """ + # create the set of anchors + anchors = self.anchor_generator(images, features) + +@@ -576,6 +587,7 @@ class RetinaNet(nn.Module): + self._has_warned = True + return losses, detections + return self.eager_outputs(losses, detections) ++ """ + + + model_urls = { +diff --git a/single_stage_detector/scripts/pth_to_onnx.py b/single_stage_detector/ssd/pth_to_onnx.py +similarity index 65% +rename from single_stage_detector/scripts/pth_to_onnx.py +rename to single_stage_detector/ssd/pth_to_onnx.py +index 78945aa..93679cd 100755 +--- a/single_stage_detector/scripts/pth_to_onnx.py ++++ b/single_stage_detector/ssd/pth_to_onnx.py +@@ -8,7 +8,7 @@ from torch.autograd import Variable + + from model.retinanet import retinanet_from_backbone + +-def parse_args(add_help=True): ++def parse_args(add_help=True, custom_argv=None): + parser = argparse.ArgumentParser(description='Convert PyTorch detection file to onnx format', add_help=add_help) + + parser.add_argument('--input', required=True, help='input pth file') +@@ -30,11 +30,15 @@ def parse_args(add_help=True): + help="Model data layout") + parser.add_argument('--device', default='cuda', help='device') + +- args = parser.parse_args() ++ if custom_argv is None: ++ args = parser.parse_args() ++ else: ++ args = parser.parse_args(args=custom_argv) + + args.output = args.output or ('retinanet_'+args.backbone+'.onnx') + return args + ++ + def main(args): + batch_size = args.batch_size or 1 + image_size = args.image_size or [800, 800] +@@ -51,6 +55,25 @@ def main(args): + + print("Loading model") + checkpoint = torch.load(args.input) ++ ++ # For some reason the batchnorms in the checkpoint do not have the same sizes as the module object. 
The checkpoint ++ # batchnorms have a size of [1, N, 1, 1], while the model batchnorms just have a size of [N]. ++ # However, this is fine, since (assuming the README is correct), the batchnorms were frozen and were not modified ++ # during training. ++ target_state_dict = model.state_dict() ++ for k, v in target_state_dict.items(): ++ ckpt_val = checkpoint["model"][k] ++ if v.size() == ckpt_val.size(): ++ continue ++ target_size = torch.tensor(v.size()) ++ actual_size = torch.tensor(ckpt_val.size()) ++ flattened = torch.flatten(actual_size) ++ if all(target_size != flattened): ++ raise ValueError(f"Real size mismatch for {k}: {target_size} vs {actual_size}") ++ checkpoint["model"][k] = checkpoint["model"][k].view(target_size) ++ # Remove unexpected keys ++ for k in [k for k in checkpoint["model"] if k not in target_state_dict]: ++ del checkpoint["model"][k] + model.load_state_dict(checkpoint['model']) + + print("Creating input tensor") +@@ -60,20 +83,31 @@ def main(args): + dtype=torch.float) + inputs = torch.autograd.Variable(rand) + # Output dynamic axes ++ """ + dynamic_axes = { + 'boxes': {0 : 'num_detections'}, + 'scores': {0 : 'num_detections'}, + 'labels': {0 : 'num_detections'}, + } ++ """ ++ + # Input dynamic axes ++ """ + if (args.batch_size is None) or (args.image_size is None): + dynamic_axes['images'] = {} + if args.batch_size is None: +- dynamic_axes['images'][0]: 'batch_size' ++ dynamic_axes['images'][0] = 'batch_size' + if args.image_size is None: + dynamic_axes['images'][2] = 'width' + dynamic_axes['images'][3] = 'height' +- ++ """ ++ # Force dynamic batch_size ++ dynamic_axes = { ++ "images": {0: "batch_size"}, ++ "cls_logits": {0: "batch_size", 1: "num_regions", 2: "num_classes"}, ++ "bbox_regression": {0: "batch_size", 1: "num_regions", 2: "bbox_coord_dim"}, ++ } ++ print(dynamic_axes) + + print("Exporting the model") + model.eval() +@@ -81,10 +115,11 @@ def main(args): + inputs, + args.output, + export_params=True, +- opset_version=13, +- do_constant_folding=False, ++ opset_version=11, ++ do_constant_folding=True, + input_names=['images'], +- output_names=['boxes', 'scores', 'labels'], ++ # output_names=['boxes', 'scores', 'labels'], ++ output_names=['cls_logits', 'bbox_regression'], + dynamic_axes=dynamic_axes) + + +diff --git a/single_stage_detector/ssd/run_pth_to_onnx.sh b/single_stage_detector/ssd/run_pth_to_onnx.sh +new file mode 100644 +index 0000000..e244aed +--- /dev/null ++++ b/single_stage_detector/ssd/run_pth_to_onnx.sh +@@ -0,0 +1,9 @@ ++docker build -t mlperf/single_stage_detector . 
++docker run -v /home/mlperf_inference_data:/home/mlperf_inference_data \ ++ -v /home/scratch.etcheng_sw/mlperf-training:/mnt/training \ ++ --gpus=0 -e NVIDIA_VISIBLE_DEVICES=0 mlperf/single_stage_detector:latest \ ++ python pth_to_onnx.py \ ++ --num-classes 264 \ ++ --image-size 800 800 \ ++ --input /home/mlperf_inference_data/models/retinanet-resnext50-32x4d/new/retinanet_model_10.pth \ ++ --output /mnt/training/resnext-retinanet-ckpts/onnx/retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx diff --git a/script/get-nvidia-docker/README.md b/script/get-nvidia-docker/README.md new file mode 100644 index 0000000000..ec8e714c2f --- /dev/null +++ b/script/get-nvidia-docker/README.md @@ -0,0 +1,123 @@ +Automatically generated README for this automation recipe: **get-nvidia-docker** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-nvidia-docker,465ae240998e4779) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-docker)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,install,nvidia,nvidia-container-toolkit,nvidia-docker,engine* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get install nvidia nvidia-container-toolkit nvidia-docker engine" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,install,nvidia,nvidia-container-toolkit,nvidia-docker,engine` + +`cm run script --tags=get,install,nvidia,nvidia-container-toolkit,nvidia-docker,engine ` + +*or* + +`cmr "get install nvidia nvidia-container-toolkit nvidia-docker engine"` + +`cmr "get install nvidia nvidia-container-toolkit nvidia-docker engine " ` + + +#### Run this script from Python + +
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,install,nvidia,nvidia-container-toolkit,nvidia-docker,engine',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+   print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,install,nvidia,nvidia-container-toolkit,nvidia-docker,engine"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,install,nvidia,nvidia-container-toolkit,nvidia-docker,engine) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get install nvidia nvidia-container-toolkit nvidia-docker engine" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+</details>
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-docker/_cm.json)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * get,docker
+       - CM script: [get-docker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-docker)
+  1. Run "preprocess" function from customize.py
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-docker/_cm.json)
+  1. ***Run native script if exists***
+     * [run-ubuntu.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-docker/run-ubuntu.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-docker/_cm.json)
+  1. Run "postprocess" function from customize.py
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-docker/_cm.json)
+
+___
+### Script output
+`cmr "get install nvidia nvidia-container-toolkit nvidia-docker engine " -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
diff --git a/script/get-nvidia-docker/_cm.json b/script/get-nvidia-docker/_cm.json
new file mode 100644
index 0000000000..7cd1c042c7
--- /dev/null
+++ b/script/get-nvidia-docker/_cm.json
@@ -0,0 +1,34 @@
+{
+  "alias": "get-nvidia-docker",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "cache": true,
+  "category": "Detection or installation of tools and artifacts",
+  "deps": [
+    {
+      "tags": "detect,os"
+    },
+    {
+      "tags": "get,docker"
+    }
+  ],
+  "docker_input_mapping": {},
+  "input_description": {},
+  "input_mapping": {},
+  "new_env_keys": [],
+  "new_state_keys": [],
+  "post_deps": [],
+  "posthook_deps": [],
+  "prehook_deps": [],
+  "tags": [
+    "get",
+    "install",
+    "nvidia",
+    "nvidia-container-toolkit",
+    "nvidia-docker",
+    "engine"
+  ],
+  "uid": "465ae240998e4779",
+  "variations": {},
+  "versions": {}
+}
diff --git a/script/get-nvidia-docker/run-ubuntu.sh b/script/get-nvidia-docker/run-ubuntu.sh
new file mode 100644
index 0000000000..21d51161ff
--- /dev/null
+++ b/script/get-nvidia-docker/run-ubuntu.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+if [[ ! -f /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg ]]; then
+  cmd="curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg"
+  echo "$cmd"
+  eval "$cmd"
+fi
+
+cmd="curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
+    sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
+    sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list \
+  && \
+    sudo apt-get update"
+echo "$cmd"
+eval "$cmd"
+test $? -eq 0 || exit $?
+
+cmd="sudo apt-get install -y nvidia-container-toolkit"
+echo "$cmd"
+eval "$cmd"
+test $? -eq 0 || exit $?
+
+cmd="sudo nvidia-ctk runtime configure --runtime=docker"
+echo "$cmd"
+eval "$cmd"
+test $? -eq 0 || exit $?
+
+# "sudo systemctl restart docker" also works on systemd hosts; the service wrapper is used for wider compatibility
+cmd="sudo service docker restart"
+echo "$cmd"
+eval "$cmd"
+test $? -eq 0 || exit $?
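+
+# (Optional) quick sanity check, assuming an NVIDIA driver is already installed;
+# the CUDA image tag is illustrative:
+#   docker run --rm --gpus all nvidia/cuda:12.3.1-base-ubuntu22.04 nvidia-smi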
diff --git a/script/get-nvidia-mitten/README-extra.md b/script/get-nvidia-mitten/README-extra.md new file mode 100644 index 0000000000..8c1a219486 --- /dev/null +++ b/script/get-nvidia-mitten/README-extra.md @@ -0,0 +1 @@ +TBD: compile https://github.com/NVIDIA/mitten diff --git a/script/get-nvidia-mitten/README.md b/script/get-nvidia-mitten/README.md new file mode 100644 index 0000000000..0e85547537 --- /dev/null +++ b/script/get-nvidia-mitten/README.md @@ -0,0 +1,134 @@ +Automatically generated README for this automation recipe: **get-nvidia-mitten** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-nvidia-mitten,1c045f2902374de9) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-mitten)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,nvidia,mitten,nvidia-mitten* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get nvidia mitten nvidia-mitten" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,nvidia,mitten,nvidia-mitten` + +`cm run script --tags=get,nvidia,mitten,nvidia-mitten ` + +*or* + +`cmr "get nvidia mitten nvidia-mitten"` + +`cmr "get nvidia mitten nvidia-mitten " ` + + +#### Run this script from Python + +
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,nvidia,mitten,nvidia-mitten',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+   print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,nvidia,mitten,nvidia-mitten"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,nvidia,mitten,nvidia-mitten) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get nvidia mitten nvidia-mitten" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +#### Versions +Default version: `master` + +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-mitten/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,python3 + * CM names: `--adr.['python3', 'python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_pycuda + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,git,_repo.https://github.com/NVIDIA/mitten + * CM names: `--adr.['nvidia-mitten-git-src']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-mitten/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-mitten/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-mitten/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-mitten/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-mitten/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-mitten/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-nvidia-mitten/_cm.json) + +___ +### Script output +`cmr "get nvidia mitten nvidia-mitten " -j` +#### New environment keys (filter) + +* `CM_NVIDIA_MITTEN*` +#### New environment keys auto-detected from customize diff --git a/script/get-nvidia-mitten/_cm.json b/script/get-nvidia-mitten/_cm.json new file mode 100644 index 0000000000..94675091bd --- /dev/null +++ b/script/get-nvidia-mitten/_cm.json @@ -0,0 +1,52 @@ +{ + "alias": "get-nvidia-mitten", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "default_version": "master", + "deps": [ + { + "tags": "detect,os" + }, + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3" + }, + { + "tags": "get,generic-python-lib,_pycuda" + }, + { + "tags": "get,git,_repo.https://github.com/NVIDIA/mitten", + "force_env_keys": [ + "CM_GIT_CHECKOUT" + ], + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_NVIDIA_MITTEN_SRC" + }, + "extra_cache_tags": "nvidia,mitten,src", + "names": [ + "nvidia-mitten-git-src" + ] + } + ], + "extra_cache_tags_from_env": [ + { + "env": "CM_PYTHON_CACHE_TAGS", + "prefix": "python-" + } + ], + "new_env_keys": [ + "CM_NVIDIA_MITTEN*" + ], + "tags": [ + "get", + "nvidia", + "mitten", + "nvidia-mitten" + ], + "uid": "1c045f2902374de9" +} diff --git a/script/get-nvidia-mitten/customize.py b/script/get-nvidia-mitten/customize.py new file mode 100644 index 0000000000..d38c8c2ca5 --- /dev/null +++ b/script/get-nvidia-mitten/customize.py @@ -0,0 +1,22 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + # TBD + + return {'return':0} + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + + # TBD + cur_dir = os.getcwd() + + + + 
return {'return':0} diff --git a/script/get-nvidia-mitten/run.bat b/script/get-nvidia-mitten/run.bat new file mode 100644 index 0000000000..ceaa88fea4 --- /dev/null +++ b/script/get-nvidia-mitten/run.bat @@ -0,0 +1,3 @@ +@echo off + +echo TBD diff --git a/script/get-nvidia-mitten/run.sh b/script/get-nvidia-mitten/run.sh new file mode 100644 index 0000000000..28b1ea4ce1 --- /dev/null +++ b/script/get-nvidia-mitten/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash +cd ${CM_NVIDIA_MITTEN_SRC} +${CM_PYTHON_BIN_WITH_PATH} -m pip install . +test $? -eq 0 || exit $? diff --git a/script/get-onnxruntime-prebuilt/README.md b/script/get-onnxruntime-prebuilt/README.md new file mode 100644 index 0000000000..f9c0905630 --- /dev/null +++ b/script/get-onnxruntime-prebuilt/README.md @@ -0,0 +1,159 @@ +Automatically generated README for this automation recipe: **get-onnxruntime-prebuilt** + +Category: **AI/ML frameworks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-onnxruntime-prebuilt,be02c84ff57c4244) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-onnxruntime-prebuilt)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install onnxruntime get prebuilt lib lang-c lang-cpp" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp` + +`cm run script --tags=install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp[,variations] ` + +*or* + +`cmr "install onnxruntime get prebuilt lib lang-c lang-cpp"` + +`cmr "install onnxruntime get prebuilt lib lang-c lang-cpp [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+   print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install onnxruntime get prebuilt lib lang-c lang-cpp[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**device**" +
+ Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *CM_ONNXRUNTIME_DEVICE*: `` + - Workflow: + * `_cuda` + - Environment variables: + - *CM_ONNXRUNTIME_DEVICE*: `gpu` + - Workflow: + +
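+
+  A sketch of how the device variation shapes the prebuilt archive name that `customize.py` later downloads (Linux x64 assumed):
+
+```python
+version = '1.16.3'
+machine = 'x64'
+for device in ['', 'gpu']:   # '' for `_cpu`, 'gpu' for `_cuda`
+    suffix = machine + ('-' + device if device else '')
+    print('onnxruntime-linux-{}-{}.tgz'.format(suffix, version))
+# onnxruntime-linux-x64-1.16.3.tgz
+# onnxruntime-linux-x64-gpu-1.16.3.tgz
+```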
+ + +#### Default variations + +`_cpu` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +#### Versions +Default version: `1.16.3` + +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-onnxruntime-prebuilt/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-onnxruntime-prebuilt/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-onnxruntime-prebuilt/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-onnxruntime-prebuilt/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-onnxruntime-prebuilt/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-onnxruntime-prebuilt/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-onnxruntime-prebuilt/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-onnxruntime-prebuilt/_cm.json) + +___ +### Script output +`cmr "install onnxruntime get prebuilt lib lang-c lang-cpp [,variations]" -j` +#### New environment keys (filter) + +* `+CPLUS_INCLUDE_PATH` +* `+C_INCLUDE_PATH` +* `+DYLD_FALLBACK_LIBRARY_PATH` +* `+LD_LIBRARY_PATH` +* `+PATH` +* `CM_ONNXRUNTIME_INCLUDE_PATH` +* `CM_ONNXRUNTIME_LIB_PATH` +#### New environment keys auto-detected from customize + +* `CM_ONNXRUNTIME_INCLUDE_PATH` +* `CM_ONNXRUNTIME_LIB_PATH` \ No newline at end of file diff --git a/script/get-onnxruntime-prebuilt/_cm.json b/script/get-onnxruntime-prebuilt/_cm.json new file mode 100644 index 0000000000..4a9b77d795 --- /dev/null +++ b/script/get-onnxruntime-prebuilt/_cm.json @@ -0,0 +1,48 @@ +{ + "alias": "get-onnxruntime-prebuilt", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "AI/ML frameworks", + "cache": true, + "clean_files": [], + "default_version": "1.16.3", + "deps": [ + { + "tags": "detect,os" + } + ], + "new_env_keys": [ + "CM_ONNXRUNTIME_LIB_PATH", + "CM_ONNXRUNTIME_INCLUDE_PATH", + "+PATH", + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ], + "tags": [ + "install", + "onnxruntime", + "get", + "prebuilt", + "lib", + "lang-c", + "lang-cpp" + ], + "uid": "be02c84ff57c4244", + "variations": { + "cpu": { + "group": "device", + "default": true, + "env": { + "CM_ONNXRUNTIME_DEVICE": "" + } + }, + "cuda": { + "group": "device", + "env": { + "CM_ONNXRUNTIME_DEVICE": "gpu" + } + } + } +} diff --git a/script/get-onnxruntime-prebuilt/customize.py b/script/get-onnxruntime-prebuilt/customize.py new file mode 100644 index 0000000000..786bc8122c --- /dev/null +++ b/script/get-onnxruntime-prebuilt/customize.py @@ -0,0 +1,74 @@ +from cmind import utils +import os + +def preprocess(i): + os_info = i['os_info'] + env = i['env'] + + machine = env.get('CM_HOST_OS_MACHINE','') + if machine == '': machine = 'x86_64' + if machine == 'x86_64': machine = 'x64' + + hostos=env['CM_HOST_OS_TYPE'] + + ext = '.tgz' + + if hostos =='darwin': hostos='osx' + elif hostos =='windows': + hostos='win' + ext = '.zip' + + device=env.get('CM_ONNXRUNTIME_DEVICE','') + if device!='': machine+='-'+device + + version 
= env['CM_VERSION'] + + FOLDER = 'onnxruntime-{}-{}-{}'.format(hostos, machine, version) + + FILENAME = FOLDER + ext + + URL = 'https://github.com/microsoft/onnxruntime/releases/download/v{}/{}'.format(version, FILENAME) + + print ('') + print ('Downloading from {}'.format(URL)) + print ('') + + env['FOLDER'] = FOLDER + env['FILENAME'] = FILENAME + env['URL'] = URL + + + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + hostos=env['CM_HOST_OS_TYPE'] + + install_folder = env['CM_TMP_INSTALL_FOLDER'] + + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: +# 20221024: we save and restore env in the main script and can clean env here for determinism +# if key not in env: + env[key] = [] + + include_path = os.path.join(os.getcwd(), 'install', install_folder, 'include') + + env['+C_INCLUDE_PATH'].append(include_path) + env['+CPLUS_INCLUDE_PATH'].append(include_path) + + lib_path = os.path.join(os.getcwd(), 'install', install_folder, 'lib') + + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + if hostos =='windows': + # For dynamic libraries + env['+PATH'] = [lib_path] + + env['CM_ONNXRUNTIME_LIB_PATH'] = lib_path + env['CM_ONNXRUNTIME_INCLUDE_PATH'] = include_path + + return {'return':0} diff --git a/script/get-onnxruntime-prebuilt/run.bat b/script/get-onnxruntime-prebuilt/run.bat new file mode 100644 index 0000000000..ea9ebc982c --- /dev/null +++ b/script/get-onnxruntime-prebuilt/run.bat @@ -0,0 +1,10 @@ +del /Q /S install +del /Q %FILENAME% + +wget --no-check-certificate %URL% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +unzip %FILENAME% -d install +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo CM_TMP_INSTALL_FOLDER=%FOLDER% > tmp-run-env.out diff --git a/script/get-onnxruntime-prebuilt/run.sh b/script/get-onnxruntime-prebuilt/run.sh new file mode 100644 index 0000000000..6be34ea8a8 --- /dev/null +++ b/script/get-onnxruntime-prebuilt/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +rm -rf install +rm -f ${FILENAME} + +mkdir -p install + +wget --no-check-certificate ${URL} +test $? -eq 0 || exit 1 + +tar -C install -xzf ${FILENAME} +test $? -eq 0 || exit 1 + +echo "CM_TMP_INSTALL_FOLDER=$FOLDER" > tmp-run-env.out diff --git a/script/get-openssl/README-extra.md b/script/get-openssl/README-extra.md new file mode 100644 index 0000000000..c4f88f9754 --- /dev/null +++ b/script/get-openssl/README-extra.md @@ -0,0 +1,8 @@ +# Get OpenSSL +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects openssl installed on the system and if not found calls the [install script for openssl](../script/install-openssl). + +## Exported Variables +* `CM_OPENSSL_BIN_WITH_PATH` + +## Supported and Tested OS +1. 
Ubuntu 18.04, 20.04, 22.04 diff --git a/script/get-openssl/README.md b/script/get-openssl/README.md new file mode 100644 index 0000000000..242bfc2c15 --- /dev/null +++ b/script/get-openssl/README.md @@ -0,0 +1,126 @@ +Automatically generated README for this automation recipe: **get-openssl** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-openssl,febdae70e9e64e30) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-openssl)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,openssl,lib,lib-openssl* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get openssl lib lib-openssl" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,openssl,lib,lib-openssl` + +`cm run script --tags=get,openssl,lib,lib-openssl ` + +*or* + +`cmr "get openssl lib lib-openssl"` + +`cmr "get openssl lib lib-openssl " ` + + +#### Run this script from Python + +
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,openssl,lib,lib-openssl',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
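+The minimal sketch below shows one way to consume the result of such a call programmatically. It is an illustration only: it assumes that the CM API returns the exported keys listed under [Script output](#script-output) in a `new_env` dictionary.
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,openssl,lib,lib-openssl',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+else:
+    # Assumption: exported keys such as CM_OPENSSL_INSTALLED_PATH come back in 'new_env'
+    new_env = r.get('new_env', {})
+    print (new_env.get('CM_OPENSSL_INSTALLED_PATH', ''))
+```
+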
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,openssl,lib,lib-openssl"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,openssl,lib,lib-openssl) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get openssl lib lib-openssl" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-openssl/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-openssl/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-openssl/_cm.json)*** + * install,openssl + * `if (CM_REQUIRE_INSTALL == yes)` + - CM script: [install-openssl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-openssl) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-openssl/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-openssl/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-openssl/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-openssl/_cm.json) + +___ +### Script output +`cmr "get openssl lib lib-openssl " -j` +#### New environment keys (filter) + +* `+LD_LIBRARY_PATH` +* `CM_OPENSSL_*` +#### New environment keys auto-detected from customize + +* `CM_OPENSSL_INSTALLED_PATH` \ No newline at end of file diff --git a/script/get-openssl/_cm.json b/script/get-openssl/_cm.json new file mode 100644 index 0000000000..20cdaa778f --- /dev/null +++ b/script/get-openssl/_cm.json @@ -0,0 +1,33 @@ +{ + "alias": "get-openssl", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Detection or installation of tools and artifacts", + "cache": true, + "clean_files": [], + "env": { + "CM_REQUIRE_INSTALL": "no" + }, + "new_env_keys": [ + "CM_OPENSSL_*", + "+LD_LIBRARY_PATH" + ], + "prehook_deps": [ + { + "enable_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "reuse_version": true, + "tags": "install,openssl" + } + ], + "tags": [ + "get", + "openssl", + "lib", + "lib-openssl" + ], + "uid": "febdae70e9e64e30" +} diff --git a/script/get-openssl/customize.py b/script/get-openssl/customize.py new file mode 100644 index 0000000000..9d126fd79e --- /dev/null +++ b/script/get-openssl/customize.py @@ -0,0 +1,57 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'openssl' + if 'CM_OPENSSL_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': 'PATH', + 'detect_version':True, + 'env_path_key':'CM_OPENSSL_BIN_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':i['recursion_spaces']}) + if r['return']>0: + if r['return'] == 16 and os_info['platform'] != 'windows': + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + return r + + return {'return':0} + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'OpenSSL\s*([\d.]+)', + 'group_number': 1, + 'env_key':'CM_OPENSSL_VERSION', + 'which_env':i['env']}) + if r['return'] >0: return r + + version = r['version'] + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return':0, 'version':version} + + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] >0: return r + version = r['version'] + found_file_path = 
env['CM_OPENSSL_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + env['CM_OPENSSL_INSTALLED_PATH'] = found_path + + # Save tags that can be used to specialize further dependencies (such as python packages) + tags = 'version-'+version + + return {'return':0, 'version': version} diff --git a/script/get-openssl/run.sh b/script/get-openssl/run.sh new file mode 100644 index 0000000000..14277c91a0 --- /dev/null +++ b/script/get-openssl/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash +openssl_bin=${CM_OPENSSL_BIN_WITH_PATH} +${openssl_bin} version > tmp-ver.out 2>/dev/null +test $? -eq 0 || exit 1 diff --git a/script/get-preprocessed-dataset-criteo/README-extra.md b/script/get-preprocessed-dataset-criteo/README-extra.md new file mode 100644 index 0000000000..7a6f991373 --- /dev/null +++ b/script/get-preprocessed-dataset-criteo/README-extra.md @@ -0,0 +1,16 @@ +# Get Preprocessed Criteo Dataset +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) preprocesses the Criteo dataset. + +## How To +```bash +cm run script --tags=get,criteo,preprocessed --threads=[NUM_THREADS] +``` +where, +* `[DIRECTORY]:` is the folder to store the preprocessed dataset. Default is current work directory +* `[NUM_THREADS:]` is the number of threads to do preprocessing. Default is number of host cpus. + + +## Exported Variables +* `[CM_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed images are stored + + diff --git a/script/get-preprocessed-dataset-criteo/README.md b/script/get-preprocessed-dataset-criteo/README.md new file mode 100644 index 0000000000..6b8543a3fd --- /dev/null +++ b/script/get-preprocessed-dataset-criteo/README.md @@ -0,0 +1,227 @@ +Automatically generated README for this automation recipe: **get-preprocessed-dataset-criteo** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-preprocessed-dataset-criteo,afa59956272a4ba4) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-criteo)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,dataset,criteo,recommendation,dlrm,preprocessed* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get dataset criteo recommendation dlrm preprocessed" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,dataset,criteo,recommendation,dlrm,preprocessed` + +`cm run script --tags=get,dataset,criteo,recommendation,dlrm,preprocessed[,variations] [--input_flags]` + +*or* + +`cmr "get dataset criteo recommendation dlrm preprocessed"` + +`cmr "get dataset criteo recommendation dlrm preprocessed [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,criteo,recommendation,dlrm,preprocessed',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
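+Input flags from the [script flags](#script-flags-mapped-to-environment) section below can be passed as top-level keys of the same call. A sketch with hypothetical values (the destination folder and thread count are illustrative, not defaults):
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,criteo,recommendation,dlrm,preprocessed,_50',
+                  'dir':'/data/criteo/preprocessed',  # --dir -> CM_DATASET_PREPROCESSED_PATH
+                  'threads':8,                        # --threads -> CM_NUM_PREPROCESS_THREADS
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```
+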
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,criteo,recommendation,dlrm,preprocessed"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,criteo,recommendation,dlrm,preprocessed) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset criteo recommendation dlrm preprocessed[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_1` + - Environment variables: + - *CM_DATASET_SIZE*: `1` + - Workflow: + * `_50` + - Environment variables: + - *CM_DATASET_SIZE*: `50` + - Workflow: + * `_fake` + - Environment variables: + - *CM_CRITEO_FAKE*: `yes` + - Workflow: + * `_full` + - Workflow: + * `_validation` + - Workflow: + +
+ + + * Group "**type**" +
+ Click here to expand this section. + + * **`_multihot`** (default) + - Environment variables: + - *CM_DATASET_CRITEO_MULTIHOT*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,mlperf,training,src + * CM names: `--adr.['mlperf-training', 'training-src']...` + - CM script: [get-mlperf-training-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-training-src) + * get,generic-python-lib,_package.typing_inspect + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.iopath + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.fbgemm_gpu + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.torchrec + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.pyre_extensions + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
+ + +#### Default variations + +`_multihot` + +#### Script flags mapped to environment +
+<summary>Click here to expand this section.</summary>
+
+* `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value`
+* `--output_dir=value` → `CM_DATASET_PREPROCESSED_OUTPUT_PATH=value`
+* `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "dir":...})
+```
+
+</details>
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
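+As noted above, any environment key can also be set from the Python API. A sketch (for illustration only; `CM_DATASET_SIZE` is normally set by the `_1`/`_50` variations):
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,criteo,recommendation,dlrm,preprocessed',
+                  # equivalent of --env.CM_DATASET_SIZE=50 on the command line
+                  'env':{'CM_DATASET_SIZE':'50'},
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```
+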
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-criteo/_cm.json)*** + * get,python3 + * CM names: `--adr.['python3', 'python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,dataset,criteo,original + * `if (CM_DATASET_PREPROCESSED_PATH != on)` + * CM names: `--adr.['original-dataset', 'criteo-dataset']...` + - CM script: [get-dataset-criteo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-criteo) + * get,dlrm,src + * CM names: `--adr.['dlrm-src']...` + - CM script: [get-dlrm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dlrm) + * mlperf,mlcommons,inference,source,src + * CM names: `--adr.['inference-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + * get,generic-python-lib,_scikit-learn + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torch + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_opencv-python + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_decorator + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_psutil + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_onnx + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_tqdm + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_mlperf_logging + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-criteo/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-criteo/_cm.json) + 1. ***Run native script if exists*** + * [run-multihot.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-criteo/run-multihot.sh) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-criteo/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-criteo/_cm.json) + 1. Run "postrocess" function from customize.py + 1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-criteo/_cm.json) + +___ +### Script output +`cmr "get dataset criteo recommendation dlrm preprocessed [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +#### New environment keys auto-detected from customize + +* `CM_DATASET_PREPROCESSED_PATH` \ No newline at end of file diff --git a/script/get-preprocessed-dataset-criteo/_cm.json b/script/get-preprocessed-dataset-criteo/_cm.json new file mode 100644 index 0000000000..a5b4c4e1f0 --- /dev/null +++ b/script/get-preprocessed-dataset-criteo/_cm.json @@ -0,0 +1,150 @@ +{ + "alias": "get-preprocessed-dataset-criteo", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "AI/ML datasets", + "cache": true, + "deps": [ + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3" + }, + { + "names": [ + "original-dataset", + "criteo-dataset" + ], + "tags": "get,dataset,criteo,original", + "skip_if_env": { + "CM_DATASET_PREPROCESSED_PATH": [ + "on" + ] + } + }, + { + "names": [ + "dlrm-src" + ], + "tags": "get,dlrm,src" + }, + { + "names": [ + "inference-src" + ], + "tags": "mlperf,mlcommons,inference,source,src" + }, + { + "tags": "get,generic-python-lib,_scikit-learn" + }, + { + "tags": "get,generic-python-lib,_torch" + }, + { + "tags": "get,generic-python-lib,_opencv-python" + }, + { + "tags": "get,generic-python-lib,_decorator" + }, + { + "tags": "get,generic-python-lib,_psutil" + }, + { + "tags": "get,generic-python-lib,_onnx" + }, + { + "tags": "get,generic-python-lib,_tqdm" + }, + { + "tags": "get,generic-python-lib,_mlperf_logging" + } + ], + "input_mapping": { + "dir": "CM_DATASET_PREPROCESSED_PATH", + "output_dir": "CM_DATASET_PREPROCESSED_OUTPUT_PATH", + "threads": "CM_NUM_PREPROCESS_THREADS" + }, + "new_env_keys": [ + "CM_DATASET_*" + ], + "tags": [ + "get", + "dataset", + "criteo", + "recommendation", + "dlrm", + "preprocessed" + ], + "uid": "afa59956272a4ba4", + "variations": { + "1": { + "env": { + "CM_DATASET_SIZE": "1" + } + }, + "50": { + "env": { + "CM_DATASET_SIZE": "50" + } + }, + "full": { + "env": { + }, + "add_deps_recursive": { + "original-dataset": { + "tags": "-_fake" + } + } + }, + "validation": { + "add_deps": { + "original-dataset": { + "tags": "_validation" + } + } + }, + "fake": { + "add_deps_recursive": { + "original-dataset": { + "tags": "_fake" + } + }, + "env": { + "CM_CRITEO_FAKE": "yes" + } + }, + "multihot": { + "group": "type", + "default": true, + "env": { + "CM_DATASET_CRITEO_MULTIHOT": "yes" + }, + "deps": [ + { + "tags": "get,mlperf,training,src", + "names": [ + "mlperf-training", + "training-src" + ] + }, + { + "tags": "get,generic-python-lib,_package.typing_inspect" + }, + { + "tags": "get,generic-python-lib,_package.iopath" + }, + { + "tags": "get,generic-python-lib,_package.fbgemm_gpu" + }, + { + "tags": "get,generic-python-lib,_package.torchrec" + }, + { + "tags": "get,generic-python-lib,_package.pyre_extensions" + } + ] + } + } +} diff --git a/script/get-preprocessed-dataset-criteo/customize.py b/script/get-preprocessed-dataset-criteo/customize.py new file mode 100644 index 0000000000..d6826e38cb --- /dev/null +++ b/script/get-preprocessed-dataset-criteo/customize.py @@ -0,0 +1,31 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + env = i['env'] + + skip_preprocessing = False + if env.get('CM_DATASET_PREPROCESSED_PATH', '') != '': + ''' + Path with preprocessed 
dataset given as input + ''' + skip_preprocessing = True + print("Using preprocessed criteo dataset from '" + env['CM_DATASET_PREPROCESSED_PATH'] +"'") + + if not skip_preprocessing and env.get('CM_DATASET_PREPROCESSED_OUTPUT_PATH','') != '': + env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd() + + if not skip_preprocessing and env.get('CM_DATASET_CRITEO_MULTIHOT', '') == 'yes': + i['run_script_input']['script_name'] = "run-multihot" + #${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/preprocess.py + output_dir = env['CM_DATASET_PREPROCESSED_PATH'] + dataset_path = env['CM_DATASET_PATH'] + tmp_dir = os.path.join(output_dir, "tmp") + run_dir = os.path.join(env['CM_MLPERF_TRAINING_SOURCE'], "recommendation_v2", "torchrec_dlrm", "scripts") + env['CM_RUN_CMD'] = f'cd {run_dir} && bash ./process_Criteo_1TB_Click_Logs_dataset.sh {dataset_path} {tmp_dir} {output_dir} ' + + print("Using MLCommons Training source from '" + env['CM_MLPERF_TRAINING_SOURCE'] +"'") + + return {'return': 0} diff --git a/script/get-preprocessed-dataset-criteo/preprocess.py b/script/get-preprocessed-dataset-criteo/preprocess.py new file mode 100644 index 0000000000..bd2e5f5430 --- /dev/null +++ b/script/get-preprocessed-dataset-criteo/preprocess.py @@ -0,0 +1,32 @@ +import os +import sys +mlperf_dlrm_path = os.environ['CM_MLPERF_INFERENCE_DLRM_PATH'] +python_path = os.path.join(mlperf_dlrm_path, "pytorch", "python") +sys.path.insert(0, python_path) + +import criteo +import dataset + +dataset_name = os.environ['CM_DATASET'] +dataset_path = os.environ['CM_DATASET_PATH'] +dataset_list = os.environ.get('CM_DATASET_IMAGES_LIST', None) +samples_to_aggregate_fix = os.environ.get('CM_DATASET_SAMPLES_TO_AGGREGATE_FIX', None) +samples_to_aggregate_min = os.environ.get('CM_DATASET_SAMPLES_TO_AGGREGATE_MIN', None) +samples_to_aggregate_max = os.environ.get('CM_DATASET_SAMPLES_TO_AGGREGATE_MAX', None) +count = int(os.environ.get('CM_DATASET_SIZE', 0)) or None +max_ind_range = os.environ.get('CM_DATASET_MAX_IND_RANGE',-1) +threads = os.environ.get('CM_NUM_THREADS', os.cpu_count()) +threads = os.environ.get('CM_NUM_PREPROCESS_THREADS', threads) + +criteo.Criteo(data_path=dataset_path, + name=dataset_name, + pre_process = criteo.pre_process_criteo_dlrm, + use_cache=True, + samples_to_aggregate_fix=samples_to_aggregate_fix, + samples_to_aggregate_min=samples_to_aggregate_min, + samples_to_aggregate_max=samples_to_aggregate_max, + max_ind_range=max_ind_range, + count=count, + mlperf_bin_loader=False, + test_num_workers=threads + ) diff --git a/script/get-preprocessed-dataset-criteo/run-multihot.sh b/script/get-preprocessed-dataset-criteo/run-multihot.sh new file mode 100644 index 0000000000..e4741b41d5 --- /dev/null +++ b/script/get-preprocessed-dataset-criteo/run-multihot.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +CUR=$PWD +echo ${CM_RUN_CMD} +eval ${CM_RUN_CMD} +test $? -eq 0 || exit $? 
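+
+# For reference (not executed): customize.py composes ${CM_RUN_CMD} roughly as
+#   cd ${CM_MLPERF_TRAINING_SOURCE}/recommendation_v2/torchrec_dlrm/scripts && \
+#   bash ./process_Criteo_1TB_Click_Logs_dataset.sh ${CM_DATASET_PATH} \
+#        ${CM_DATASET_PREPROCESSED_PATH}/tmp ${CM_DATASET_PREPROCESSED_PATH}
+# so this wrapper only echoes and evaluates that command.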
diff --git a/script/get-preprocessed-dataset-criteo/run.sh b/script/get-preprocessed-dataset-criteo/run.sh new file mode 100644 index 0000000000..5c080f4c0d --- /dev/null +++ b/script/get-preprocessed-dataset-criteo/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +CUR=$PWD + +if [[ ${CM_CRITEO_FAKE} == "yes" ]]; then + exit 0 +fi +#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/preprocess.py diff --git a/script/get-preprocessed-dataset-generic/README.md b/script/get-preprocessed-dataset-generic/README.md new file mode 100644 index 0000000000..40a5f00177 --- /dev/null +++ b/script/get-preprocessed-dataset-generic/README.md @@ -0,0 +1,119 @@ +Automatically generated README for this automation recipe: **get-preprocesser-script-generic** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-preprocesser-script-generic,d5e603627e2046eb) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocesser-script-generic)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,preprocessor,generic,image-preprocessor,script* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get preprocessor generic image-preprocessor script" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,preprocessor,generic,image-preprocessor,script` + +`cm run script --tags=get,preprocessor,generic,image-preprocessor,script ` + +*or* + +`cmr "get preprocessor generic image-preprocessor script"` + +`cmr "get preprocessor generic image-preprocessor script " ` + + +#### Run this script from Python + +
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,preprocessor,generic,image-preprocessor,script',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
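+Since this script exports `+PYTHONPATH` pointing at its `src/` folder, the preprocessor module can be imported afterwards. A sketch, assuming the CM API returns exported keys in a `new_env` dictionary and that `opencv-python` and `numpy` are installed:
+
+```python
+import sys
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,preprocessor,generic,image-preprocessor,script',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+else:
+    # Assumption: '+PYTHONPATH' entries exported by this script come back in 'new_env'
+    sys.path.extend(r.get('new_env', {}).get('+PYTHONPATH', []))
+    import generic_preprocess  # preprocessor shipped in this script's src/ folder
+```
+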
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,preprocessor,generic,image-preprocessor,script"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,preprocessor,generic,image-preprocessor,script) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get preprocessor generic image-preprocessor script" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocesser-script-generic/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocesser-script-generic/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocesser-script-generic/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocesser-script-generic/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocesser-script-generic/_cm.json) + +___ +### Script output +`cmr "get preprocessor generic image-preprocessor script " -j` +#### New environment keys (filter) + +* `+PYTHONPATH` +#### New environment keys auto-detected from customize diff --git a/script/get-preprocessed-dataset-generic/_cm.json b/script/get-preprocessed-dataset-generic/_cm.json new file mode 100644 index 0000000000..6e312a39ed --- /dev/null +++ b/script/get-preprocessed-dataset-generic/_cm.json @@ -0,0 +1,18 @@ +{ + "alias": "get-preprocesser-script-generic", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "category": "AI/ML datasets", + "new_env_keys": [ + "+PYTHONPATH" + ], + "tags": [ + "get", + "preprocessor", + "generic", + "image-preprocessor", + "script" + ], + "uid": "d5e603627e2046eb" +} diff --git a/script/get-preprocessed-dataset-generic/customize.py b/script/get-preprocessed-dataset-generic/customize.py new file mode 100644 index 0000000000..b882aaf93d --- /dev/null +++ b/script/get-preprocessed-dataset-generic/customize.py @@ -0,0 +1,10 @@ +from cmind import utils +import os + +def preprocess(i): + + env = i['env'] + path = i['run_script_input']['path'] + env['+PYTHONPATH'] = [ os.path.join(path, "src") ] + + return {'return': 0} diff --git a/script/get-preprocessed-dataset-generic/src/generic_preprocess.py b/script/get-preprocessed-dataset-generic/src/generic_preprocess.py new file mode 100644 index 0000000000..752895db88 --- /dev/null +++ b/script/get-preprocessed-dataset-generic/src/generic_preprocess.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python3 + +supported_extensions = ['jpeg', 'jpg', 'gif', 'png'] + +import os +import cv2 +import numpy as np + +# Load and preprocess image +def load_image(image_path, # Full path to processing image + target_size, # Desired size of resulting image + intermediate_size = 0, # Scale to this size then crop to target size + crop_percentage = 87.5,# Crop to this percentage then scale to target size + data_type = 'uint8', # Data type to store + data_layout = 'nhwc', # Data layout to store + convert_to_bgr = False,# Swap image channel RGB -> BGR + interpolation_method = cv2.INTER_LINEAR # Interpolation method. + ): + + out_height = target_size + out_width = target_size + + def resize_with_aspectratio(img): + height, width, _ = img.shape + new_height = int(100. * out_height / crop_percentage) # intermediate oversized image from which to crop + new_width = int(100. 
* out_width / crop_percentage) # ---------------------- ,, --------------------- + if height > width: + w = new_width + h = int(new_height * height / width) + else: + h = new_height + w = int(new_width * width / height) + img = cv2.resize(img, (w, h), interpolation = interpolation_method) + return img + + def center_crop(img): + height, width, _ = img.shape + left = int((width - out_width) / 2) + right = int((width + out_width) / 2) + top = int((height - out_height) / 2) + bottom = int((height + out_height) / 2) + img = img[top:bottom, left:right] + return img + + + img = cv2.imread(image_path) + + if len(img.shape) < 3 or img.shape[2] != 3: + img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) + else: + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + + # Mimic preprocessing steps from the official reference code. + img = resize_with_aspectratio(img) + img = center_crop(img) + + # Convert to BGR. + if convert_to_bgr: + img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) + + return img + + +def preprocess_files(selected_filenames, source_dir, destination_dir, crop_percentage, square_side, inter_size, convert_to_bgr, + data_type, data_layout, new_file_extension, normalize_data, subtract_mean, given_channel_means, given_channel_stds, quantize, quant_scale, quant_offset, convert_to_unsigned, interpolation_method): + "Go through the selected_filenames and preprocess all the files (optionally normalize and subtract mean)" + + output_filenames = [] + + for current_idx in range(len(selected_filenames)): + input_filename = selected_filenames[current_idx] + + full_input_path = os.path.join(source_dir, input_filename) + + image_data = load_image(image_path = full_input_path, + target_size = square_side, + intermediate_size = inter_size, + crop_percentage = crop_percentage, + data_type = data_type, + convert_to_bgr = convert_to_bgr, + interpolation_method = interpolation_method) + + image_data = np.asarray(image_data, dtype=data_type) + + # Normalize. + if normalize_data: + image_data = image_data/127.5 - 1.0 + + # Subtract mean value. + if subtract_mean: + if len(given_channel_means): + image_data -= given_channel_means + else: + image_data -= np.mean(image_data) + + # Subtract standard deviations. + if len(given_channel_stds): + image_data /= given_channel_stds + + # NHWC -> NCHW. + if data_layout == 'nchw': + image_data = image_data[:,:,0:3].transpose(2, 0, 1) + + # Value 1 for quantization to int8 + if quantize == 1: + image_data = quantize_to_int8(image_data, quant_scale, quant_offset) + + # Value 1 to convert from int8 to uint8 + if convert_to_unsigned == 1: + image_data = int8_to_uint8(image_data) + + output_filename = input_filename.rsplit('.', 1)[0] + '.' 
+ new_file_extension if new_file_extension else input_filename + + full_output_path = os.path.join(destination_dir, output_filename) + image_data.tofile(full_output_path) + + print("[{}]: Stored {}".format(current_idx+1, full_output_path) ) + + output_filenames.append(output_filename) + + return output_filenames + +def quantize_to_int8(image, scale, offset): + quant_image = (image/scale + offset).astype(np.float32) + output = np.copy(quant_image) + gtZero = (quant_image > 0).astype(int) + gtZero = gtZero * 0.5 + output=output+gtZero + ltZero = (quant_image < 0).astype(int) + ltZero = ltZero * (-0.5) + output=output+ltZero + return output.astype(np.int8) + + +def int8_to_uint8(image): + image = (image+128).astype(np.uint8) + return image + +def preprocess(): + import sys + + source_dir = os.environ['CM_DATASET_PATH'] + destination_dir = os.environ['CM_DATASET_PREPROCESSED_PATH'] + + square_side = int( os.environ['CM_DATASET_INPUT_SQUARE_SIDE'] ) + crop_percentage = float( os.environ['CM_DATASET_CROP_FACTOR'] ) + inter_size = int( os.getenv('CM_DATASET_INTERMEDIATE_SIZE', 0) ) + convert_to_bgr = int( os.getenv('CM_DATASET_CONVERT_TO_BGR', 0) ) + offset = int( os.getenv('CM_DATASET_SUBSET_OFFSET', 0) ) + volume = int( os.environ['CM_DATASET_SIZE'] ) + fof_name = os.getenv('CM_DATASET_SUBSET_FOF', 'files.txt') + data_type = os.getenv('CM_DATASET_DATA_TYPE_INPUT', 'float32') + data_layout = os.getenv('CM_DATASET_DATA_LAYOUT', '').lower() + new_file_extension = os.getenv('CM_DATASET_PREPROCESSED_EXTENSION', '') + normalize_data = int(os.getenv('CM_DATASET_NORMALIZE_DATA', '0')) + subtract_mean = int(os.getenv('CM_DATASET_SUBTRACT_MEANS', '0')) + given_channel_means = os.getenv('CM_DATASET_GIVEN_CHANNEL_MEANS', '') + given_channel_stds = os.getenv('CM_DATASET_GIVEN_CHANNEL_STDS', '') + quant_scale = float( os.environ['CM_DATASET_QUANT_SCALE'] ) + quant_offset = float( os.environ['CM_DATASET_QUANT_OFFSET'] ) + quantize = int( os.environ['CM_DATASET_QUANTIZE'] ) #1 for quantize to int8 + convert_to_unsigned = int( os.environ['CM_DATASET_CONVERT_TO_UNSIGNED'] ) #1 for int8 to uint8 + + images_list = os.getenv('CM_DATASET_IMAGES_LIST') + + if given_channel_means: + given_channel_means = [ float(x) for x in given_channel_means.split(' ') ] + + if given_channel_stds: + given_channel_stds = [ float(x) for x in given_channel_stds.split(' ') ] + + interpolation_method = os.getenv('CM_DATASET_INTERPOLATION_METHOD', '') + + print(("From: {}, To: {}, Size: {}, Crop: {}, InterSize: {}, 2BGR: {}, OFF: {}, VOL: '{}', FOF: {},"+ + " DTYPE: {}, DLAYOUT: {}, EXT: {}, NORM: {}, SMEAN: {}, GCM: {}, GSTD: {}, QUANTIZE: {}, QUANT_SCALE: {}, QUANT_OFFSET: {}, CONV_UNSIGNED: {}, INTER: {}").format( + source_dir, destination_dir, square_side, crop_percentage, inter_size, convert_to_bgr, offset, volume, fof_name, + data_type, data_layout, new_file_extension, normalize_data, subtract_mean, given_channel_means, given_channel_stds, quantize, quant_scale, quant_offset, convert_to_unsigned, interpolation_method) ) + + if interpolation_method == 'INTER_AREA': + # Used for ResNet in pre_process_vgg. + interpolation_method = cv2.INTER_AREA + else: + # Default interpolation method. 
+ interpolation_method = cv2.INTER_LINEAR + + filenames = [] + if images_list: + with open(images_list) as f: + filenames = f.read().splitlines() + else: + filenames = sorted(os.listdir(source_dir)) + + + if os.path.isdir(source_dir): + sorted_filenames = [filename for filename in filenames if any(filename.lower().endswith(extension) for extension in supported_extensions) and not filename.startswith(".") ] + + total_volume = len(sorted_filenames) + + if offset<0: # support offsets "from the right" + offset += total_volume + + selected_filenames = sorted_filenames[offset:offset+volume] + + assert len(selected_filenames) == volume + + output_filenames = preprocess_files( + selected_filenames, source_dir, destination_dir, crop_percentage, square_side, inter_size, convert_to_bgr, + data_type, data_layout, new_file_extension, normalize_data, subtract_mean, given_channel_means, given_channel_stds, quantize, quant_scale, quant_offset, convert_to_unsigned, interpolation_method) + + fof_full_path = os.path.join(destination_dir, fof_name) + with open(fof_full_path, 'w') as fof: + for filename in output_filenames: + fof.write(filename + '\n') diff --git a/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py b/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py new file mode 100644 index 0000000000..84e18ee397 --- /dev/null +++ b/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python3 + +import os +import json +import numpy as np +from PIL import Image +import torch +import torchvision + +SUPPORTED_EXTENSIONS = ['jpeg', 'jpg', 'gif', 'png'] + +def load_image(image_path, target_size, data_type='uint8', convert_to_bgr=False, + normalize_data=False, normalize_lower=-1, normalize_upper=1, + subtract_mean=False, given_channel_means='', given_channel_stds='', + quantize=0, quant_scale=1, quant_offset=0, convert_to_unsigned=0): + if not convert_to_bgr: + image = Image.open(image_path).convert('RGB') + else: + image = Image.open(image_path).convert('BGR') + + tensor_image = torchvision.transforms.functional.to_tensor(image) + mean = torch.as_tensor(given_channel_means) + std = torch.as_tensor(given_channel_stds) + normalized_image = (tensor_image - mean[:, None, None]) / std[:, None, None] + + resized_image = torch.nn.functional.interpolate(normalized_image[None], + size=(target_size, target_size), + mode='bilinear')[0].numpy() + + if quantize == 1: + resized_image = quantize_to_uint8(resized_image, quant_scale, quant_offset) + + original_height, original_width, _ = resized_image.shape + batch_shape = (1, target_size, target_size, 3) + batch_data = resized_image.reshape(batch_shape) + + return batch_data, original_width, original_height + +def quantize_to_uint8(image, scale, offset): + quantized_image = (image.astype(np.float64) / scale + offset).astype(np.float64) + output = np.round_(quantized_image) + output = np.clip(output, 0, 255) + return output.astype(np.uint8) + +def preprocess_files(selected_filenames, source_dir, destination_dir, square_side, + data_type, convert_to_bgr, normalize_data, normalize_lower, + normalize_upper, subtract_mean, given_channel_means, + given_channel_stds, quantize, quant_scale, quant_offset, + convert_to_unsigned, new_file_extension): + output_signatures = [] + + for current_idx, input_filename in enumerate(selected_filenames): + full_input_path = os.path.join(source_dir, input_filename) + image_data, original_width, original_height = load_image( 
+ image_path=full_input_path, + target_size=square_side, + data_type=data_type, + convert_to_bgr=convert_to_bgr, + normalize_data=normalize_data, + normalize_lower=normalize_lower, + normalize_upper=normalize_upper, + subtract_mean=subtract_mean, + given_channel_means=given_channel_means, + given_channel_stds=given_channel_stds, + quantize=quantize, + quant_scale=quant_scale, + quant_offset=quant_offset, + convert_to_unsigned=convert_to_unsigned + ) + + output_filename = f"{input_filename.rsplit('.', 1)[0]}.{new_file_extension}" if new_file_extension else input_filename + full_output_path = os.path.join(destination_dir, output_filename) + image_data.tofile(full_output_path) + + print(f"[{current_idx+1}]: Stored {full_output_path}") + output_signatures.append(f'{output_filename};{original_width};{original_height}') + + return output_signatures + +def preprocess(): + source_directory = os.environ['CM_DATASET_PATH'] + destination_directory = os.environ['CM_DATASET_PREPROCESSED_PATH'] + + intermediate_data_type = os.environ.get('CM_DATASET_INTERMEDIATE_DATA_TYPE', np.float32) + square_side = int(os.environ['CM_DATASET_INPUT_SQUARE_SIDE']) + crop_percentage = float(os.environ['CM_DATASET_CROP_FACTOR']) + inter_size = int(os.getenv('CM_DATASET_INTERMEDIATE_SIZE', 0)) + convert_to_bgr = int(os.getenv('CM_DATASET_CONVERT_TO_BGR', 0)) + offset = int(os.getenv('CM_DATASET_SUBSET_OFFSET', 0)) + volume = int(os.environ['CM_DATASET_SIZE']) + fof_name = os.getenv('CM_DATASET_SUBSET_FOF', 'files.txt') + data_type = os.getenv('CM_DATASET_DATA_TYPE_INPUT', 'float32') + input_data_type = os.getenv('CM_DATASET_DATA_TYPE_INPUT', 'float32') + data_layout = os.getenv('CM_DATASET_DATA_LAYOUT', '').lower() + new_file_extension = os.getenv('CM_DATASET_PREPROCESSED_EXTENSION', '') + normalize_data = int(os.getenv('CM_DATASET_NORMALIZE_DATA', '0')) + subtract_mean = int(os.getenv('CM_DATASET_SUBTRACT_MEANS', '0')) + given_channel_means = os.getenv('CM_DATASET_GIVEN_CHANNEL_MEANS', '') + given_channel_stds = os.getenv('CM_DATASET_GIVEN_CHANNEL_STDS', '') + quant_scale = float(os.environ['CM_DATASET_QUANT_SCALE']) + quant_offset = float(os.environ['CM_DATASET_QUANT_OFFSET']) + quantize = int(os.environ['CM_DATASET_QUANTIZE']) # 1 for quantize to int8 + convert_to_unsigned = int(os.environ['CM_DATASET_CONVERT_TO_UNSIGNED']) # 1 for int8 to uint8 + + images_list = os.getenv('CM_DATASET_IMAGES_LIST') + interpolation_method = os.getenv('CM_DATASET_INTERPOLATION_METHOD', '') + + annotations_filepath = os.environ['CM_DATASET_ANNOTATIONS_FILE_PATH'] + is_calibration = os.environ['CM_DATASET_TYPE'] == "calibration" + image_file = os.getenv('CM_IMAGE_FILE', '') + + normalize_lower = float(os.getenv('CM_DATASET_NORMALIZE_LOWER', -1.0)) + normalize_upper = float(os.getenv('CM_DATASET_NORMALIZE_UPPER', 1.0)) + + if given_channel_means: + given_channel_means = np.fromstring(given_channel_means, dtype=np.float32, sep=' ').astype(intermediate_data_type) + if convert_to_bgr: + given_channel_means = given_channel_means[::-1] + + given_channel_stds = os.getenv('CM_DATASET_GIVEN_CHANNEL_STDS', '') + if given_channel_stds: + given_channel_stds = np.fromstring(given_channel_stds, dtype=np.float32, sep=' ').astype(intermediate_data_type) + if convert_to_bgr: + given_channel_stds = given_channel_stds[::-1] + + print(f"From: {source_directory}, To: {destination_directory}, Size: {square_side}, Crop: {crop_percentage}, InterSize: {inter_size}, 2BGR: {convert_to_bgr}, " + + f"OFF: {offset}, VOL: '{volume}', FOF: {fof_name}, DTYPE: {data_type}, 
DLAYOUT: {data_layout}, EXT: {new_file_extension}, " + + f"NORM: {normalize_data}, SMEAN: {subtract_mean}, GCM: {given_channel_means}, GSTD: {given_channel_stds}, QUANTIZE: {quantize}, QUANT_SCALE: {quant_scale}, " + + f"QUANT_OFFSET: {quant_offset}, CONV_UNSIGNED: {convert_to_unsigned}, INTER: {interpolation_method}") + + + if image_file: + source_directory = os.path.dirname(image_file) + selected_filenames = [os.path.basename(image_file)] + else: + if annotations_filepath and not is_calibration: + with open(annotations_filepath, "r") as annotations_fh: + annotations_struct = json.load(annotations_fh) + ordered_filenames = [image_entry['file_name'] for image_entry in annotations_struct['images']] + elif os.path.isdir(source_directory): + ordered_filenames = [filename for filename in sorted(os.listdir(source_directory)) if any(filename.lower().endswith(extension) for extension in SUPPORTED_EXTENSIONS)] + else: + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), source_directory) + + total_volume = len(ordered_filenames) + + if offset < 0: + offset += total_volume + + if not volume: + volume = total_volume - offset + + selected_filenames = ordered_filenames[offset:offset + volume] + + output_signatures = preprocess_files(selected_filenames, source_directory, destination_directory, square_side, data_type, + convert_to_bgr, normalize_data, normalize_lower, normalize_upper, + subtract_mean, given_channel_means, given_channel_stds, quantize, + quant_scale, quant_offset, convert_to_unsigned, new_file_extension) + + fof_full_path = os.path.join(destination_directory, fof_name) + with open(fof_full_path, 'w') as fof_file: + for filename in output_signatures: + fof_file.write(f'{filename}\n') + +if __name__ == "__main__": + preprocess() + diff --git a/script/get-preprocessed-dataset-imagenet/README-extra.md b/script/get-preprocessed-dataset-imagenet/README-extra.md new file mode 100644 index 0000000000..cc2742fa51 --- /dev/null +++ b/script/get-preprocessed-dataset-imagenet/README-extra.md @@ -0,0 +1,26 @@ +# Get Preprocessed Imagenet Dataset +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) preprocesses the Imagenet dataset. + +## How To +```bash +cm run script --tags=get,imagenet,preprocessed,_[VARIATION] --dir=[DIRECTORY] --threads=[NUM_THREADS] +``` +where, +* `[DIRECTORY]:` is the folder to store the preprocessed dataset. Default is current work directory +* `[NUM_THREADS:]` is the number of threads to do preprocessing. Default is number of host cpus. 
+and the supported [VARIATIONS] (comma separated and beginning with _) are
+* `[1]:` Preprocess only 1 image
+* `[500]:` Preprocess first 500 images
+* `[full]:` Preprocess the full dataset
+* `[NHWC]:` Preprocess the dataset with the `Channel` component at the end
+* `[NCHW]:` Preprocess the dataset with the `Channel` component at the beginning
+
+## Input Variables coming from Dependencies
+* `[CM_DATASET_PATH]:` Folder path to the Imagenet dataset
+* `[CM_DATASET_AUX_PATH]:` Folder path to the Imagenet auxiliary dataset (to get the image list)
+* `[CM_DATASET_IMAGES_LIST]:` File path containing the image names
+
+## Exported Variables
+* `[CM_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed images are stored
+
+
diff --git a/script/get-preprocessed-dataset-imagenet/README.md b/script/get-preprocessed-dataset-imagenet/README.md
new file mode 100644
index 0000000000..83b0ea9348
--- /dev/null
+++ b/script/get-preprocessed-dataset-imagenet/README.md
@@ -0,0 +1,449 @@
+Automatically generated README for this automation recipe: **get-preprocessed-dataset-imagenet**
+
+Category: **AI/ML datasets**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-preprocessed-dataset-imagenet,f259d490bbaf45f5) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-imagenet)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,dataset,imagenet,ILSVRC,image-classification,preprocessed*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get dataset imagenet ILSVRC image-classification preprocessed" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,dataset,imagenet,ILSVRC,image-classification,preprocessed`
+
+`cm run script --tags=get,dataset,imagenet,ILSVRC,image-classification,preprocessed[,variations] [--input_flags]`
+
+*or*
+
+`cmr "get dataset imagenet ILSVRC image-classification preprocessed"`
+
+`cmr "get dataset imagenet ILSVRC image-classification preprocessed [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+<summary>Click here to expand this section.</summary>
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,imagenet,ILSVRC,image-classification,preprocessed',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
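+Variations from the [list below](#variations) are appended to the tags string, mirroring the CLI's `[,variations]` syntax. A sketch requesting the full validation set in NHWC layout:
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,imagenet,ILSVRC,image-classification,preprocessed,_NHWC,_full',
+                  'out':'con'})
+
+if r['return']>0:
+    print (r['error'])
+```
+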
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,imagenet,ILSVRC,image-classification,preprocessed"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,imagenet,ILSVRC,image-classification,preprocessed) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset imagenet ILSVRC image-classification preprocessed[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *Internal group (variations should not be selected manually)* +
+ Click here to expand this section. + + * `_mobilenet_` + - Environment variables: + - *CM_MODEL*: `mobilenet` + - Workflow: + * `_resnet50_` + - Environment variables: + - *CM_MODEL*: `resnet50` + - Workflow: + +
+ + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_500,validation` + - Workflow: + * `_default` + - Workflow: + * `_for.mobilenet,float32` + - Environment variables: + - *CM_DATASET_QUANTIZE*: `0` + - *CM_DATASET_GIVEN_CHANNEL_MEANS*: `` + - *CM_DATASET_NORMALIZE_DATA*: `1` + - *CM_DATASET_SUBTRACT_MEANS*: `0` + - Workflow: + * `_for.mobilenet,rgb8` + - Environment variables: + - *CM_DATASET_GIVEN_CHANNEL_MEANS*: `` + - *CM_DATASET_SUBTRACT_MEANS*: `0` + - *CM_DATASET_QUANTIZE*: `0` + - *CM_DATASET_NORMALIZE_DATA*: `0` + - *CM_DATASET_DATA_TYPE*: `uint8` + - Workflow: + * `_for.resnet50,float32` + - Workflow: + * `_for.resnet50,rgb8` + - Environment variables: + - *CM_DATASET_GIVEN_CHANNEL_MEANS*: `` + - *CM_DATASET_SUBTRACT_MEANS*: `0` + - *CM_DATASET_NORMALIZE_DATA*: `0` + - *CM_DATASET_QUANTIZE*: `0` + - *CM_DATASET_DATA_TYPE*: `uint8` + - Workflow: + * `_for.resnet50,rgb8,uint8` + - Environment variables: + - *CM_DATASET_GIVEN_CHANNEL_MEANS*: `123.68 116.78 103.94` + - *CM_DATASET_SUBTRACT_MEANS*: `1` + - *CM_DATASET_QUANTIZE*: `1` + - Workflow: + * `_for.resnet50,uint8` + - Environment variables: + - *CM_DATASET_QUANT_SCALE*: `1.18944883` + - *CM_DATASET_QUANT_OFFSET*: `0` + - Workflow: + * `_pytorch` + - Environment variables: + - *CM_PREPROCESS_PYTORCH*: `yes` + - *CM_MODEL*: `resnet50` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_torchvision + * CM names: `--adr.['torchvision']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_tflite_tpu` + - Environment variables: + - *CM_MODEL*: `resnet50` + - *CM_PREPROCESS_TFLITE_TPU*: `yes` + - Workflow: + +
+ + + * Group "**calibration-option**" +
+ Click here to expand this section. + + * `_mlperf.option1` + - Environment variables: + - *CM_DATASET_CALIBRATION_OPTION*: `one` + - Workflow: + * `_mlperf.option2` + - Environment variables: + - *CM_DATASET_CALIBRATION_OPTION*: `two` + - Workflow: + +
+ + + * Group "**dataset-type**" +
+ Click here to expand this section. + + * `_calibration` + - Environment variables: + - *CM_DATASET_TYPE*: `calibration` + - Workflow: + * **`_validation`** (default) + - Environment variables: + - *CM_DATASET_TYPE*: `validation` + - Workflow: + +
+ + + * Group "**extension**" +
+ Click here to expand this section. + + * `_rgb32` + - Environment variables: + - *CM_DATASET_PREPROCESSED_EXTENSION*: `rgb32` + - Workflow: + * `_rgb8` + - Environment variables: + - *CM_DATASET_PREPROCESSED_EXTENSION*: `rgb8` + - Workflow: + +
+ + + * Group "**interpolation-method**" +
+ Click here to expand this section. + + * `_inter.area` + - Environment variables: + - *CM_DATASET_INTERPOLATION_METHOD*: `INTER_AREA` + - Workflow: + * `_inter.linear` + - Environment variables: + - *CM_DATASET_INTERPOLATION_METHOD*: `INTER_LINEAR` + - Workflow: + +
+ + + * Group "**layout**" +
+ Click here to expand this section. + + * **`_NCHW`** (default) + - Environment variables: + - *CM_DATASET_DATA_LAYOUT*: `NCHW` + - Workflow: + * `_NHWC` + - Environment variables: + - *CM_DATASET_DATA_LAYOUT*: `NHWC` + - Workflow: + +
+ + + * Group "**model**" +
+ Click here to expand this section. + + * `_for.mobilenet` + - Workflow: + * `_for.resnet50` + - Environment variables: + - *CM_DATASET_SUBTRACT_MEANS*: `1` + - *CM_DATASET_GIVEN_CHANNEL_MEANS*: `123.68 116.78 103.94` + - *CM_DATASET_NORMALIZE_DATA*: `0` + - *CM_DATASET_INTERPOLATION_METHOD*: `INTER_AREA` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * `_float32` + - Environment variables: + - *CM_DATASET_DATA_TYPE*: `float32` + - *CM_DATASET_QUANTIZE*: `0` + - *CM_DATASET_CONVERT_TO_UNSIGNED*: `0` + - Workflow: + * `_int8` + - Environment variables: + - *CM_DATASET_DATA_TYPE*: `int8` + - *CM_DATASET_QUANTIZE*: `1` + - *CM_DATASET_CONVERT_TO_UNSIGNED*: `0` + - Workflow: + * `_uint8` + - Environment variables: + - *CM_DATASET_DATA_TYPE*: `uint8` + - *CM_DATASET_DATA_TYPE_INPUT*: `float32` + - *CM_DATASET_QUANTIZE*: `1` + - *CM_DATASET_CONVERT_TO_UNSIGNED*: `1` + - Workflow: + +
+ + + * Group "**preprocessing-source**" +
+ Click here to expand this section. + + * `_generic-preprocessor` + - Environment variables: + - *CM_DATASET_REFERENCE_PREPROCESSOR*: `0` + - Workflow: + 1. ***Read "prehook_deps" on other CM scripts*** + * get,generic,image-preprocessor + - CM script: [get-preprocesser-script-generic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocesser-script-generic) + * **`_mlcommons-reference-preprocessor`** (default) + - Environment variables: + - *CM_DATASET_REFERENCE_PREPROCESSOR*: `1` + - Workflow: + +
+ + + * Group "**resolution**" +
+ Click here to expand this section. + + * `_resolution.#` + - Environment variables: + - *CM_DATASET_INPUT_SQUARE_SIDE*: `#` + - Workflow: + * **`_resolution.224`** (default) + - Environment variables: + - *CM_DATASET_INPUT_SQUARE_SIDE*: `224` + - Workflow: + +
+ + + * Group "**size**" +
+ Click here to expand this section. + + * `_1` + - Environment variables: + - *CM_DATASET_SIZE*: `1` + - Workflow: + * `_500` + - Environment variables: + - *CM_DATASET_SIZE*: `500` + - Workflow: + * `_full` + - Environment variables: + - *CM_DATASET_SIZE*: `50000` + - Workflow: + * `_size.#` + - Environment variables: + - *CM_DATASET_SIZE*: `#` + - Workflow: + +
+ + +#### Default variations + +`_NCHW,_mlcommons-reference-preprocessor,_resolution.224,_validation` + +#### Script flags mapped to environment +
+
Click here to expand this section.
+
+* `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value`
+* `--imagenet_path=value` → `CM_IMAGENET_PATH=value`
+* `--imagenet_preprocessed_path=value` → `CM_IMAGENET_PREPROCESSED_PATH=value`
+* `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "dir":...})
+```
+
+
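+
+For instance, a minimal end-to-end sketch of the same call through the CM Python API (the output directory and thread count below are illustrative placeholders, not defaults of this script):
+
+```python
+import cmind
+
+# 'dir' and 'threads' are mapped by this script to
+# CM_DATASET_PREPROCESSED_PATH and CM_NUM_PREPROCESS_THREADS.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,imagenet,ILSVRC,image-classification,preprocessed',
+                  'dir': '/tmp/imagenet-preprocessed',  # hypothetical output folder
+                  'threads': 8,                         # hypothetical thread count
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```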
+ +#### Default environment + +
+
Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags.
+
+* CM_DATASET_CROP_FACTOR: `87.5`
+* CM_DATASET_DATA_TYPE: `float32`
+* CM_DATASET_DATA_LAYOUT: `NCHW`
+* CM_DATASET_QUANT_SCALE: `1`
+* CM_DATASET_QUANTIZE: `0`
+* CM_DATASET_QUANT_OFFSET: `0`
+* CM_DATASET_PREPROCESSED_EXTENSION: `npy`
+* CM_DATASET_CONVERT_TO_UNSIGNED: `0`
+* CM_DATASET_REFERENCE_PREPROCESSOR: `1`
+* CM_PREPROCESS_VGG: `yes`
+* CM_MODEL: `resnet50`
+
+
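+
+For example, a hedged sketch of overriding one of these defaults from the Python API (the value shown is purely illustrative, not a recommendation):
+
+```python
+import cmind
+
+# Equivalent to passing --env.CM_DATASET_CROP_FACTOR=100.0 on the command line.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,imagenet,ILSVRC,image-classification,preprocessed',
+                  'env': {'CM_DATASET_CROP_FACTOR': '100.0'},  # illustrative override
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```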
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-imagenet/_cm.json)***
+     * get,python3
+       * `if (CM_IMAGENET_PREPROCESSED_PATH != on)`
+       * CM names: `--adr.['python3', 'python']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,dataset,image-classification,original
+       * `if (CM_IMAGENET_PREPROCESSED_PATH != on)`
+       * CM names: `--adr.['original-dataset']...`
+       - CM script: [get-dataset-imagenet-val](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-val)
+     * get,dataset-aux,image-classification,imagenet-aux
+       * `if (CM_DATASET_TYPE == validation) AND (CM_IMAGENET_PREPROCESSED_PATH != on)`
+       - CM script: [get-dataset-imagenet-aux](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-aux)
+     * get,dataset,imagenet,calibration
+       * `if (CM_DATASET_TYPE == calibration)`
+       - CM script: [get-dataset-imagenet-calibration](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-calibration)
+     * get,generic-python-lib,_package.opencv-python-headless
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_pillow
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * mlperf,mlcommons,inference,source,src
+       * `if (CM_DATASET_REFERENCE_PREPROCESSOR == 1) AND (CM_IMAGENET_PREPROCESSED_PATH != on)`
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-imagenet/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-imagenet/_cm.json)
+  1. ***Run native script if exists***
+     * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-imagenet/run.bat)
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-imagenet/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-imagenet/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-imagenet/customize.py)***
+  1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-imagenet/_cm.json) + +___ +### Script output +`cmr "get dataset imagenet ILSVRC image-classification preprocessed [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +#### New environment keys auto-detected from customize + +* `CM_DATASET_DATA_TYPE_INPUT` +* `CM_DATASET_IMAGES_LIST` +* `CM_DATASET_PREPROCESSED_IMAGENAMES_LIST` +* `CM_DATASET_PREPROCESSED_IMAGES_LIST` +* `CM_DATASET_PREPROCESSED_PATH` +* `CM_DATASET_SIZE` +* `CM_DATASET_TYPE` \ No newline at end of file diff --git a/script/get-preprocessed-dataset-imagenet/_cm.json b/script/get-preprocessed-dataset-imagenet/_cm.json new file mode 100644 index 0000000000..99aad32e8a --- /dev/null +++ b/script/get-preprocessed-dataset-imagenet/_cm.json @@ -0,0 +1,388 @@ +{ + "alias": "get-preprocessed-dataset-imagenet", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "deps": [ + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3", + "skip_if_env": { + "CM_IMAGENET_PREPROCESSED_PATH": [ "on" ] + } + }, + { + "names": [ + "original-dataset" + ], + "tags": "get,dataset,image-classification,original", + "skip_if_env": { + "CM_IMAGENET_PREPROCESSED_PATH": [ "on" ] + } + }, + { + "tags": "get,dataset-aux,image-classification,imagenet-aux", + "skip_if_env": { + "CM_IMAGENET_PREPROCESSED_PATH": [ "on" ] + }, + "enable_if_env": { + "CM_DATASET_TYPE": [ "validation" ] + } + }, + { + "tags": "get,dataset,imagenet,calibration", + "enable_if_env": { + "CM_DATASET_TYPE": [ "calibration" ] + } + }, + { + "tags": "get,generic-python-lib,_package.opencv-python-headless" + }, + { + "tags": "get,generic-python-lib,_pillow" + }, + { + "names": [ + "inference-src" + ], + "tags": "mlperf,mlcommons,inference,source,src", + "enable_if_env": { + "CM_DATASET_REFERENCE_PREPROCESSOR": [ "1" ] + }, + "skip_if_env": { + "CM_IMAGENET_PREPROCESSED_PATH": [ "on" ] + } + } + ], + "default_env": { + "CM_DATASET_CROP_FACTOR": "87.5", + "CM_DATASET_DATA_TYPE": "float32", + "CM_DATASET_DATA_LAYOUT": "NCHW", + "CM_DATASET_QUANT_SCALE": "1", + "CM_DATASET_QUANTIZE": "0", + "CM_DATASET_QUANT_OFFSET": "0", + "CM_DATASET_PREPROCESSED_EXTENSION": "npy", + "CM_DATASET_CONVERT_TO_UNSIGNED": "0", + "CM_DATASET_REFERENCE_PREPROCESSOR": "1", + "CM_PREPROCESS_VGG": "yes", + "CM_MODEL": "resnet50" + }, + "env": { + "CM_DATASET": "imagenet" + }, + "input_mapping": { + "dir": "CM_DATASET_PREPROCESSED_PATH", + "threads": "CM_NUM_PREPROCESS_THREADS", + "imagenet_path": "CM_IMAGENET_PATH", + "imagenet_preprocessed_path": "CM_IMAGENET_PREPROCESSED_PATH" + }, + "new_env_keys": [ + "CM_DATASET_*" + ], + "tags": [ + "get", + "dataset", + "imagenet", + "ILSVRC", + "image-classification", + "preprocessed" + ], + "uid": "f259d490bbaf45f5", + "variations": { + "pytorch": { + "default_variations": { + "preprocessing-source": "mlcommons-reference-preprocessor" + }, + "env": { + "CM_PREPROCESS_PYTORCH": "yes", + "CM_MODEL": "resnet50" + }, + "deps": [ + { + "names": [ "torchvision" ], + "tags": "get,generic-python-lib,_torchvision" + } + ] + }, + "tflite_tpu": { + "default_variations": { + "preprocessing-source": "mlcommons-reference-preprocessor" + }, + "env": { + "CM_MODEL": "resnet50", + "CM_PREPROCESS_TFLITE_TPU": "yes" + } + }, + "1": { + "group": "size", + "add_deps": { + "original-dataset": { + "tags": "_2012-1" + } + }, + "env": { + 
"CM_DATASET_SIZE": "1" + } + }, + "500": { + "group": "size", + "add_deps": { + "original-dataset": { + "tags": "_2012" + } + }, + "env": { + "CM_DATASET_SIZE": "500" + } + }, + "500,validation": { + "add_deps": { + "original-dataset": { + "tags": "_size.500" + } + } + }, + "size.#": { + "group": "size", + "add_deps": { + "original-dataset": { + "tags": "_#" + } + }, + "env": { + "CM_DATASET_SIZE": "#" + } + }, + "full": { + "group": "size", + "add_deps": { + "original-dataset": { + "tags": "_full" + } + }, + "env": { + "CM_DATASET_SIZE": "50000" + } + }, + "NCHW": { + "group": "layout", + "default": true, + "env": { + "CM_DATASET_DATA_LAYOUT": "NCHW" + } + }, + "NHWC": { + "group": "layout", + "env": { + "CM_DATASET_DATA_LAYOUT": "NHWC" + } + }, + "resnet50_": { + "env": { + "CM_MODEL": "resnet50" + }, + "default_variations": { + "preprocessing-source": "generic-preprocessor", + "interpolation-method": "inter.area", + "extension": "rgb32", + "precision": "float32" + } + }, + "mobilenet_": { + "env": { + "CM_MODEL": "mobilenet" + }, + "default_variations": { + "preprocessing-source": "generic-preprocessor", + "interpolation-method": "inter.linear", + "extension": "rgb32", + "precision": "int8" + } + }, + "float32": { + "group": "precision", + "env": { + "CM_DATASET_DATA_TYPE": "float32", + "CM_DATASET_QUANTIZE": "0", + "CM_DATASET_CONVERT_TO_UNSIGNED": "0" + } + }, + "uint8": { + "group": "precision", + "env": { + "CM_DATASET_DATA_TYPE": "uint8", + "CM_DATASET_DATA_TYPE_INPUT": "float32", + "CM_DATASET_QUANTIZE": "1", + "CM_DATASET_CONVERT_TO_UNSIGNED": "1" + } + }, + "int8": { + "group": "precision", + "env": { + "CM_DATASET_DATA_TYPE": "int8", + "CM_DATASET_QUANTIZE": "1", + "CM_DATASET_CONVERT_TO_UNSIGNED": "0" + } + }, + "for.resnet50": { + "base": [ "resnet50_" ], + "group": "model", + "env": { + "CM_DATASET_SUBTRACT_MEANS": "1", + "CM_DATASET_GIVEN_CHANNEL_MEANS": "123.68 116.78 103.94", + "CM_DATASET_NORMALIZE_DATA": "0", + "CM_DATASET_INTERPOLATION_METHOD": "INTER_AREA" + } + }, + "for.resnet50,uint8": { + "env": { + "CM_DATASET_QUANT_SCALE": "1.18944883", + "CM_DATASET_QUANT_OFFSET": "0" + } + }, + "for.resnet50,float32": { + "env": { + } + }, + "for.resnet50,rgb8": { + "env": { + "CM_DATASET_GIVEN_CHANNEL_MEANS": "", + "CM_DATASET_SUBTRACT_MEANS": "0", + "CM_DATASET_NORMALIZE_DATA": "0", + "CM_DATASET_QUANTIZE": "0", + "CM_DATASET_DATA_TYPE": "uint8" + } + }, + "for.resnet50,rgb8,uint8": { + "env": { + "CM_DATASET_GIVEN_CHANNEL_MEANS": "123.68 116.78 103.94", + "CM_DATASET_SUBTRACT_MEANS": "1", + "CM_DATASET_QUANTIZE": "1" + } + }, + "for.mobilenet": { + "base": [ "mobilenet_" ], + "group": "model", + "env": { + } + }, + "for.mobilenet,float32": { + "env": { + "CM_DATASET_QUANTIZE": "0", + "CM_DATASET_GIVEN_CHANNEL_MEANS": "", + "CM_DATASET_NORMALIZE_DATA": "1", + "CM_DATASET_SUBTRACT_MEANS": "0" + } + }, + "for.mobilenet,rgb8": { + "env": { + "CM_DATASET_GIVEN_CHANNEL_MEANS": "", + "CM_DATASET_SUBTRACT_MEANS": "0", + "CM_DATASET_QUANTIZE": "0", + "CM_DATASET_NORMALIZE_DATA": "0", + "CM_DATASET_DATA_TYPE": "uint8" + } + }, + "rgb8": { + "group": "extension", + "env": { + "CM_DATASET_PREPROCESSED_EXTENSION": "rgb8" + } + }, + "rgb32": { + "group": "extension", + "env": { + "CM_DATASET_PREPROCESSED_EXTENSION": "rgb32" + } + }, + "inter.linear": { + "group": "interpolation-method", + "env": { + "CM_DATASET_INTERPOLATION_METHOD": "INTER_LINEAR" + } + }, + "inter.area": { + "group": "interpolation-method", + "env": { + "CM_DATASET_INTERPOLATION_METHOD": "INTER_AREA" + } + }, + 
"generic-preprocessor": { + "group": "preprocessing-source", + "prehook_deps": [ + { + "tags": "get,generic,image-preprocessor" + } + ], + "env": { + "CM_DATASET_REFERENCE_PREPROCESSOR": "0" + } + }, + "mlcommons-reference-preprocessor": { + "group": "preprocessing-source", + "default": true, + "env": { + "CM_DATASET_REFERENCE_PREPROCESSOR": "1" + } + }, + "resolution.224": { + "group": "resolution", + "default": true, + "env": { + "CM_DATASET_INPUT_SQUARE_SIDE": "224" + } + }, + "resolution.#": { + "group": "resolution", + "env": { + "CM_DATASET_INPUT_SQUARE_SIDE": "#" + } + }, + "validation": { + "group": "dataset-type", + "default": "true", + "env": { + "CM_DATASET_TYPE": "validation" + }, + "default_variations": { + "size": "500" + } + }, + "calibration": { + "group": "dataset-type", + "env": { + "CM_DATASET_TYPE": "calibration" + }, + "default_variations": { + "calibration-option": "mlperf.option1", + "preprocessing-source": "generic-preprocessor" + }, + "add_deps": { + "original-dataset": { + "tags": "_full" + } + } + }, + "mlperf.option1": { + "group": "calibration-option", + "env": { + "CM_DATASET_CALIBRATION_OPTION": "one" + } + }, + "mlperf.option2": { + "group": "calibration-option", + "env": { + "CM_DATASET_CALIBRATION_OPTION": "two" + } + }, + "default": { + } + }, + "docker": { + "run": false + } +} diff --git a/script/get-preprocessed-dataset-imagenet/customize.py b/script/get-preprocessed-dataset-imagenet/customize.py new file mode 100644 index 0000000000..f744e1330f --- /dev/null +++ b/script/get-preprocessed-dataset-imagenet/customize.py @@ -0,0 +1,61 @@ +from cmind import utils +import os +from os.path import exists +import shutil +import glob + +def preprocess(i): + + env = i['env'] + if 'CM_IMAGENET_PREPROCESSED_PATH' in env: + files = glob.glob(env['CM_IMAGENET_PREPROCESSED_PATH']+"/**/"+env['CM_IMAGENET_PREPROCESSED_FILENAME'], recursive = True) + if files: + env['CM_DATASET_PREPROCESSED_PATH'] = env['CM_IMAGENET_PREPROCESSED_PATH'] + else: + return {'return': 1, 'error': 'No preprocessed images found in '+env['CM_IMAGENET_PREPROCESSED_PATH']} + else: + if env.get('CM_DATASET_REFERENCE_PREPROCESSOR',"0") == "1": + print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + + env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd() + if env['CM_DATASET_TYPE'] == "validation" and not exists(os.path.join(env['CM_DATASET_PATH'], "val_map.txt")): + shutil.copy(os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt"), os.path.join(env['CM_DATASET_PATH'], + "val_map.txt")) + + preprocessed_path = env['CM_DATASET_PREPROCESSED_PATH'] + + if env.get('CM_DATASET_TYPE', '') == "validation" and not exists(os.path.join(preprocessed_path, "val_map.txt")): + shutil.copy(os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt"), + os.path.join(preprocessed_path, "val_map.txt")) + + if env.get('CM_DATASET_TYPE', '') == "calibration": + env['CM_DATASET_IMAGES_LIST'] = env['CM_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH'] + env['CM_DATASET_SIZE'] = 500 + + if env.get('CM_DATASET_DATA_TYPE_INPUT', '') == '': + env['CM_DATASET_DATA_TYPE_INPUT'] = env['CM_DATASET_DATA_TYPE'] + + return {'return': 0} + +def postprocess(i): + + env = i['env'] + + # finalize path + preprocessed_path = env['CM_DATASET_PREPROCESSED_PATH'] + preprocessed_images_list = [] + preprocessed_imagenames_list = [] + + match_text = "/*."+env.get("CM_DATASET_PREPROCESSED_EXTENSION","*") + for filename in sorted(glob.glob(preprocessed_path + match_text)): + preprocessed_images_list.append(filename) + 
preprocessed_imagenames_list.append(os.path.basename(filename)) + with open("preprocessed_files.txt", "w") as f: + f.write("\n".join(preprocessed_images_list)) + with open("preprocessed_filenames.txt", "w") as f: + f.write("\n".join(preprocessed_imagenames_list)) + + env['CM_DATASET_PREPROCESSED_IMAGES_LIST'] = os.path.join(os.getcwd(), "preprocessed_files.txt") + env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'] = os.path.join(os.getcwd(), "preprocessed_filenames.txt") + + return {'return':0} diff --git a/script/get-preprocessed-dataset-imagenet/preprocess.py b/script/get-preprocessed-dataset-imagenet/preprocess.py new file mode 100644 index 0000000000..aa804d19d9 --- /dev/null +++ b/script/get-preprocessed-dataset-imagenet/preprocess.py @@ -0,0 +1,40 @@ +import os +import sys + +if os.environ.get('CM_DATASET_REFERENCE_PREPROCESSOR', '1') == "0": + import generic_preprocess + generic_preprocess.preprocess() +else: + mlperf_src_path = os.environ['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'] + python_path = os.path.join(mlperf_src_path, "python") + sys.path.insert(0, python_path) + + import imagenet + import dataset + + dataset_path = os.environ['CM_DATASET_PATH'] + dataset_list = os.environ.get('CM_DATASET_IMAGES_LIST', None) + img_format = os.environ.get('CM_DATASET_DATA_LAYOUT', 'NHWC') + count = int(os.environ.get('CM_DATASET_SIZE', 1)) + preprocessed_dir = os.environ.get('CM_DATASET_PREPROCESSED_PATH', os.getcwd()) + threads = os.environ.get('CM_NUM_THREADS', os.cpu_count()) + threads = int(os.environ.get('CM_NUM_PREPROCESS_THREADS', threads)) + + if os.environ.get('CM_MODEL') == 'mobilenet': + pre_process = dataset.pre_process_mobilenet + elif os.environ.get('CM_MODEL', 'resnet50') == 'resnet50' and os.environ.get('CM_PREPROCESS_PYTORCH', '') == "yes": + pre_process = dataset.pre_process_imagenet_pytorch + elif os.environ.get('CM_MODEL', 'resnet50') == 'resnet50' and os.environ.get('CM_PREPROCESS_TFLITE_TPU', '') == "yes": + pre_process = dataset.pre_process_imagenet_tflite_tpu + else: + pre_process = dataset.pre_process_vgg + + imagenet.Imagenet(data_path=dataset_path, + image_list=dataset_list, + name="imagenet", + image_format=img_format, + pre_process = pre_process, + use_cache=True, + count=count, + threads=threads, + preprocessed_dir=preprocessed_dir) diff --git a/script/get-preprocessed-dataset-imagenet/run.bat b/script/get-preprocessed-dataset-imagenet/run.bat new file mode 100644 index 0000000000..7f6036f841 --- /dev/null +++ b/script/get-preprocessed-dataset-imagenet/run.bat @@ -0,0 +1,4 @@ +@echo off + +%CM_PYTHON_BIN% %CM_TMP_CURRENT_SCRIPT_PATH%\preprocess.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-preprocessed-dataset-imagenet/run.sh b/script/get-preprocessed-dataset-imagenet/run.sh new file mode 100644 index 0000000000..c6e17411b9 --- /dev/null +++ b/script/get-preprocessed-dataset-imagenet/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash +if [ ! -z ${CM_IMAGENET_PREPROCESSED_PATH+x} ]; then + exit 0 +fi +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/preprocess.py +test $? 
-eq 0 || exit 1
diff --git a/script/get-preprocessed-dataset-kits19/README.md b/script/get-preprocessed-dataset-kits19/README.md
new file mode 100644
index 0000000000..396d134a9b
--- /dev/null
+++ b/script/get-preprocessed-dataset-kits19/README.md
@@ -0,0 +1,234 @@
+Automatically generated README for this automation recipe: **get-preprocessed-dataset-kits19**
+
+Category: **AI/ML datasets**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-preprocessed-dataset-kits19,2094d9b9ab6c4c9e) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-kits19)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see the meta description above): *get,dataset,medical-imaging,kits19,preprocessed*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get dataset medical-imaging kits19 preprocessed" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,dataset,medical-imaging,kits19,preprocessed`
+
+`cm run script --tags=get,dataset,medical-imaging,kits19,preprocessed[,variations] [--input_flags]`
+
+*or*
+
+`cmr "get dataset medical-imaging kits19 preprocessed"`
+
+`cmr "get dataset medical-imaging kits19 preprocessed [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,medical-imaging,kits19,preprocessed',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,medical-imaging,kits19,preprocessed"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,medical-imaging,kits19,preprocessed) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset medical-imaging kits19 preprocessed[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_nvidia` + - Environment variables: + - *CM_PREPROCESSING_BY_NVIDIA*: `yes` + - Workflow: + +
+ + + * Group "**dataset-count**" +
+ Click here to expand this section. + + * `_1` + - Environment variables: + - *CM_DATASET_SIZE*: `1` + - Workflow: + * `_5` + - Environment variables: + - *CM_DATASET_SIZE*: `5` + - Workflow: + * `_50` + - Environment variables: + - *CM_DATASET_SIZE*: `50` + - Workflow: + * `_500` + - Environment variables: + - *CM_DATASET_SIZE*: `500` + - Workflow: + * `_full` + - Environment variables: + - *CM_DATASET_SIZE*: `` + - Workflow: + +
+ + + * Group "**dataset-precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Environment variables: + - *CM_DATASET_DTYPE*: `fp32` + - Workflow: + * `_int8` + - Environment variables: + - *CM_DATASET_DTYPE*: `int8` + - Workflow: + +
+ + + * Group "**dataset-type**" +
+ Click here to expand this section. + + * `_calibration` + - Environment variables: + - *CM_DATASET_PATH*: `<<>>` + - Workflow: + * **`_validation`** (default) + - Workflow: + +
+ + +#### Default variations + +`_fp32,_validation` + +#### Script flags mapped to environment +
+
Click here to expand this section.
+
+* `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value`
+* `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "dir":...})
+```
+
+
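+
+A hedged sketch of the same flags passed through the Python API, with an illustrative variation selection appended to the tags (the path and thread count are placeholders):
+
+```python
+import cmind
+
+# '_500,_int8' selects the dataset-count and dataset-precision variations.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,medical-imaging,kits19,preprocessed,_500,_int8',
+                  'dir': '/tmp/kits19-preprocessed',  # hypothetical output folder
+                  'threads': 4,                       # hypothetical thread count
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```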
+ +#### Default environment + +
+
Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags.
+
+* CM_DATASET: `kits19`
+* CM_DATASET_DTYPE: `fp32`
+
+
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-kits19/_cm.json)***
+     * get,python3
+       * CM names: `--adr.['python3', 'python']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,dataset,medical-imaging,kits19,original
+       * CM names: `--adr.['original-dataset']...`
+       - CM script: [get-dataset-kits19](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-kits19)
+     * mlperf,mlcommons,inference,source,src
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,generic-python-lib,_scipy
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_nibabel
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_numpy
+       * CM names: `--adr.['numpy']...`
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-kits19/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-kits19/_cm.json)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-kits19/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-kits19/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-kits19/customize.py)***
+  1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-kits19/_cm.json) + +___ +### Script output +`cmr "get dataset medical-imaging kits19 preprocessed [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +#### New environment keys auto-detected from customize + +* `CM_DATASET_PREPROCESSED_PATH` \ No newline at end of file diff --git a/script/get-preprocessed-dataset-kits19/_cm.json b/script/get-preprocessed-dataset-kits19/_cm.json new file mode 100644 index 0000000000..b69f856721 --- /dev/null +++ b/script/get-preprocessed-dataset-kits19/_cm.json @@ -0,0 +1,154 @@ +{ + "alias": "get-preprocessed-dataset-kits19", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "default_env": { + "CM_DATASET": "kits19", + "CM_DATASET_DTYPE": "fp32" + }, + "deps": [ + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3" + }, + { + "names": [ + "original-dataset" + ], + "tags": "get,dataset,medical-imaging,kits19,original" + }, + { + "names": [ + "inference-src" + ], + "tags": "mlperf,mlcommons,inference,source,src" + }, + { + "tags": "get,generic-python-lib,_scipy" + }, + { + "tags": "get,generic-python-lib,_nibabel" + }, + { + "names": [ + "numpy" + ], + "tags": "get,generic-python-lib,_numpy" + } + ], + "input_mapping": { + "dir": "CM_DATASET_PREPROCESSED_PATH", + "threads": "CM_NUM_PREPROCESS_THREADS" + }, + "new_env_keys": [ + "CM_DATASET_*" + ], + "tags": [ + "get", + "dataset", + "medical-imaging", + "kits19", + "preprocessed" + ], + "uid": "2094d9b9ab6c4c9e", + "variations": { + "1": { + "adr": { + "original-dataset": { + "tags": "_1" + } + }, + "env": { + "CM_DATASET_SIZE": "1" + }, + "group": "dataset-count" + }, + "5": { + "adr": { + "original-dataset": { + "tags": "_5" + } + }, + "env": { + "CM_DATASET_SIZE": "5" + }, + "group": "dataset-count" + }, + "50": { + "adr": { + "original-dataset": { + "tags": "_50" + } + }, + "env": { + "CM_DATASET_SIZE": "50" + }, + "group": "dataset-count" + }, + "500": { + "adr": { + "original-dataset": { + "tags": "_500" + } + }, + "env": { + "CM_DATASET_SIZE": "500" + }, + "group": "dataset-count" + }, + "calibration": { + "add_deps": { + "original-dataset": { + "tags": "_calibration" + } + }, + "env": { + "CM_DATASET_PATH": "<<>>" + }, + "group": "dataset-type" + }, + "fp32": { + "default": true, + "env": { + "CM_DATASET_DTYPE": "fp32" + }, + "group": "dataset-precision" + }, + "full": { + "adr": { + "original-dataset": { + "tags": "_full" + } + }, + "env": { + "CM_DATASET_SIZE": "" + }, + "group": "dataset-count" + }, + "int8": { + "env": { + "CM_DATASET_DTYPE": "int8" + }, + "group": "dataset-precision" + }, + "nvidia": { + "env": { + "CM_PREPROCESSING_BY_NVIDIA": "yes" + } + }, + "validation": { + "add_deps": { + "original-dataset": { + "tags": "_validation" + } + }, + "default": true, + "group": "dataset-type" + } + } +} diff --git a/script/get-preprocessed-dataset-kits19/customize.py b/script/get-preprocessed-dataset-kits19/customize.py new file mode 100644 index 0000000000..8de0593753 --- /dev/null +++ b/script/get-preprocessed-dataset-kits19/customize.py @@ -0,0 +1,21 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + env = i['env'] + + print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + preprocess_src = os.path.join(env['CM_MLPERF_INFERENCE_3DUNET_PATH'], 'preprocess.py') + cmd = 'cd '+ 
env['CM_MLPERF_INFERENCE_3DUNET_PATH'] + ' && ${CM_PYTHON_BIN_WITH_PATH} preprocess.py --raw_data_dir ' + env['CM_DATASET_PATH'] + ' --results_dir ' + os.getcwd() + ' --mode preprocess'
+    env['CM_TMP_CMD'] = cmd
+
+    return {'return': 0}
+
+def postprocess(i):
+    env = i['env']
+    if 'CM_DATASET_PREPROCESSED_PATH' not in env:
+        env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd()
+
+    return {'return': 0}
diff --git a/script/get-preprocessed-dataset-kits19/run.sh b/script/get-preprocessed-dataset-kits19/run.sh
new file mode 100644
index 0000000000..a9f248c38e
--- /dev/null
+++ b/script/get-preprocessed-dataset-kits19/run.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+cmd=${CM_TMP_CMD}
+echo $cmd
+eval $cmd
+test $? -eq 0 || exit $?
diff --git a/script/get-preprocessed-dataset-librispeech/README.md b/script/get-preprocessed-dataset-librispeech/README.md
new file mode 100644
index 0000000000..17156741ac
--- /dev/null
+++ b/script/get-preprocessed-dataset-librispeech/README.md
@@ -0,0 +1,224 @@
+Automatically generated README for this automation recipe: **get-preprocessed-dataset-librispeech**
+
+Category: **AI/ML datasets**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-preprocessed-dataset-librispeech,e9f62fc969d5483a) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-librispeech)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see the meta description above): *get,dataset,speech-recognition,librispeech,preprocessed*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get dataset speech-recognition librispeech preprocessed" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,dataset,speech-recognition,librispeech,preprocessed`
+
+`cm run script --tags=get,dataset,speech-recognition,librispeech,preprocessed[,variations] [--input_flags]`
+
+*or*
+
+`cmr "get dataset speech-recognition librispeech preprocessed"`
+
+`cmr "get dataset speech-recognition librispeech preprocessed [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,speech-recognition,librispeech,preprocessed',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,speech-recognition,librispeech,preprocessed"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,speech-recognition,librispeech,preprocessed) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset speech-recognition librispeech preprocessed[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * Group "**dataset-count**" +
+ Click here to expand this section. + + * `_1` + - Environment variables: + - *CM_DATASET_SIZE*: `1` + - Workflow: + * `_5` + - Environment variables: + - *CM_DATASET_SIZE*: `5` + - Workflow: + * `_50` + - Environment variables: + - *CM_DATASET_SIZE*: `50` + - Workflow: + * `_500` + - Environment variables: + - *CM_DATASET_SIZE*: `500` + - Workflow: + * `_full` + - Environment variables: + - *CM_DATASET_SIZE*: `` + - Workflow: + +
+ + + * Group "**dataset-precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Environment variables: + - *CM_DATASET_DTYPE*: `fp32` + - Workflow: + * `_int8` + - Environment variables: + - *CM_DATASET_DTYPE*: `int8` + - Workflow: + +
+ + + * Group "**dataset-type**" +
+ Click here to expand this section. + + * `_calibration` + - Environment variables: + - *CM_DATASET_PATH*: `<<>>` + - Workflow: + * **`_validation`** (default) + - Workflow: + +
+ + +#### Default variations + +`_fp32,_validation` + +#### Script flags mapped to environment +
+
Click here to expand this section.
+
+* `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value`
+* `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "dir":...})
+```
+
+
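+
+If the call succeeds, the keys listed under "New environment keys" below should become available in the result; a sketch, assuming the script automation returns them in a `new_env` dictionary:
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,speech-recognition,librispeech,preprocessed',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+else:
+    # 'new_env' is an assumption here; see "New environment keys" below.
+    print(r.get('new_env', {}).get('CM_DATASET_PREPROCESSED_PATH'))
+```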
+ +#### Default environment + +
+
Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags.
+
+* CM_DATASET: `librispeech`
+* CM_DATASET_DTYPE: `fp32`
+
+
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-librispeech/_cm.json)***
+     * get,python3
+       * CM names: `--adr.['python3', 'python']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,dataset,speech-recognition,librispeech,original
+       * CM names: `--adr.['original-dataset']...`
+       - CM script: [get-dataset-librispeech](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-librispeech)
+     * mlperf,mlcommons,inference,source,src
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,generic-python-lib,_sox
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_pandas
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_tqdm
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,sys-util,generic,_sox
+       - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-librispeech/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-librispeech/_cm.json)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-librispeech/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-librispeech/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-librispeech/customize.py)***
+  1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-librispeech/_cm.json) + +___ +### Script output +`cmr "get dataset speech-recognition librispeech preprocessed [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +#### New environment keys auto-detected from customize + +* `CM_DATASET_PREPROCESSED_JSON` +* `CM_DATASET_PREPROCESSED_PATH` \ No newline at end of file diff --git a/script/get-preprocessed-dataset-librispeech/_cm.json b/script/get-preprocessed-dataset-librispeech/_cm.json new file mode 100644 index 0000000000..a297c0b6c6 --- /dev/null +++ b/script/get-preprocessed-dataset-librispeech/_cm.json @@ -0,0 +1,149 @@ +{ + "alias": "get-preprocessed-dataset-librispeech", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "default_env": { + "CM_DATASET": "kits19", + "CM_DATASET_DTYPE": "fp32" + }, + "deps": [ + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3" + }, + { + "names": [ + "original-dataset" + ], + "tags": "get,dataset,speech-recognition,librispeech,original" + }, + { + "names": [ + "inference-src" + ], + "tags": "mlperf,mlcommons,inference,source,src" + }, + { + "tags": "get,generic-python-lib,_sox" + }, + { + "tags": "get,generic-python-lib,_pandas" + }, + { + "tags": "get,generic-python-lib,_tqdm" + }, + { + "tags": "get,sys-util,generic,_sox" + } + ], + "input_mapping": { + "dir": "CM_DATASET_PREPROCESSED_PATH", + "threads": "CM_NUM_PREPROCESS_THREADS" + }, + "new_env_keys": [ + "CM_DATASET_*" + ], + "tags": [ + "get", + "dataset", + "speech-recognition", + "librispeech", + "preprocessed" + ], + "uid": "e9f62fc969d5483a", + "variations": { + "1": { + "adr": { + "original-dataset": { + "tags": "_1" + } + }, + "env": { + "CM_DATASET_SIZE": "1" + }, + "group": "dataset-count" + }, + "5": { + "adr": { + "original-dataset": { + "tags": "_5" + } + }, + "env": { + "CM_DATASET_SIZE": "5" + }, + "group": "dataset-count" + }, + "50": { + "adr": { + "original-dataset": { + "tags": "_50" + } + }, + "env": { + "CM_DATASET_SIZE": "50" + }, + "group": "dataset-count" + }, + "500": { + "adr": { + "original-dataset": { + "tags": "_500" + } + }, + "env": { + "CM_DATASET_SIZE": "500" + }, + "group": "dataset-count" + }, + "calibration": { + "add_deps": { + "original-dataset": { + "tags": "_calibration" + } + }, + "env": { + "CM_DATASET_PATH": "<<>>" + }, + "group": "dataset-type" + }, + "fp32": { + "default": true, + "env": { + "CM_DATASET_DTYPE": "fp32" + }, + "group": "dataset-precision" + }, + "full": { + "adr": { + "original-dataset": { + "tags": "_full" + } + }, + "env": { + "CM_DATASET_SIZE": "" + }, + "group": "dataset-count" + }, + "int8": { + "env": { + "CM_DATASET_DTYPE": "int8" + }, + "group": "dataset-precision" + }, + "validation": { + "add_deps": { + "original-dataset": { + "tags": "validation" + } + }, + "default": true, + "group": "dataset-type" + } + } +} diff --git a/script/get-preprocessed-dataset-librispeech/customize.py b/script/get-preprocessed-dataset-librispeech/customize.py new file mode 100644 index 0000000000..e5a8a12e2b --- /dev/null +++ b/script/get-preprocessed-dataset-librispeech/customize.py @@ -0,0 +1,21 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + env = i['env'] + + print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + preprocess_src = 
os.path.join(env['CM_MLPERF_INFERENCE_RNNT_PATH'], 'pytorch', 'utils', 'convert_librispeech.py')
+    cmd = 'cd ' + env['CM_MLPERF_INFERENCE_RNNT_PATH'] + ' && ${CM_PYTHON_BIN_WITH_PATH} ' + preprocess_src + ' --input_dir ' + env['CM_DATASET_LIBRISPEECH_PATH'] + ' --dest_dir ' + os.path.join(os.getcwd(), 'dev-clean-wav') + ' --output_json ' + os.path.join(os.getcwd(), 'dev-clean-wav.json')
+    env['CM_TMP_CMD'] = cmd
+
+    return {'return': 0}
+
+def postprocess(i):
+    env = i['env']
+    env['CM_DATASET_PREPROCESSED_PATH'] = os.path.join(os.getcwd(), 'dev-clean-wav')
+    env['CM_DATASET_PREPROCESSED_JSON'] = os.path.join(os.getcwd(), 'dev-clean-wav.json')
+
+    return {'return': 0}
diff --git a/script/get-preprocessed-dataset-librispeech/run.sh b/script/get-preprocessed-dataset-librispeech/run.sh
new file mode 100644
index 0000000000..a9f248c38e
--- /dev/null
+++ b/script/get-preprocessed-dataset-librispeech/run.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+cmd=${CM_TMP_CMD}
+echo $cmd
+eval $cmd
+test $? -eq 0 || exit $?
diff --git a/script/get-preprocessed-dataset-openimages/README-extra.md b/script/get-preprocessed-dataset-openimages/README-extra.md
new file mode 100644
index 0000000000..f5c013f9aa
--- /dev/null
+++ b/script/get-preprocessed-dataset-openimages/README-extra.md
@@ -0,0 +1,28 @@
+# Get Preprocessed Open Images Dataset
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) preprocesses the Open Images dataset.
+
+## How To
+```bash
+cm run script --tags=get,openimages,preprocessed,_[VARIATION] --dir=[DIRECTORY] --threads=[NUM_THREADS]
+```
+where,
+* `[DIRECTORY]:` is the folder to store the preprocessed dataset. Default is the current working directory.
+* `[NUM_THREADS]:` is the number of threads to do preprocessing. Default is the number of host CPUs.
+and the supported [VARIATIONS] (comma-separated and beginning with `_`) are:
+* `[50]:` Preprocess only 50 images (the default count)
+* `[500]:` Preprocess the first 500 images
+* `[full]:` Preprocess the full dataset
+* `[validation]:` Preprocess the validation dataset
+* `[calibration]:` Preprocess the calibration dataset
+* `[NHWC]:` Preprocess the dataset with the `Channel` component at the end
+* `[NCHW]:` Preprocess the dataset with the `Channel` component at the beginning
+
+## Input Variables coming from Dependencies
+* `[CM_DATASET_PATH]:` Folder path to the Open Images dataset
+* `[CM_DATASET_IMAGES_LIST]:` File path containing the image names
+* `[CM_DATASET_OPENIMAGES_RESIZE]:` Image width to resize to (default 800)
+
+## Exported Variables
+* `[CM_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed images are stored
+
+
diff --git a/script/get-preprocessed-dataset-openimages/README.md b/script/get-preprocessed-dataset-openimages/README.md
new file mode 100644
index 0000000000..d2deba2d80
--- /dev/null
+++ b/script/get-preprocessed-dataset-openimages/README.md
@@ -0,0 +1,403 @@
+Automatically generated README for this automation recipe: **get-preprocessed-dataset-openimages**
+
+Category: **AI/ML datasets**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-preprocessed-dataset-openimages,9842f1be8cba4c7b) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openimages)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see the meta description above): *get,dataset,openimages,open-images,object-detection,preprocessed*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get dataset openimages open-images object-detection preprocessed" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,dataset,openimages,open-images,object-detection,preprocessed`
+
+`cm run script --tags=get,dataset,openimages,open-images,object-detection,preprocessed[,variations] [--input_flags]`
+
+*or*
+
+`cmr "get dataset openimages open-images object-detection preprocessed"`
+
+`cmr "get dataset openimages open-images object-detection preprocessed [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,openimages,open-images,object-detection,preprocessed',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,openimages,open-images,object-detection,preprocessed"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,openimages,open-images,object-detection,preprocessed) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset openimages open-images object-detection preprocessed[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_filter` + - Workflow: + * `_filter,calibration` + - Environment variables: + - *CM_DATASET_CALIBRATION_FILTER*: `yes` + - Workflow: + * `_for.retinanet.onnx` + - Environment variables: + - *CM_ML_MODEL_NAME*: `retinanet` + - *CM_DATASET_SUBTRACT_MEANS*: `1` + - *CM_DATASET_GIVEN_CHANNEL_MEANS*: `0.485 0.456 0.406` + - *CM_DATASET_GIVEN_CHANNEL_STDS*: `0.229 0.224 0.225` + - *CM_DATASET_NORMALIZE_DATA*: `0` + - *CM_DATASET_NORMALIZE_LOWER*: `0.0` + - *CM_DATASET_NORMALIZE_UPPER*: `1.0` + - *CM_DATASET_CONVERT_TO_BGR*: `0` + - *CM_DATASET_CROP_FACTOR*: `100.0` + - Workflow: + * `_for.retinanet.onnx,fp32` + - Workflow: + * `_for.retinanet.onnx,uint8` + - Environment variables: + - *CM_DATASET_QUANT_SCALE*: `0.0186584499` + - *CM_DATASET_QUANT_OFFSET*: `114` + - Workflow: + * `_full,validation` + - Environment variables: + - *CM_DATASET_SIZE*: `24781` + - Workflow: + * `_nvidia` + - Environment variables: + - *CM_PREPROCESSING_BY_NVIDIA*: `yes` + - Workflow: + * `_quant-offset.#` + - Workflow: + * `_quant-scale.#` + - Workflow: + +
+ + + * Group "**annotations**" +
+ Click here to expand this section. + + * `_custom-annotations` + - Workflow: + * **`_default-annotations`** (default) + - Workflow: + +
+ + + * Group "**dataset-count**" +
+ Click here to expand this section. + + * **`_50`** (default) + - Environment variables: + - *CM_DATASET_SIZE*: `50` + - Workflow: + * `_500` + - Environment variables: + - *CM_DATASET_SIZE*: `500` + - Workflow: + * `_full` + - Workflow: + * `_size.#` + - Environment variables: + - *CM_DATASET_SIZE*: `#` + - Workflow: + +
+ + + * Group "**dataset-layout**" +
+ Click here to expand this section. + + * **`_NCHW`** (default) + - Environment variables: + - *CM_DATASET_DATA_LAYOUT*: `NCHW` + - Workflow: + * `_NHWC` + - Environment variables: + - *CM_DATASET_DATA_LAYOUT*: `NHWC` + - Workflow: + +
+ + + * Group "**dataset-precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Environment variables: + - *CM_DATASET_DTYPE*: `fp32` + - *CM_DATASET_INPUT_DTYPE*: `fp32` + - *CM_DATASET_QUANTIZE*: `0` + - *CM_DATASET_CONVERT_TO_UNSIGNED*: `0` + - Workflow: + * `_int8` + - Environment variables: + - *CM_DATASET_DTYPE*: `int8` + - *CM_DATASET_INPUT_DTYPE*: `fp32` + - *CM_DATASET_QUANTIZE*: `1` + - *CM_DATASET_CONVERT_TO_UNSIGNED*: `0` + - Workflow: + * `_uint8` + - Environment variables: + - *CM_DATASET_DTYPE*: `uint8` + - *CM_DATASET_INPUT_DTYPE*: `fp32` + - *CM_DATASET_QUANTIZE*: `1` + - *CM_DATASET_CONVERT_TO_UNSIGNED*: `1` + - Workflow: + +
+ + + * Group "**dataset-type**" +
+ Click here to expand this section. + + * `_calibration` + - Environment variables: + - *CM_DATASET_PATH*: `<<>>` + - *CM_DATASET_ANNOTATIONS_FILE_PATH*: `<<>>` + - *CM_DATASET_TYPE*: `calibration` + - Workflow: + * **`_validation`** (default) + - Environment variables: + - *CM_DATASET_TYPE*: `validation` + - Workflow: + +
+ + + * Group "**extension**" +
+ Click here to expand this section. + + * `_npy` + - Environment variables: + - *CM_DATASET_PREPROCESSED_EXTENSION*: `npy` + - Workflow: + * `_raw` + - Environment variables: + - *CM_DATASET_PREPROCESSED_EXTENSION*: `raw` + - Workflow: + * `_rgb32` + - Environment variables: + - *CM_DATASET_PREPROCESSED_EXTENSION*: `rgb32` + - Workflow: + * `_rgb8` + - Environment variables: + - *CM_DATASET_PREPROCESSED_EXTENSION*: `rgb8` + - Workflow: + +
+ + + * Group "**filter-size**" +
+ Click here to expand this section. + + * `_filter-size.#` + - Workflow: + +
+ + + * Group "**interpolation-method**" +
+ Click here to expand this section. + + * `_inter.area` + - Environment variables: + - *CM_DATASET_INTERPOLATION_METHOD*: `INTER_AREA` + - Workflow: + * `_inter.linear` + - Environment variables: + - *CM_DATASET_INTERPOLATION_METHOD*: `INTER_LINEAR` + - Workflow: + +
+ + + * Group "**preprocessing-source**" +
+ Click here to expand this section. + + * `_generic-preprocessor` + - Environment variables: + - *CM_DATASET_REFERENCE_PREPROCESSOR*: `0` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_torch + * CM names: `--adr.['torch', 'pytorch']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torchvision + * CM names: `--adr.['torchvision']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Read "prehook_deps" on other CM scripts*** + * get,generic,image-preprocessor + - CM script: [get-preprocesser-script-generic](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocesser-script-generic) + * **`_mlcommons-reference-preprocessor`** (default) + - Environment variables: + - *CM_DATASET_REFERENCE_PREPROCESSOR*: `1` + - Workflow: + +
+ + +#### Default variations + +`_50,_NCHW,_default-annotations,_fp32,_mlcommons-reference-preprocessor,_validation` + +#### Script flags mapped to environment +
+
Click here to expand this section.
+
+* `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value`
+* `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "dir":...})
+```
+
+
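+
+Variations compose with these flags; a hedged sketch that requests Open Images preprocessed for the ONNX RetinaNet model in uint8, using variations listed above (the output directory is a placeholder):
+
+```python
+import cmind
+
+# '_for.retinanet.onnx,_uint8' selects the model-specific preprocessing
+# and the uint8 dataset-precision variation.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,openimages,open-images,object-detection,preprocessed,_for.retinanet.onnx,_uint8',
+                  'dir': '/tmp/openimages-preprocessed',  # hypothetical output folder
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```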
+ +#### Default environment + +
+
Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags.
+
+* CM_DATASET: `OPENIMAGES`
+* CM_DATASET_DTYPE: `fp32`
+* CM_DATASET_INPUT_SQUARE_SIDE: `800`
+* CM_DATASET_CROP_FACTOR: `100.0`
+* CM_DATASET_QUANT_SCALE: `1`
+* CM_DATASET_QUANTIZE: `0`
+* CM_DATASET_QUANT_OFFSET: `0`
+
+
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openimages/_cm.json)***
+     * get,python3
+       * CM names: `--adr.['python3', 'python']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,dataset,object-detection,openimages,original
+       * CM names: `--adr.['original-dataset']...`
+       - CM script: [get-dataset-openimages](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openimages)
+     * mlperf,mlcommons,inference,source,src
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,generic-python-lib,_pycocotools
+       * CM names: `--adr.['pycocotools']...`
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_opencv-python
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_pillow
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_package.ujson
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_numpy
+       * CM names: `--adr.['numpy']...`
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_numpy
+       * CM names: `--adr.['numpy']...`
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openimages/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openimages/_cm.json)
+  1. ***Run native script if exists***
+     * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openimages/run.bat)
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openimages/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openimages/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openimages/customize.py)***
+  1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openimages/_cm.json) + +___ +### Script output +`cmr "get dataset openimages open-images object-detection preprocessed [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +#### New environment keys auto-detected from customize + +* `CM_DATASET_ANNOTATIONS_DIR_PATH` +* `CM_DATASET_ANNOTATIONS_FILE_PATH` +* `CM_DATASET_PREPROCESSED_IMAGENAMES_LIST` +* `CM_DATASET_PREPROCESSED_IMAGES_LIST` +* `CM_DATASET_PREPROCESSED_PATH` +* `CM_DATASET_QUANT_OFFSET` +* `CM_DATASET_QUANT_SCALE` +* `CM_DATASET_TYPE` \ No newline at end of file diff --git a/script/get-preprocessed-dataset-openimages/_cm.json b/script/get-preprocessed-dataset-openimages/_cm.json new file mode 100644 index 0000000000..0f796a7c6d --- /dev/null +++ b/script/get-preprocessed-dataset-openimages/_cm.json @@ -0,0 +1,351 @@ +{ + "alias": "get-preprocessed-dataset-openimages", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "deps": [ + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3" + }, + { + "names": [ + "original-dataset" + ], + "tags": "get,dataset,object-detection,openimages,original" + }, + { + "names": [ + "inference-src" + ], + "tags": "mlperf,mlcommons,inference,source,src" + }, + { + "names": [ + "pycocotools" + ], + "tags": "get,generic-python-lib,_pycocotools" + }, + { + "tags": "get,generic-python-lib,_opencv-python" + }, + { + "tags": "get,generic-python-lib,_pillow" + }, + { + "tags": "get,generic-python-lib,_package.ujson" + }, + { + "names": [ "numpy" ], + "tags": "get,generic-python-lib,_numpy" + }, + { + "names": [ "numpy" ], + "tags": "get,generic-python-lib,_numpy" + } + ], + "default_env": { + "CM_DATASET": "OPENIMAGES", + "CM_DATASET_DTYPE": "fp32", + "CM_DATASET_INPUT_SQUARE_SIDE": "800", + "CM_DATASET_CROP_FACTOR": "100.0", + "CM_DATASET_QUANT_SCALE": "1", + "CM_DATASET_QUANTIZE": "0", + "CM_DATASET_QUANT_OFFSET": "0" + }, + "input_mapping": { + "dir": "CM_DATASET_PREPROCESSED_PATH", + "threads": "CM_NUM_PREPROCESS_THREADS" + }, + "new_env_keys": [ + "CM_DATASET_*" + ], + "tags": [ + "get", + "dataset", + "openimages", + "open-images", + "object-detection", + "preprocessed" + ], + "uid": "9842f1be8cba4c7b", + "variations": { + "generic-preprocessor": { + "group": "preprocessing-source", + "deps": [ + { + "tags": "get,generic-python-lib,_torch", + "names": [ + "torch", "pytorch" + ] + }, + { + "tags": "get,generic-python-lib,_torchvision", + "names": [ + "torchvision" + ] + } + ], + "prehook_deps": [ + { + "tags": "get,generic,image-preprocessor" + } + ], + "env": { + "CM_DATASET_REFERENCE_PREPROCESSOR": "0" + } + }, + "mlcommons-reference-preprocessor": { + "group": "preprocessing-source", + "default": true, + "env": { + "CM_DATASET_REFERENCE_PREPROCESSOR": "1" + } + }, + "size.#": { + "group": "dataset-count", + "env": { + "CM_DATASET_SIZE": "#" + }, + "ad": { + "original-dataset": { + "tags": "_size.#" + } + } + }, + "50": { + "group": "dataset-count", + "default": true, + "env": { + "CM_DATASET_SIZE": "50" + }, + "ad": { + "original-dataset": { + "tags": "_50" + } + } + }, + "500": { + "group": "dataset-count", + "env": { + "CM_DATASET_SIZE": "500" + }, + "ad": { + "original-dataset": { + "tags": "_500" + } + } + }, + "full": { + "group": "dataset-count" + }, + "full,validation": { + "ad": { + "original-dataset": { + "tags": "_full" + } + }, + 
"env": { + "CM_DATASET_SIZE": "24781" + } + }, + "NCHW": { + "group": "dataset-layout", + "default": true, + "env": { + "CM_DATASET_DATA_LAYOUT": "NCHW" + } + }, + "NHWC": { + "group": "dataset-layout", + "env": { + "CM_DATASET_DATA_LAYOUT": "NHWC" + } + }, + "calibration": { + "group": "dataset-type", + "ad": { + "original-dataset": { + "tags": "_calibration" + } + }, + "default_variations": { + "dataset-count": "500" + }, + "env": { + "CM_DATASET_PATH": "<<>>", + "CM_DATASET_ANNOTATIONS_FILE_PATH": "<<>>", + "CM_DATASET_TYPE": "calibration" + } + }, + "filter-size.#": { + "group": "filter-size", + "ad": { + "original-dataset": { + "tags": "_filter-size.#" + } + } + }, + "filter": { + "ad": { + "original-dataset": { + "tags": "_filter" + } + } + }, + "filter,calibration": { + "env": { + "CM_DATASET_CALIBRATION_FILTER": "yes" + } + }, + "validation": { + "group": "dataset-type", + "default": true, + "env": { + "CM_DATASET_TYPE": "validation" + }, + "ad": { + "original-dataset": { + "tags": "_validation" + } + } + }, + "nvidia": { + "env": { + "CM_PREPROCESSING_BY_NVIDIA": "yes" + } + }, + "custom-annotations": { + "group": "annotations", + "ad": { + "original-dataset": { + "tags": "_custom-annotations" + } + } + }, + "default-annotations": { + "group": "annotations", + "default": true, + "ad": { + "original-dataset": { + "tags": "_default-annotations" + } + } + }, + "for.retinanet.onnx": { + "default_variations": { + "preprocessing-source": "generic-preprocessor", + "interpolation-method": "inter.linear", + "dataset-layout": "NCHW" + }, + "env": { + "CM_ML_MODEL_NAME": "retinanet", + "CM_DATASET_SUBTRACT_MEANS": "1", + "CM_DATASET_GIVEN_CHANNEL_MEANS": "0.485 0.456 0.406", + "CM_DATASET_GIVEN_CHANNEL_STDS": "0.229 0.224 0.225", + "CM_DATASET_NORMALIZE_DATA": "0", + "CM_DATASET_NORMALIZE_LOWER": "0.0", + "CM_DATASET_NORMALIZE_UPPER": "1.0", + "CM_DATASET_CONVERT_TO_BGR": "0", + "CM_DATASET_CROP_FACTOR": "100.0" + } + }, + "for.retinanet.onnx,fp32": { + "env": { + } + }, + "for.retinanet.onnx,uint8": { + "env": { + "CM_DATASET_QUANT_SCALE": "0.0186584499", + "CM_DATASET_QUANT_OFFSET": "114" + } + }, + "quant-scale.#": { + "const": { + "CM_DATASET_QUANT_SCALE": "#" + } + }, + "quant-offset.#": { + "const": { + "CM_DATASET_QUANT_OFFSET": "#" + } + }, + "inter.linear": { + "group": "interpolation-method", + "env": { + "CM_DATASET_INTERPOLATION_METHOD": "INTER_LINEAR" + } + }, + "inter.area": { + "group": "interpolation-method", + "env": { + "CM_DATASET_INTERPOLATION_METHOD": "INTER_AREA" + } + }, + "rgb8": { + "group": "extension", + "env": { + "CM_DATASET_PREPROCESSED_EXTENSION": "rgb8" + } + }, + "rgb32": { + "group": "extension", + "env": { + "CM_DATASET_PREPROCESSED_EXTENSION": "rgb32" + } + }, + "npy": { + "group": "extension", + "env": { + "CM_DATASET_PREPROCESSED_EXTENSION": "npy" + } + }, + "raw": { + "group": "extension", + "env": { + "CM_DATASET_PREPROCESSED_EXTENSION": "raw" + } + }, + "fp32": { + "group": "dataset-precision", + "default_variations": { + "extension": "raw" + }, + "default": true, + "env": { + "CM_DATASET_DTYPE": "fp32", + "CM_DATASET_INPUT_DTYPE": "fp32", + "CM_DATASET_QUANTIZE": "0", + "CM_DATASET_CONVERT_TO_UNSIGNED": "0" + } + }, + "uint8": { + "group": "dataset-precision", + "default_variations": { + "extension": "rgb8" + }, + "env": { + "CM_DATASET_DTYPE": "uint8", + "CM_DATASET_INPUT_DTYPE": "fp32", + "CM_DATASET_QUANTIZE": "1", + "CM_DATASET_CONVERT_TO_UNSIGNED": "1" + } + }, + "int8": { + "group": "dataset-precision", + "default_variations": { + "extension": 
"rgb8" + }, + "env": { + "CM_DATASET_DTYPE": "int8", + "CM_DATASET_INPUT_DTYPE": "fp32", + "CM_DATASET_QUANTIZE": "1", + "CM_DATASET_CONVERT_TO_UNSIGNED": "0" + } + } + } +} diff --git a/script/get-preprocessed-dataset-openimages/customize.py b/script/get-preprocessed-dataset-openimages/customize.py new file mode 100644 index 0000000000..fd2adcb5f6 --- /dev/null +++ b/script/get-preprocessed-dataset-openimages/customize.py @@ -0,0 +1,50 @@ +from cmind import utils +import os +import shutil +import glob + +def preprocess(i): + + env = i['env'] + + if 'CM_DATASET_PREPROCESSED_PATH' not in env: + env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd() + + if env.get('CM_DATASET_REFERENCE_PREPROCESSOR',"0") == "1": + print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + + if env.get('CM_ML_MODEL_NAME', '') == 'retinanet': + if env.get('CM_DATASET_QUANTIZE', '') == '1': + if env.get('CM_QAIC_MODEL_RETINANET_IMAGE_SCALE', '') != '': + env['CM_DATASET_QUANT_SCALE'] = env['CM_QAIC_MODEL_RETINANET_IMAGE_SCALE'] + if env.get('CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET', '') != '': + env['CM_DATASET_QUANT_OFFSET'] = env['CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET'] + + return {'return': 0} + +def postprocess(i): + + env = i['env'] + + if env["CM_DATASET_TYPE"] == "validation": + env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join(env['CM_DATASET_PREPROCESSED_PATH'], "annotations") + env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join(env['CM_DATASET_ANNOTATIONS_DIR_PATH'], "openimages-mlperf.json") + + # finalize path + preprocessed_path = env['CM_DATASET_PREPROCESSED_PATH'] + preprocessed_images_list = [] + preprocessed_imagenames_list = [] + + match_text = "/*."+env.get("CM_DATASET_PREPROCESSED_EXTENSION","*") + for filename in sorted(glob.glob(preprocessed_path + match_text)): + preprocessed_images_list.append(filename) + preprocessed_imagenames_list.append(os.path.basename(filename)) + with open("preprocessed_files.txt", "w") as f: + f.write("\n".join(preprocessed_images_list)) + with open("preprocessed_filenames.txt", "w") as f: + f.write("\n".join(preprocessed_imagenames_list)) + + env['CM_DATASET_PREPROCESSED_IMAGES_LIST'] = os.path.join(os.getcwd(), "preprocessed_files.txt") + env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'] = os.path.join(os.getcwd(), "preprocessed_filenames.txt") + + return {'return': 0} diff --git a/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py b/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py new file mode 100644 index 0000000000..4df55f3cd4 --- /dev/null +++ b/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import math +import os +from PIL import Image +import shutil + +from code.common.fix_sys_path import ScopedRestrictedImport +with ScopedRestrictedImport(): + import numpy as np + import torch + from torchvision.transforms import functional as F + +from code.common import logging +from code.common.image_preprocessor import ImagePreprocessor, center_crop, resize_with_aspectratio + + +def preprocess_openimage_for_retinanet(data_dir, preprocessed_data_dir, formats, overwrite=False, cal_only=False, val_only=False): + def loader(fpath): + loaded_tensor = F.to_tensor(Image.open(fpath).convert("RGB")) + dtype = torch.float32 + device = torch.device("cpu") + image_size = [800, 800] + image_std = [0.229, 0.224, 0.225] + image_mean = [0.485, 0.456, 0.406] + mean = torch.as_tensor(image_mean, dtype=dtype, device=device) + std = torch.as_tensor(image_std, dtype=dtype, device=device) + img_norm = (loaded_tensor - mean[:, None, None]) / std[:, None, None] + img_resize = torch.nn.functional.interpolate(img_norm[None], size=image_size, scale_factor=None, mode='bilinear', + recompute_scale_factor=None, align_corners=False)[0] + img = img_resize.numpy() + return img + + + def quantizer(image): + # Dynamic range of image is [-2.64064, 2.64064] based on calibration cache. + # Calculated by: + # np.uint32(int("3caa54fc", base=16)).view(np.dtype('float32')).item() * 127.0 + max_abs = 2.64064 + image_int8 = image.clip(-max_abs, max_abs) / max_abs * 127.0 + return image_int8.astype(dtype=np.int8, order='C') + + + preprocessor = ImagePreprocessor(loader, quantizer) + if not val_only: + # Preprocess calibration set. FP32 only because calibrator always takes FP32 input. + preprocessor.run(os.path.join(data_dir, "open-images-v6-mlperf", "calibration", "train", "data"), + os.path.join(preprocessed_data_dir, "open-images-v6-mlperf", "calibration", "Retinanet"), + "data_maps/open-images-v6-mlperf/cal_map.txt", ["fp32"], overwrite) + if not cal_only: + # Preprocess validation set. + preprocessor.run(os.path.join(data_dir, "open-images-v6-mlperf", "validation", "data"), + os.path.join(preprocessed_data_dir, "open-images-v6-mlperf", "validation", "Retinanet"), + "data_maps/open-images-v6-mlperf/val_map.txt", formats, overwrite) + + +def copy_openimage_annotations(data_dir, preprocessed_data_dir): + src_dir = os.path.join(data_dir, "open-images-v6-mlperf/annotations") + dst_dir = os.path.join(preprocessed_data_dir, "open-images-v6-mlperf/annotations") + if not os.path.exists(dst_dir): + shutil.copytree(src_dir, dst_dir) + + +def main(): + # Parse arguments to identify the data directory with the input images + # and the output directory for the preprocessed images. + # The data directory is assumed to have the following structure: + # + # └── open-images-v6-mlperf + # ├── annotations + # ├── calibration + # └── validation + # And the output directory will have the following structure: + # + # └── open-images-v6-mlperf + # ├── annotations + # ├── calibration + # │ └── Retinanet + # │ └── fp32 + # └── validation + # └── Retinanet + # └── int8_linear + parser = argparse.ArgumentParser() + parser.add_argument( + "--data_dir", "-d", + help="Specifies the directory containing the input images.", + default="build/data" + ) + parser.add_argument( + "--preprocessed_data_dir", "-o", + help="Specifies the output directory for the preprocessed data.", + default="build/preprocessed_data" + ) + parser.add_argument( + "--formats", "-t", + help="Comma-separated list of formats. 
Choices: fp32, int8_linear, int8_chw4.", + default="default" + ) + parser.add_argument( + "--overwrite", "-f", + help="Overwrite existing files.", + action="store_true" + ) + parser.add_argument( + "--cal_only", + help="Only preprocess calibration set.", + action="store_true" + ) + parser.add_argument( + "--val_only", + help="Only preprocess validation set.", + action="store_true" + ) + args = parser.parse_args() + data_dir = args.data_dir + preprocessed_data_dir = args.preprocessed_data_dir + formats = args.formats.split(",") + overwrite = args.overwrite + cal_only = args.cal_only + val_only = args.val_only + default_formats = ["int8_linear"] + + # Now, actually preprocess the input images + logging.info("Loading and preprocessing images. This might take a while...") + if args.formats == "default": + formats = default_formats + preprocess_openimage_for_retinanet(data_dir, preprocessed_data_dir, formats, overwrite, cal_only, val_only) + + # Copy annotations from data_dir to preprocessed_data_dir. + copy_openimage_annotations(data_dir, preprocessed_data_dir) + + logging.info("Preprocessing done.") + + +if __name__ == '__main__': + main() diff --git a/script/get-preprocessed-dataset-openimages/preprocess.py b/script/get-preprocessed-dataset-openimages/preprocess.py new file mode 100644 index 0000000000..b2b05fe1dc --- /dev/null +++ b/script/get-preprocessed-dataset-openimages/preprocess.py @@ -0,0 +1,47 @@ +import os +import sys +import os.path + +mlperf_src_path = os.environ['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'] +python_path = os.path.join(mlperf_src_path, "python") +sys.path.insert(0, python_path) + +import openimages +import dataset +import shutil + + +dataset_path = os.environ['CM_DATASET_PATH'] +preprocessed_dir = os.environ.get('CM_DATASET_PREPROCESSED_PATH', os.getcwd()) + +if os.environ.get('CM_DATASET_REFERENCE_PREPROCESSOR', '1') == "0": + #import generic_preprocess + #generic_preprocess.preprocess() + import preprocess_object_detection_dataset as pp + pp.preprocess() +else: + dataset_list = os.environ.get('CM_DATASET_ANNOTATIONS_FILE_PATH', None) + img_format = os.environ.get('CM_DATASET_DATA_LAYOUT', 'NHWC') + count = int(os.environ.get('CM_DATASET_SIZE', 0)) or None + image_width = int(os.environ.get('CM_DATASET_OPENIMAGES_RESIZE', 800)) + threads = os.environ.get('CM_NUM_THREADS', os.cpu_count()) + threads = os.environ.get('CM_NUM_PREPROCESS_THREADS', threads) + name="openimages-" + str(image_width) + "-retinanet" + + openimages.OpenImages(data_path=dataset_path, + image_list=dataset_list, + name=name, + image_format=img_format, + pre_process = dataset.pre_process_openimages_retinanet, + use_cache=True, + image_size=[image_width, image_width, 3], + count=count, + threads=threads, + preprocessed_dir=preprocessed_dir) + +if os.environ["CM_DATASET_TYPE"] == "validation": + src_path=os.environ.get('CM_DATASET_ANNOTATIONS_DIR_PATH', os.path.join(dataset_path, "annotations")) + dest_path=os.path.join(preprocessed_dir, "annotations") + + if not os.path.exists(dest_path): + shutil.copytree(src_path, dest_path) diff --git a/script/get-preprocessed-dataset-openimages/run.bat b/script/get-preprocessed-dataset-openimages/run.bat new file mode 100644 index 0000000000..f3ccd2da7b --- /dev/null +++ b/script/get-preprocessed-dataset-openimages/run.bat @@ -0,0 +1 @@ +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\preprocess.py diff --git a/script/get-preprocessed-dataset-openimages/run.sh b/script/get-preprocessed-dataset-openimages/run.sh new file mode 100644 index 
0000000000..aa660b693e --- /dev/null +++ b/script/get-preprocessed-dataset-openimages/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/preprocess.py diff --git a/script/get-preprocessed-dataset-openorca/README.md b/script/get-preprocessed-dataset-openorca/README.md new file mode 100644 index 0000000000..44386e3a24 --- /dev/null +++ b/script/get-preprocessed-dataset-openorca/README.md @@ -0,0 +1,180 @@ +Automatically generated README for this automation recipe: **get-preprocessed-dataset-openorca** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-preprocessed-dataset-openorca,5614c39cb1564d72) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openorca)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,dataset,openorca,language-processing,preprocessed* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get dataset openorca language-processing preprocessed" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,dataset,openorca,language-processing,preprocessed` + +`cm run script --tags=get,dataset,openorca,language-processing,preprocessed[,variations] ` + +*or* + +`cmr "get dataset openorca language-processing preprocessed"` + +`cmr "get dataset openorca language-processing preprocessed [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,dataset,openorca,language-processing,preprocessed', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
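+If the call succeeds, the environment keys exported by this script (see "Script output" below) can be read back from the result. A minimal sketch, assuming the `new_env` result key of the `script` automation carries them:
+
+```python
+import cmind
+
+# Run the recipe and read back the location of the preprocessed OpenOrca set.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,openorca,language-processing,preprocessed',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+else:
+    # 'new_env' is assumed to expose the keys listed under "Script output".
+    print(r.get('new_env', {}).get('CM_DATASET_PREPROCESSED_PATH', ''))
+```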
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,openorca,language-processing,preprocessed"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,openorca,language-processing,preprocessed) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset openorca language-processing preprocessed[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**dataset-type**" +
+ Click here to expand this section. + + * `_calibration` + - Environment variables: + - *CM_DATASET_CALIBRATION*: `yes` + - Workflow: + * **`_validation`** (default) + - Environment variables: + - *CM_DATASET_CALIBRATION*: `no` + - Workflow: + +
+ + + * Group "**size**" +
+ Click here to expand this section. + + * **`_60`** (default) + - Workflow: + * `_full` + - Workflow: + * `_size.#` + - Workflow: + +
+ + +#### Default variations + +`_60,_validation` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_DATASET_CALIBRATION: `no` + +
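+As noted above, these defaults can be overridden per run. A hedged sketch, assuming the `env` input dictionary of `cmind.access` maps onto `--env.KEY=VALUE` from the command line:
+
+```python
+import cmind
+
+# Override the default CM_DATASET_CALIBRATION key for this run; note that
+# the _calibration variation is the canonical way to request the calibration set.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,openorca,language-processing,preprocessed',
+                  'env': {'CM_DATASET_CALIBRATION': 'yes'},
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```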
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openorca/_cm.json)*** + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,dataset,original,openorca + * CM names: `--adr.['openorca-original', 'dataset-original']...` + - CM script: [get-dataset-openorca](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openorca) + * mlperf,inference,source + * CM names: `--adr.['inference-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + * get,generic-python-lib,_package.pyarrow + * CM names: `--adr.['pyarrow']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.fastparquet + * CM names: `--adr.['fastparquet']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,ml-model,llama2 + - CM script: [get-ml-model-llama2](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-llama2) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openorca/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openorca/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openorca/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openorca/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openorca/customize.py)*** + 1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-openorca/_cm.json) + +___ +### Script output +`cmr "get dataset openorca language-processing preprocessed [,variations]" -j` +#### New environment keys (filter) + +* `CM_DATASET_PREPROCESSED_PATH` +#### New environment keys auto-detected from customize + +* `CM_DATASET_PREPROCESSED_PATH` \ No newline at end of file diff --git a/script/get-preprocessed-dataset-openorca/_cm.json b/script/get-preprocessed-dataset-openorca/_cm.json new file mode 100644 index 0000000000..bd322112ce --- /dev/null +++ b/script/get-preprocessed-dataset-openorca/_cm.json @@ -0,0 +1,108 @@ +{ + "alias": "get-preprocessed-dataset-openorca", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML datasets", + "category_sort": 8500, + "default_env": { + "CM_DATASET_CALIBRATION": "no" + }, + "deps": [ + { + "tags": "get,sys-utils-cm" + }, + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "names": [ + "openorca-original", + "dataset-original" + ], + "tags": "get,dataset,original,openorca" + }, + { + "force_env_keys": [ + "CM_GIT_*" + ], + "names": [ + "inference-src" + ], + "tags": "mlperf,inference,source" + }, + { + "tags": "get,generic-python-lib,_package.pyarrow", + "names": [ + "pyarrow" + ] + }, + { + "tags": "get,generic-python-lib,_package.fastparquet", + "names": [ + "fastparquet" + ] + }, + { + "tags": "get,ml-model,llama2" + } + ], + "env": { + "CM_DATASET": "OPENORCA" + }, + "new_env_keys": [ + "CM_DATASET_PREPROCESSED_PATH" + ], + "tags": [ + "get", + "dataset", + "openorca", + "language-processing", + "preprocessed" + ], + "uid": "5614c39cb1564d72", + "variations": { + "60": { + "default": true, + "ad": { + "dataset-original": { + "tags": "_60" + } + }, + "group": "size" + }, + "calibration": { + "env": { + "CM_DATASET_CALIBRATION": "yes" + }, + "group": "dataset-type" + }, + "full": { + "ad": { + "dataset-original": { + "tags": "_full" + } + }, + "group": "size" + }, + "size.#": { + "ad": { + "dataset-original": { + "tags": "_size.#" + } + }, + "group": "size" + }, + "validation": { + "default": true, + "env": { + "CM_DATASET_CALIBRATION": "no" + }, + "group": "dataset-type" + } + } +} diff --git a/script/get-preprocessed-dataset-openorca/customize.py b/script/get-preprocessed-dataset-openorca/customize.py new file mode 100644 index 0000000000..b5a3219dad --- /dev/null +++ b/script/get-preprocessed-dataset-openorca/customize.py @@ -0,0 +1,25 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + env = i['env'] + inference_src = env['CM_MLPERF_INFERENCE_SOURCE'] + + run_dir = os.path.join(inference_src, 'language', 'llama2-70b') + model_dir = env['CM_ML_MODEL_PATH'] + run_cmd = env['CM_PYTHON_BIN_WITH_PATH'] + ' processorca.py --dataset_pq_path=' + env['CM_DATASET_OPENORCA_PARQUET'] + ' --model_dir=' + model_dir +' --seqlen_limit=2048 --export_dir=' + os.path.join(os.getcwd(), "processed-openorca") + ' --num_total_samples=' + env['CM_DATASET_SIZE'] + + env['CM_RUN_DIR'] = run_dir + env['CM_RUN_CMD'] = run_cmd + + + + return {'return': 0} + +def postprocess(i): + env = i['env'] + env['CM_DATASET_PREPROCESSED_PATH'] = os.path.join(os.path.join(os.getcwd(), "processed-openorca", 'open_orca_gpt4_tokenized_llama.sampled_'+env['CM_DATASET_SIZE']+'.pkl')) + + return {'return': 0} diff --git a/script/get-preprocessed-dataset-openorca/run.sh 
b/script/get-preprocessed-dataset-openorca/run.sh new file mode 100644 index 0000000000..38fe6d64be --- /dev/null +++ b/script/get-preprocessed-dataset-openorca/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +cd ${CM_RUN_DIR} +echo "${CM_RUN_CMD}" +eval "${CM_RUN_CMD}" diff --git a/script/get-preprocessed-dataset-squad/README.md b/script/get-preprocessed-dataset-squad/README.md new file mode 100644 index 0000000000..862ccfbc31 --- /dev/null +++ b/script/get-preprocessed-dataset-squad/README.md @@ -0,0 +1,240 @@ +Automatically generated README for this automation recipe: **get-preprocessed-dataset-squad** + +Category: **AI/ML datasets** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-preprocessed-dataset-squad,7cd1d9b7e8af4788) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-squad)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *get,dataset,preprocessed,tokenized,squad* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get dataset preprocessed tokenized squad" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,dataset,preprocessed,tokenized,squad` + +`cm run script --tags=get,dataset,preprocessed,tokenized,squad[,variations] ` + +*or* + +`cmr "get dataset preprocessed tokenized squad"` + +`cmr "get dataset preprocessed tokenized squad [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,dataset,preprocessed,tokenized,squad', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
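+After a successful run, the tokenized artifacts listed under "Script output" below should be retrievable from the result; a short sketch, assuming the `new_env` result key carries them:
+
+```python
+import cmind
+
+# Tokenize SQuAD and read back the generated artifact paths.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,preprocessed,tokenized,squad',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+else:
+    new_env = r.get('new_env', {})  # assumed carrier of the exported keys
+    print(new_env.get('CM_DATASET_SQUAD_TOKENIZED_INPUT_IDS', ''))
+    print(new_env.get('CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH', ''))
+```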
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,dataset,preprocessed,tokenized,squad"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,dataset,preprocessed,tokenized,squad) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get dataset preprocessed tokenized squad[variations]" ` + +___ +### Customization + + +#### Variations + + * Group "**calibration-set**" +
+ Click here to expand this section. + + * `_calib1` + - Environment variables: + - *CM_DATASET_SQUAD_CALIBRATION_SET*: `one` + - Workflow: + * `_calib2` + - Environment variables: + - *CM_DATASET_SQUAD_CALIBRATION_SET*: `two` + - Workflow: + * **`_no-calib`** (default) + - Environment variables: + - *CM_DATASET_SQUAD_CALIBRATION_SET*: `` + - Workflow: + +
+ + + * Group "**doc-stride**" +
+ Click here to expand this section. + + * `_doc-stride.#` + - Environment variables: + - *CM_DATASET_DOC_STRIDE*: `#` + - Workflow: + * **`_doc-stride.128`** (default) + - Environment variables: + - *CM_DATASET_DOC_STRIDE*: `128` + - Workflow: + +
+ + + * Group "**packing**" +
+ Click here to expand this section. + + * `_packed` + - Environment variables: + - *CM_DATASET_SQUAD_PACKED*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,preprocessed,squad,_pickle + - CM script: [get-preprocessed-dataset-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-squad) + +
+ + + * Group "**raw**" +
+ Click here to expand this section. + + * `_pickle` + - Environment variables: + - *CM_DATASET_RAW*: `no` + - Workflow: + * **`_raw`** (default) + - Environment variables: + - *CM_DATASET_RAW*: `yes` + - Workflow: + +
+ + + * Group "**seq-length**" +
+ Click here to expand this section. + + * `_seq-length.#` + - Environment variables: + - *CM_DATASET_MAX_SEQ_LENGTH*: `#` + - Workflow: + * **`_seq-length.384`** (default) + - Environment variables: + - *CM_DATASET_MAX_SEQ_LENGTH*: `384` + - Workflow: + +
+ + +#### Default variations + +`_doc-stride.128,_no-calib,_raw,_seq-length.384` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
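+The wildcard variations above take the value given after the dot, so a non-default tokenization geometry can be requested purely through tags. A hypothetical example (the values 512 and 192 are illustrative, not defaults):
+
+```python
+import cmind
+
+# _seq-length.# and _doc-stride.# set CM_DATASET_MAX_SEQ_LENGTH and
+# CM_DATASET_DOC_STRIDE to the value given after the dot.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,dataset,preprocessed,tokenized,squad,_seq-length.512,_doc-stride.192',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```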
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-squad/_cm.yaml)*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,mlperf,inference,src + * CM names: `--adr.['inference-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + * get,squad,dataset,original + * CM names: `--adr.['squad-dataset']...` + - CM script: [get-dataset-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-squad) + * get,squad,vocab + * CM names: `--adr.['squad-vocab']...` + - CM script: [get-bert-squad-vocab](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-bert-squad-vocab) + * get,generic-python-lib,_package.tokenization + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.transformers + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.tensorflow + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-squad/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-squad/_cm.yaml) + 1. ***Run native script if exists*** + * [run-packed.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-squad/run-packed.sh) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-squad/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-squad/_cm.yaml) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-squad/customize.py)*** + 1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-preprocessed-dataset-squad/_cm.yaml) + +___ +### Script output +`cmr "get dataset preprocessed tokenized squad [,variations]" -j` +#### New environment keys (filter) + +* `CM_DATASET_SQUAD_TOKENIZED_*` +#### New environment keys auto-detected from customize + +* `CM_DATASET_SQUAD_TOKENIZED_DOC_STRIDE` +* `CM_DATASET_SQUAD_TOKENIZED_INPUT_IDS` +* `CM_DATASET_SQUAD_TOKENIZED_INPUT_MASK` +* `CM_DATASET_SQUAD_TOKENIZED_MAX_QUERY_LENGTH` +* `CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH` +* `CM_DATASET_SQUAD_TOKENIZED_PACKED_FILENAMES_FILE` +* `CM_DATASET_SQUAD_TOKENIZED_PICKLE_FILE` +* `CM_DATASET_SQUAD_TOKENIZED_ROOT` +* `CM_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS` \ No newline at end of file diff --git a/script/get-preprocessed-dataset-squad/_cm.yaml b/script/get-preprocessed-dataset-squad/_cm.yaml new file mode 100644 index 0000000000..cff348c266 --- /dev/null +++ b/script/get-preprocessed-dataset-squad/_cm.yaml @@ -0,0 +1,93 @@ +uid: 7cd1d9b7e8af4788 +alias: get-preprocessed-dataset-squad + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: AI/ML datasets + +deps: + - tags: get,python3 + names: + - python + - python3 + - tags: get,mlperf,inference,src + names: + - inference-src + - tags: get,squad,dataset,original + names: + - squad-dataset + - tags: get,squad,vocab + names: + - squad-vocab + - tags: get,generic-python-lib,_package.tokenization + - tags: get,generic-python-lib,_package.transformers + - tags: get,generic-python-lib,_package.tensorflow + +env: + CM_DATASET_MAX_QUERY_LENGTH: 64 + +new_env_keys: + - CM_DATASET_SQUAD_TOKENIZED_* + +tags: +- get +- dataset +- preprocessed +- tokenized +- squad + +variations: + calib1: + group: calibration-set + env: + CM_DATASET_SQUAD_CALIBRATION_SET: one + calib2: + group: calibration-set + env: + CM_DATASET_SQUAD_CALIBRATION_SET: two + no-calib: + group: calibration-set + default: true + env: + CM_DATASET_SQUAD_CALIBRATION_SET: '' + raw: + group: raw + default: true + env: + CM_DATASET_RAW: "yes" + pickle: + group: raw + env: + CM_DATASET_RAW: "no" + seq-length.#: + group: seq-length + env: + CM_DATASET_MAX_SEQ_LENGTH: "#" + seq-length.384: + group: seq-length + default: true + env: + CM_DATASET_MAX_SEQ_LENGTH: 384 + doc-stride.#: + group: doc-stride + env: + CM_DATASET_DOC_STRIDE: "#" + doc-stride.128: + group: doc-stride + default: true + env: + CM_DATASET_DOC_STRIDE: 128 + packed: + group: packing + env: + CM_DATASET_SQUAD_PACKED: 'yes' + deps: + - tags: get,preprocessed,squad,_pickle + env: + CM_DATASET_SQUAD_PACKED: '' + inherit_variation_tags: true + skip_inherit_variation_groups: + - packing diff --git a/script/get-preprocessed-dataset-squad/customize.py b/script/get-preprocessed-dataset-squad/customize.py new file mode 100644 index 0000000000..8e0ff47db5 --- /dev/null +++ b/script/get-preprocessed-dataset-squad/customize.py @@ -0,0 +1,62 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_DATASET_SQUAD_CALIBRATION_SET', '') == "one": + env['DATASET_CALIBRATION_FILE'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], 'calibration', 'SQuAD-v1.1', 'bert_calibration_features.txt') + env['DATASET_CALIBRATION_ID'] = 1 + elif env.get('CM_DATASET_SQUAD_CALIBRATION_SET', '') == "two": + env['DATASET_CALIBRATION_FILE'] = 
os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], 'calibration', 'SQuAD-v1.1', 'bert_calibration_qas_ids.txt') + env['DATASET_CALIBRATION_ID'] = 2 + else: + env['DATASET_CALIBRATION_FILE'] = "''" + env['DATASET_CALIBRATION_ID'] = 0 + + env['CK_ENV_MLPERF_INFERENCE'] = env['CM_MLPERF_INFERENCE_SOURCE'] + + if env.get('CM_DATASET_SQUAD_PACKED', '') == "yes": + i['run_script_input']['script_name'] = "run-packed" + if env.get('+PYTHONPATH', '') == '': + env['+PYTHONPATH'] = [] + + env['+PYTHONPATH'].append(env['CM_MLPERF_INFERENCE_BERT_PATH']) + + return {'return':0} + +def postprocess(i): + + env = i['env'] + cur = os.getcwd() + + if env.get('CM_DATASET_SQUAD_PACKED', '') != "yes": + env['CM_DATASET_SQUAD_TOKENIZED_ROOT'] = cur + if env.get('CM_DATASET_RAW', '') == "yes": + env['CM_DATASET_SQUAD_TOKENIZED_INPUT_IDS'] = os.path.join(cur, 'bert_tokenized_squad_v1_1_input_ids.raw') + env['CM_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS'] = os.path.join(cur, 'bert_tokenized_squad_v1_1_segment_ids.raw') + env['CM_DATASET_SQUAD_TOKENIZED_INPUT_MASK'] = os.path.join(cur, 'bert_tokenized_squad_v1_1_input_mask.raw') + else: + env['CM_DATASET_SQUAD_TOKENIZED_PICKLE_FILE'] = os.path.join(cur, 'bert_tokenized_squad_v1_1.pickle') + + env['CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH'] = env['CM_DATASET_MAX_SEQ_LENGTH'] + env['CM_DATASET_SQUAD_TOKENIZED_DOC_STRIDE'] = env['CM_DATASET_DOC_STRIDE'] + env['CM_DATASET_SQUAD_TOKENIZED_MAX_QUERY_LENGTH'] = env['CM_DATASET_MAX_QUERY_LENGTH'] + + else: + with open("packed_filenames.txt", "w") as f: + for dirname in os.listdir(cur): + if os.path.isdir(dirname) and not dirname.startswith("_"): + f.write(os.path.join(cur, dirname, "input_ids.raw") + "," + os.path.join(cur, dirname, "input_mask.raw") + "," + os.path.join(cur, dirname, "segment_ids.raw") + "," + os.path.join(cur, dirname, "input_position_ids.raw")+ "\n") + env['CM_DATASET_SQUAD_TOKENIZED_PACKED_FILENAMES_FILE'] = os.path.join(cur, "packed_filenames.txt") + + return {'return':0} diff --git a/script/get-preprocessed-dataset-squad/run-packed.sh b/script/get-preprocessed-dataset-squad/run-packed.sh new file mode 100644 index 0000000000..776c351425 --- /dev/null +++ b/script/get-preprocessed-dataset-squad/run-packed.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... 
+# run "$CM_RUN_CMD" +CUR=$PWD + +run "wget --no-check-certificate -nc https://raw.githubusercontent.com/graphcore/examples/v3.2.0/tutorials/blogs_code/packedBERT/spfhp.py" +run "wget --no-check-certificate -nc https://raw.githubusercontent.com/arjunsuresh/ck-qaic/main/package/model-qaic-calibrate-bert/pack.py" +run "${CM_PYTHON_BIN_WITH_PATH} pack.py ${CM_DATASET_SQUAD_TOKENIZED_PICKLE_FILE} ./ ${CM_DATASET_MAX_SEQ_LENGTH}" diff --git a/script/get-preprocessed-dataset-squad/run.sh b/script/get-preprocessed-dataset-squad/run.sh new file mode 100644 index 0000000000..94b008eac9 --- /dev/null +++ b/script/get-preprocessed-dataset-squad/run.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" +CUR=$PWD +run "wget --no-check-certificate -nc https://raw.githubusercontent.com/krai/ck-mlperf/master/package/dataset-squad-tokenized_for_bert/tokenize_and_pack.py" + +run "${CM_PYTHON_BIN_WITH_PATH} tokenize_and_pack.py \ + ${CM_DATASET_SQUAD_VAL_PATH} \ + ${CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH} \ + ${CUR}/bert_tokenized_squad_v1_1 \ + ${CM_DATASET_MAX_SEQ_LENGTH} \ + ${CM_DATASET_MAX_QUERY_LENGTH} \ + ${CM_DATASET_DOC_STRIDE} \ + ${CM_DATASET_RAW} \ + ${DATASET_CALIBRATION_FILE} \ + ${DATASET_CALIBRATION_ID}" + diff --git a/script/get-python3/README-extra.md b/script/get-python3/README-extra.md new file mode 100644 index 0000000000..fcf6890788 --- /dev/null +++ b/script/get-python3/README-extra.md @@ -0,0 +1,70 @@ +# Detect or install python + +## New ENV + +* CM_PYTHON_BIN +* CM_PYTHON_BIN_WITH_PATH +* CM_PYTHON_VERSION +* CM_PYTHON_CACHE_TAGS + +* PATH +* LD_LIBRARY_PATH +* C_INCLUDE_PATH + +## New state + + +# CLI + +## Default +```bash +cm run script "get python" +``` +or +```bash +cm run script --tags=get,python +``` + +## Version + +```bash +cm run script "get python" --version=3.10.6 +``` + +## Version min +```bash +cm run script "get python" --version_min=3.9 +``` + +## Version max +```bash +cm run script "get python" --version_max=3.9.999 --version_max_usable=3.9.12 +``` + +## Detect python3 in non-standard path +```bash +cm run script "get python" --path={directory with python3} +``` + +### Detect python with non-standard name +```bash +cm run script "get python" --input={full path to python} +``` + +## Force new detection even if python is already found and cached +```bash +cm run script "get python" --new +``` + +## Test + +```bash +cm run script "print python hello-world" +``` + +## Reproducibility matrix + +*Test detection and installation on different platforms:* + +* Windows, Linux, MacOS + diff --git a/script/get-python3/README.md b/script/get-python3/README.md new file mode 100644 index 0000000000..227637668a --- /dev/null +++ b/script/get-python3/README.md @@ -0,0 +1,170 @@ +Automatically generated README for this automation recipe: **get-python3** + +Category: **Python automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM 
script](https://access.cknowledge.org/playground/?action=scripts&name=get-python3,d0b5dd74373f4a62) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-python3)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,python,python3,get-python,get-python3* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get python python3 get-python get-python3" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,python,python3,get-python,get-python3` + +`cm run script --tags=get,python,python3,get-python,get-python3[,variations] ` + +*or* + +`cmr "get python python3 get-python get-python3"` + +`cmr "get python python3 get-python get-python3 [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,python,python3,get-python,get-python3', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
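+On success, the detected interpreter can be read back from the result; a minimal sketch, assuming the `new_env` result key exposes the keys listed under "Script output" below:
+
+```python
+import cmind
+
+# Detect (or install) python3 and print its resolved path and version.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,python,python3,get-python,get-python3',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+else:
+    new_env = r.get('new_env', {})  # assumed carrier of the exported keys
+    print(new_env.get('CM_PYTHON_BIN_WITH_PATH', ''))
+    print(new_env.get('CM_PYTHON_VERSION', ''))
+```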
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,python,python3,get-python,get-python3"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,python,python3,get-python,get-python3) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get python python3 get-python get-python3[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_conda.#` + - Environment variables: + - *CM_PYTHON_CONDA*: `yes` + - *CM_PYTHON_INSTALL_CACHE_TAGS*: `_conda.#` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic,conda-package,_name.#,_package.python + * CM names: `--adr.['conda-package', 'conda-python']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * `_custom-path.#` + - Environment variables: + - *CM_PYTHON_BIN_WITH_PATH*: `#` + - Workflow: + * `_lto` + - Workflow: + * `_optimized` + - Workflow: + * `_shared` + - Workflow: + * `_with-custom-ssl` + - Workflow: + * `_with-ssl` + - Workflow: + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
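+The version-related flags shown in README-extra.md above (`--version`, `--version_min`, `--version_max`) should also work as input keys from Python; a hedged sketch, assuming they map one-to-one:
+
+```python
+import cmind
+
+# Require at least Python 3.9; if detection fails, the script falls back
+# to installing Python from source via its prehook dependency.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,python,python3,get-python,get-python3',
+                  'version_min': '3.9',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```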
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-python3/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-python3/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-python3/_cm.json)*** + * install,python,src + * `if (CM_REQUIRE_INSTALL == yes)` + - CM script: [install-python-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-python-src) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-python3/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-python3/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-python3/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-python3/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-python3/_cm.json) + +___ +### Script output +`cmr "get python python3 get-python get-python3 [,variations]" -j` +#### New environment keys (filter) + +* `+C_INCLUDE_PATH` +* `+LD_LIBRARY_PATH` +* `+PATH` +* `CM_PYTHON_*` +#### New environment keys auto-detected from customize + +* `CM_PYTHON_BIN` +* `CM_PYTHON_BIN_PATH` +* `CM_PYTHON_BIN_WITH_PATH` +* `CM_PYTHON_CACHE_TAGS` +* `CM_PYTHON_MAJOR_VERSION` +* `CM_PYTHON_MINOR_VERSION` +* `CM_PYTHON_PATCH_VERSION` \ No newline at end of file diff --git a/script/get-python3/_cm.json b/script/get-python3/_cm.json new file mode 100644 index 0000000000..470fff1004 --- /dev/null +++ b/script/get-python3/_cm.json @@ -0,0 +1,75 @@ +{ + "alias": "get-python3", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Python automation", + "clean_files": [], + "extra_cache_tags_from_env": [ + { + "env": "CM_PYTHON_INSTALL_CACHE_TAGS", + "prefix": "python-" + } + ], + "new_env_keys": [ + "CM_PYTHON_*", + "+LD_LIBRARY_PATH", + "+C_INCLUDE_PATH", + "+PATH" + ], + "new_state_keys": [ + "script_prefix" + ], + "prehook_deps": [ + { + "enable_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "inherit_variation_tags": true, + "reuse_version": true, + "tags": "install,python,src" + } + ], + "tags": [ + "get", + "python", + "python3", + "get-python", + "get-python3" + ], + "uid": "d0b5dd74373f4a62", + "variations": { + "custom-path.#": { + "env": { + "CM_PYTHON_BIN_WITH_PATH": "#" + } + }, + "conda.#": { + "deps": [ + { + "tags": "get,generic,conda-package,_name.#,_package.python", + "names": [ + "conda-package", + "conda-python" + ] + } + ], + "adr": { + "pip-package": { + "tags": "_conda.#" + } + }, + "env": { + "CM_PYTHON_CONDA": "yes", + "CM_PYTHON_INSTALL_CACHE_TAGS": "_conda.#" + } + }, + "lto": {}, + "optimized": {}, + "shared": {}, + "with-ssl": {}, + "with-custom-ssl": {} + } +} diff --git a/script/get-python3/customize.py b/script/get-python3/customize.py new file mode 100644 index 0000000000..3e2cc0b6f4 --- /dev/null +++ b/script/get-python3/customize.py @@ -0,0 +1,132 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if env.get('CM_PYTHON_CONDA', '') == 'yes': + env['CM_PYTHON_BIN_WITH_PATH'] = 
os.path.join(env['CM_CONDA_BIN_PATH'], "python") + + recursion_spaces = i['recursion_spaces'] + + # we need to understand whether this script is called first and CM_PYTHON_BIN_WITH_PATH is empty + # then we should search for related artifacts (python in our case) + # or this script is called after install-python* and CM_PYTHON_BIN_WITH_PATH is set there + # then we do not search for an artifact (python) but pick it up from the installation + + if 'CM_PYTHON_BIN_WITH_PATH' not in env: + #file_name = 'python.exe' if os_info['platform'] == 'windows' else 'python[0-9|\.]*$' + file_name = 'python.exe' if os_info['platform'] == 'windows' else 'python3' + extra_paths = {"include" : "+C_INCLUDE_PATH", "lib" : "+LD_LIBRARY_PATH"} + + r = i['automation'].find_artifact({'file_name': file_name, + 'default_path_env_key': 'PATH', + 'env': env, + 'os_info':os_info, + # this key defines env key with paths where to find an artifact + 'detect_version':True, + # the next key is used in run.sh to detect python version + 'env_path_key':'CM_PYTHON_BIN_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':i['recursion_spaces'], + 'extra_paths': extra_paths + }) + if r['return']>0: + if r['return'] == 16 and os_info['platform'] != 'windows': + # If artifact is not found and we are not on windows + # we should try to install python from src + # in prehook_deps + env['CM_REQUIRE_INSTALL'] = "yes" + + return {'return': 0} + else: + return r + + return {'return':0} + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'Python\s*([\d.]+)', + 'group_number': 1, + 'env_key':'CM_PYTHON_VERSION', + 'which_env':i['env']}) + if r['return'] >0: return r + + version = r['version'] + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return':0, 'version':version} + +def postprocess(i): + + env = i['env'] + os_info = i['os_info'] + + r = detect_version(i) + if r['return'] >0: return r + + version = r['version'] + + found_file_path = env['CM_PYTHON_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + + env['CM_PYTHON_BIN'] = os.path.basename(found_file_path) + env['CM_PYTHON_BIN_PATH'] = os.path.dirname(found_file_path) + + # Save tags that can be used to specialize further dependencies (such as python packages) + tags = 'version-'+version + + add_extra_cache_tags = [] + + extra_tags = env.get('CM_EXTRA_CACHE_TAGS','') + if extra_tags != '': + tags += ',' + extra_tags + + # Check if called from virtual env installer + from_virtual = True if 'virtual' in extra_tags.split(',') else False + + if not from_virtual: + tags += ',non-virtual' + + env['CM_PYTHON_CACHE_TAGS'] = tags + + add_extra_cache_tags = tags.split(',') + + # Check if need to add path, include and lib to env + # (if not in default paths) + default_path_list = i['automation'].get_default_path_list(i) + found_path_root = os.path.dirname(found_path) + + if from_virtual: + # Clean PATH (it will be in activate script) + # but keep LD_LIBRARY_PATH and C_INCLUDE_PATH from the native python + for k in ['+PATH']: + if k in env: + del(env[k]) + + elif os_info['platform'] == 'windows': + extra_path = os.path.join(found_path, 'Scripts') + + if extra_path not in default_path_list and extra_path+os.sep not in default_path_list: + paths = env.get('+PATH',[]) + if extra_path not in paths: + paths.append(extra_path) + env['+PATH']=paths + + version_split = version.split(".") + python_major_version = version_split[0] + python_minor_version = version_split[1] + if len(version_split) > 2: + 
python_patch_version = version_split[2] + else: + python_patch_version = '' + + env['CM_PYTHON_MAJOR_VERSION'] = python_major_version + env['CM_PYTHON_MINOR_VERSION'] = python_minor_version + env['CM_PYTHON_PATCH_VERSION'] = python_patch_version + + return {'return':0, 'version': version, 'add_extra_cache_tags':add_extra_cache_tags} diff --git a/script/get-python3/run.bat b/script/get-python3/run.bat new file mode 100644 index 0000000000..515d6849b4 --- /dev/null +++ b/script/get-python3/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% --version > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-python3/run.sh b/script/get-python3/run.sh new file mode 100644 index 0000000000..28cf477f25 --- /dev/null +++ b/script/get-python3/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} --version > tmp-ver.out 2>&1 +test $? -eq 0 || exit 1 + +#PYTHON_BIN_PATH="${python_bin%/*}" +# +#if [[ ! -f ${PYTHON_BIN_PATH}/python ]]; then +# echo "Creating softlink of python to python3" +# cmd="sudo ln -s ${python_bin} ${PYTHON_BIN_PATH}/python" +# echo $cmd +# eval $cmd +#fi diff --git a/script/get-qaic-apps-sdk/README.md b/script/get-qaic-apps-sdk/README.md new file mode 100644 index 0000000000..f235fbfcce --- /dev/null +++ b/script/get-qaic-apps-sdk/README.md @@ -0,0 +1,126 @@ +Automatically generated README for this automation recipe: **get-qaic-apps-sdk** + +Category: **AI/ML frameworks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-qaic-apps-sdk,0a9e206af6764da9) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-apps-sdk)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,detect,qaic,apps,sdk,apps-sdk,qaic-apps-sdk* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get detect qaic apps sdk apps-sdk qaic-apps-sdk" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,detect,qaic,apps,sdk,apps-sdk,qaic-apps-sdk` + +`cm run script --tags=get,detect,qaic,apps,sdk,apps-sdk,qaic-apps-sdk ` + +*or* + +`cmr "get detect qaic apps sdk apps-sdk qaic-apps-sdk"` + +`cmr "get detect qaic apps sdk apps-sdk qaic-apps-sdk " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,detect,qaic,apps,sdk,apps-sdk,qaic-apps-sdk', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
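+A short follow-up, assuming the `new_env` result key exposes the detected paths listed under "Script output" below:
+
+```python
+import cmind
+
+# Detect the QAIC apps SDK and print the resolved qaic-exec path.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,detect,qaic,apps,sdk,apps-sdk,qaic-apps-sdk',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+else:
+    print(r.get('new_env', {}).get('CM_QAIC_EXEC_PATH', ''))
+```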
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,detect,qaic,apps,sdk,apps-sdk,qaic-apps-sdk"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,detect,qaic,apps,sdk,apps-sdk,qaic-apps-sdk) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get detect qaic apps sdk apps-sdk qaic-apps-sdk" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
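+As customize.py below shows, detection checks `--input` (CM_INPUT) for an `exec/qaic-exec` binary before falling back to `/opt/qti-aic/`. A hedged sketch pointing detection at a non-default SDK location (the path is hypothetical):
+
+```python
+import cmind
+
+# 'input' is assumed to map onto --input / CM_INPUT.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,detect,qaic,apps,sdk,apps-sdk,qaic-apps-sdk',
+                  'input': '/custom/qti-aic',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```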
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-apps-sdk/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-apps-sdk/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-apps-sdk/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-apps-sdk/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-apps-sdk/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-apps-sdk/_cm.json) + +___ +### Script output +`cmr "get detect qaic apps sdk apps-sdk qaic-apps-sdk " -j` +#### New environment keys (filter) + +* `+CPLUS_INCLUDE_PATH` +* `+C_INCLUDE_PATH` +* `+DYLD_FALLBACK_LIBRARY_PATH` +* `+LD_LIBRARY_PATH` +* `+PATH` +* `CM_QAIC_EXEC_PATH` +#### New environment keys auto-detected from customize + +* `CM_QAIC_EXEC_PATH` \ No newline at end of file diff --git a/script/get-qaic-apps-sdk/_cm.json b/script/get-qaic-apps-sdk/_cm.json new file mode 100644 index 0000000000..96890123f9 --- /dev/null +++ b/script/get-qaic-apps-sdk/_cm.json @@ -0,0 +1,35 @@ +{ + "alias": "get-qaic-apps-sdk", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML frameworks", + "deps": [ + ], + "input_description": {}, + "input_mapping": {}, + "new_env_keys": [ + "+PATH", + "CM_QAIC_EXEC_PATH", + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "get", + "detect", + "qaic", + "apps", + "sdk", + "apps-sdk", + "qaic-apps-sdk" + ], + "uid": "0a9e206af6764da9", + "variations": {}, + "versions": {} +} diff --git a/script/get-qaic-apps-sdk/customize.py b/script/get-qaic-apps-sdk/customize.py new file mode 100644 index 0000000000..b84d58b178 --- /dev/null +++ b/script/get-qaic-apps-sdk/customize.py @@ -0,0 +1,109 @@ +from cmind import utils +import os +import xml.etree.ElementTree as et + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + apps_sdk_path = None + + if env.get('CM_INPUT','').strip() != '': + path = env['CM_INPUT'] + if os.path.exists(os.path.join(path, "exec", "qaic-exec")): + apps_sdk_path = path + else: + return {'return':1, 'error': 'exec/qaic-exec not found in the input path (--input)'} + else: + path = "/opt/qti-aic/" + if os.path.exists(os.path.join(path, "exec", "qaic-exec")): + apps_sdk_path = path + + if not apps_sdk_path: + return {'return':1, 'error': f'qaic-exec not found in the default path: {path}'} + + env['CM_QAIC_APPS_SDK_PATH'] = path + env['CM_QAIC_EXEC_PATH'] = os.path.join(path, "exec", "qaic-exec") + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +def detect_version(i): + + env = i['env'] + sdk_path = env['CM_QAIC_APPS_SDK_PATH'] + version = None + version_xml_path = os.path.join(sdk_path, "versions", "apps.xml") + version_info = et.parse(version_xml_path) + + versions = version_info.getroot() + build_id = None + + for child1 in versions: + if 
child1.tag == "ci_build": + for child2 in child1: + if child2.tag == "base_version": + version = child2.text + if child2.tag == "build_id": + build_id = child2.text + if build_id: + version=version+"."+build_id + + if not version: + return {'return':1, 'error': f'qaic apps sdk version info not found'} + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return':0, 'version':version} + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] > 0: + return r + + version = r['version'] + + if "+PATH" not in env: + env["+PATH"] = [] + + env['+PATH'].append(os.path.dirname(env['CM_QAIC_EXEC_PATH'])) + + paths = [ + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ] + + for key in paths: + env[key] = [] + + include_paths = [] + lib_paths = [] + + inc_path = os.path.join(env['CM_QAIC_APPS_SDK_PATH'], "dev", "inc") + if os.path.exists(inc_path): + include_paths.append(inc_path) + + for inc_path in include_paths: + env['+C_INCLUDE_PATH'].append(inc_path) + env['+CPLUS_INCLUDE_PATH'].append(inc_path) + + + lib_path = os.path.join(env['CM_QAIC_APPS_SDK_PATH'], "dev", "lib", "x86_64") + if os.path.exists(lib_path): + lib_paths.append(lib_path) + + for lib_path in lib_paths: + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + return {'return':0, 'version': version} diff --git a/script/get-qaic-platform-sdk/README.md b/script/get-qaic-platform-sdk/README.md new file mode 100644 index 0000000000..b63abaa41b --- /dev/null +++ b/script/get-qaic-platform-sdk/README.md @@ -0,0 +1,130 @@ +Automatically generated README for this automation recipe: **get-qaic-platform-sdk** + +Category: **AI/ML frameworks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-qaic-platform-sdk,a60f86918dc9457d) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-platform-sdk)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,detect,qaic,platform,sdk,platform-sdk,qaic-platform-sdk* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get detect qaic platform sdk platform-sdk qaic-platform-sdk" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,detect,qaic,platform,sdk,platform-sdk,qaic-platform-sdk` + +`cm run script --tags=get,detect,qaic,platform,sdk,platform-sdk,qaic-platform-sdk ` + +*or* + +`cmr "get detect qaic platform sdk platform-sdk qaic-platform-sdk"` + +`cmr "get detect qaic platform sdk platform-sdk qaic-platform-sdk " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,detect,qaic,platform,sdk,platform-sdk,qaic-platform-sdk',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+

+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,detect,qaic,platform,sdk,platform-sdk,qaic-platform-sdk"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,detect,qaic,platform,sdk,platform-sdk,qaic-platform-sdk) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get detect qaic platform sdk platform-sdk qaic-platform-sdk" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
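+
+As background for the version detection in this script's `customize.py` (added later in this patch), here is a self-contained sketch of how the SDK version is assembled from `versions/platform.xml`; the XML layout is inferred from that parsing code:
+
+```python
+import os
+import xml.etree.ElementTree as et
+
+def read_sdk_version(sdk_path):
+    # platform.xml holds a <ci_build> node with <base_version> and <build_id> children
+    root = et.parse(os.path.join(sdk_path, "versions", "platform.xml")).getroot()
+    version, build_id = None, None
+    for child1 in root:
+        if child1.tag == "ci_build":
+            for child2 in child1:
+                if child2.tag == "base_version":
+                    version = child2.text
+                elif child2.tag == "build_id":
+                    build_id = child2.text
+    # e.g. base_version "1.2" and build_id "345" -> "1.2.345"
+    return version + "." + build_id if version and build_id else version
+```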
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-platform-sdk/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-platform-sdk/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-platform-sdk/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-platform-sdk/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-platform-sdk/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-platform-sdk/_cm.json) + +___ +### Script output +`cmr "get detect qaic platform sdk platform-sdk qaic-platform-sdk " -j` +#### New environment keys (filter) + +* `+CPLUS_INCLUDE_PATH` +* `+C_INCLUDE_PATH` +* `+DYLD_FALLBACK_LIBRARY_PATH` +* `+LD_LIBRARY_PATH` +* `+PATH` +* `CM_QAIC_RUNNER_PATH` +* `CM_QAIC_TOOLS_PATH` +#### New environment keys auto-detected from customize + +* `CM_QAIC_RUNNER_PATH` +* `CM_QAIC_TOOLS_PATH` \ No newline at end of file diff --git a/script/get-qaic-platform-sdk/_cm.json b/script/get-qaic-platform-sdk/_cm.json new file mode 100644 index 0000000000..0acdad15a5 --- /dev/null +++ b/script/get-qaic-platform-sdk/_cm.json @@ -0,0 +1,39 @@ +{ + "alias": "get-qaic-platform-sdk", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML frameworks", + "deps": [ + { + "tags": "detect,os" + } + ], + "input_description": {}, + "input_mapping": {}, + "new_env_keys": [ + "+PATH", + "CM_QAIC_RUNNER_PATH", + "CM_QAIC_TOOLS_PATH", + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "get", + "detect", + "qaic", + "platform", + "sdk", + "platform-sdk", + "qaic-platform-sdk" + ], + "uid": "a60f86918dc9457d", + "variations": {}, + "versions": {} +} diff --git a/script/get-qaic-platform-sdk/customize.py b/script/get-qaic-platform-sdk/customize.py new file mode 100644 index 0000000000..5a68188bd5 --- /dev/null +++ b/script/get-qaic-platform-sdk/customize.py @@ -0,0 +1,110 @@ +from cmind import utils +import os +import xml.etree.ElementTree as et + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + platform_sdk_path = None + + if env.get('CM_INPUT','').strip() != '': + path = env['CM_INPUT'] + if os.path.exists(os.path.join(path, "exec", "qaic-runner")): + platform_sdk_path = path + else: + return {'return':1, 'error': 'exec/qaic-runner not found in the input path (--input)'} + else: + path = "/opt/qti-aic/" + if os.path.exists(os.path.join(path, "exec", "qaic-runner")): + platform_sdk_path = path + + if not platform_sdk_path: + return {'return':1, 'error': f'qaic-runner not found in the default path: {path}'} + + env['CM_QAIC_PLATFORM_SDK_PATH'] = path + env['CM_QAIC_RUNNER_PATH'] = os.path.join(path, "exec", "qaic-runner") + env['CM_QAIC_TOOLS_PATH'] = 
os.path.join(path, "tools") + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +def detect_version(i): + + env = i['env'] + sdk_path = env['CM_QAIC_PLATFORM_SDK_PATH'] + version = None + version_xml_path = os.path.join(sdk_path, "versions", "platform.xml") + version_info = et.parse(version_xml_path) + + versions = version_info.getroot() + build_id = None + + for child1 in versions: + if child1.tag == "ci_build": + for child2 in child1: + if child2.tag == "base_version": + version = child2.text + if child2.tag == "build_id": + build_id = child2.text + if build_id: + version=version+"."+build_id + + if not version: + return {'return':1, 'error': f'qaic platform sdk version info not found'} + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return':0, 'version':version} + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] > 0: + return r + + version = r['version'] + + if "+PATH" not in env: + env["+PATH"] = [] + + env['+PATH'].append(os.path.dirname(env['CM_QAIC_RUNNER_PATH'])) + + paths = [ + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ] + + for key in paths: + env[key] = [] + + include_paths = [] + lib_paths = [] + + inc_path = os.path.join(env['CM_QAIC_PLATFORM_SDK_PATH'], "dev", "inc") + if os.path.exists(inc_path): + include_paths.append(inc_path) + + for inc_path in include_paths: + env['+C_INCLUDE_PATH'].append(inc_path) + env['+CPLUS_INCLUDE_PATH'].append(inc_path) + + + lib_path = os.path.join(env['CM_QAIC_PLATFORM_SDK_PATH'], "dev", "lib", env['CM_HOST_PLATFORM_FLAVOR']) + if os.path.exists(lib_path): + lib_paths.append(lib_path) + + for lib_path in lib_paths: + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + return {'return':0, 'version': version} diff --git a/script/get-qaic-software-kit/README.md b/script/get-qaic-software-kit/README.md new file mode 100644 index 0000000000..6b78eb1c76 --- /dev/null +++ b/script/get-qaic-software-kit/README.md @@ -0,0 +1,178 @@ +Automatically generated README for this automation recipe: **get-qaic-software-kit** + +Category: **AI/ML frameworks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-qaic-software-kit,3344655922694bbb) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-software-kit)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,qaic,software,kit,qaic-software-kit* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get qaic software kit qaic-software-kit" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,qaic,software,kit,qaic-software-kit` + +`cm run script --tags=get,qaic,software,kit,qaic-software-kit[,variations] ` + +*or* + +`cmr "get qaic software kit qaic-software-kit"` + +`cmr "get qaic software kit qaic-software-kit [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,qaic,software,kit,qaic-software-kit',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+

+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,qaic,software,kit,qaic-software-kit"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,qaic,software,kit,qaic-software-kit) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get qaic software kit qaic-software-kit[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_branch.#` + - Environment variables: + - *CM_GIT_CHECKOUT*: `#` + - Workflow: + +
+ + + * Group "**repo-source**" +
+ Click here to expand this section. + + * `_repo.#` + - Environment variables: + - *CM_GIT_URL*: `#` + - Workflow: + * **`_repo.quic`** (default) + - Environment variables: + - *CM_GIT_URL*: `https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100` + - Workflow: + +
+ + +#### Default variations + +`_repo.quic` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
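+
+To make the variations above concrete, a sketch of selecting them through the Python CM API (`_repo.quic` is the documented default; the branch name is illustrative):
+
+```python
+import cmind
+
+# Variations are appended to the tags with a leading underscore;
+# `_repo.#` fills CM_GIT_URL and `_branch.#` fills CM_GIT_CHECKOUT.
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,qaic,software,kit,qaic-software-kit,_repo.quic,_branch.main',
+                  'out':'con'})
+if r['return']>0:
+    print (r['error'])
+```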
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-software-kit/_cm.json)***
+     * get,git,repo
+       * CM names: `--adr.['qaic-software-git-repo']...`
+       - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo)
+     * get,generic,sys-util,_libudev-dev
+       - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util)
+     * get,generic,sys-util,_libpci-dev
+       - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util)
+     * get,google,test
+       - CM script: [get-google-test](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-google-test)
+     * get,cmake
+       * CM names: `--adr.['cmake']...`
+       - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake)
+     * get,compiler
+       * CM names: `--adr.['compiler']...`
+       - CM script: [get-cl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cl)
+       - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc)
+       - CM script: [get-llvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-llvm)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-software-kit/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-software-kit/_cm.json)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-software-kit/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-software-kit/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-software-kit/customize.py)***
+  1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-qaic-software-kit/_cm.json) + +___ +### Script output +`cmr "get qaic software kit qaic-software-kit [,variations]" -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_QAIC_RUNNER_PATH` +* `CM_QAIC_SOFTWARE_KIT_PATH` +#### New environment keys auto-detected from customize + +* `CM_QAIC_RUNNER_PATH` +* `CM_QAIC_SOFTWARE_KIT_PATH` \ No newline at end of file diff --git a/script/get-qaic-software-kit/_cm.json b/script/get-qaic-software-kit/_cm.json new file mode 100644 index 0000000000..2bd1d67d23 --- /dev/null +++ b/script/get-qaic-software-kit/_cm.json @@ -0,0 +1,82 @@ +{ + "alias": "get-qaic-software-kit", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML frameworks", + "deps": [ + { + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_repo.": [ "CM_GIT_URL" ], + "_branch.": [ "CM_GIT_CHECKOUT" ] + }, + "names": [ + "qaic-software-git-repo" + ], + "extra_cache_tags": "qaic-software-git-repo,qaic-software,qaic,software,kit" + }, + { + "tags": "get,generic,sys-util,_libudev-dev" + }, + { + "tags": "get,generic,sys-util,_libpci-dev" + }, + { + "tags": "get,google,test" + }, + { + "tags": "get,cmake", + "version_min": "3.24.0", + "names": [ + "cmake" + ] + }, + { + "tags": "get,compiler", + "names": [ + "compiler" + ] + } + ], + "input_description": {}, + "input_mapping": {}, + "new_env_keys": [ + "+PATH", + "CM_QAIC_SOFTWARE_KIT_PATH", + "CM_QAIC_RUNNER_PATH" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "get", + "qaic", + "software", + "kit", + "qaic-software-kit" + ], + "uid": "3344655922694bbb", + "variations": { + "repo.quic": { + "group": "repo-source", + "default": true, + "env": { + "CM_GIT_URL": "https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100" + } + }, + "repo.#": { + "group": "repo-source", + "env": { + "CM_GIT_URL": "#" + } + }, + "branch.#": { + "env": { + "CM_GIT_CHECKOUT": "#" + } + } + }, + "versions": {} +} diff --git a/script/get-qaic-software-kit/customize.py b/script/get-qaic-software-kit/customize.py new file mode 100644 index 0000000000..fedeaaf763 --- /dev/null +++ b/script/get-qaic-software-kit/customize.py @@ -0,0 +1,62 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + env['CM_QAIC_SOFTWARE_KIT_PATH'] = env['CM_GIT_CHECKOUT_PATH'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('+ CXXFLAGS', []) == []: + env['+ CXXFLAGS'] = [] + if env.get('+ CFLAGS', []) == []: + env['+ CFLAGS'] = [] + + if env.get('CM_LLVM_CLANG_VERSION', '') != '': + clang_version_split = env['CM_LLVM_CLANG_VERSION'].split(".") + clang_major_version = int(clang_version_split[0]) + + if clang_major_version >= 17: + env['+ CFLAGS'].append("-Wno-error=c2x-extensions") + + if clang_major_version >= 16: + env['+ CFLAGS'].append("-Wno-error=unused-but-set-variable") + env['+ CXXFLAGS'].append("-Wno-error=unused-but-set-variable") + + if clang_major_version >= 13: + env['+ CFLAGS'].append("-Wno-error=unused-const-variable") + env['+ CFLAGS'].append("-Wno-error=unused-but-set-variable") + env['+ CFLAGS'].append("-Wno-error=strict-prototypes") + env['+ CFLAGS'].append("-Wno-error=unused-variable") + env['+ CXXFLAGS'].append("-Wno-error=unused-const-variable") + env['+ 
CXXFLAGS'].append("-Wno-error=unused-variable") + env['+ CXXFLAGS'].append("-Wno-error=unused-private-field") + env['+ CXXFLAGS'].append("-Wno-error=unused-result") + env['+ CXXFLAGS'].append("-Wno-error=string-concatenation") + env['+ CXXFLAGS'].append("-Wno-error=infinite-recursion") + + if clang_major_version == 12: + env['+ CXXFLAGS'].append("-Wno-error=unknown-warning-option") + + return {'return':0} + +def postprocess(i): + + env = i['env'] + env['CM_QAIC_RUNNER_PATH'] = os.path.join(env['CM_QAIC_SOFTWARE_KIT_PATH'], "build", "utils", "qaic-runner") + + if '+PATH' not in env: + env['+PATH'] = [] + + env['+PATH'].append(env['CM_QAIC_RUNNER_PATH']) + env['CM_QAIC_RUNNER_PATH'] = os.path.join(env['CM_QAIC_RUNNER_PATH'], "qaic-runner") + + return {'return':0} diff --git a/script/get-qaic-software-kit/run.sh b/script/get-qaic-software-kit/run.sh new file mode 100644 index 0000000000..a00122a358 --- /dev/null +++ b/script/get-qaic-software-kit/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +function cmake() { +${CM_CMAKE_BIN_WITH_PATH} $@ +} + +export CC=${CM_C_COMPILER_WITH_PATH} +export CXX=${CM_CXX_COMPILER_WITH_PATH} + +export -f cmake +cd ${CM_QAIC_SOFTWARE_KIT_PATH} +rm -rf build +./bootstrap.sh +test $? -eq 0 || exit $? +cd build +../scripts/build.sh -b Release +test $? -eq 0 || exit $? diff --git a/script/get-rclone/README.md b/script/get-rclone/README.md new file mode 100644 index 0000000000..415c071647 --- /dev/null +++ b/script/get-rclone/README.md @@ -0,0 +1,152 @@ +Automatically generated README for this automation recipe: **get-rclone** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-rclone,22ffb43c49c9419e) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rclone)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,rclone* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get rclone" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,rclone` + +`cm run script --tags=get,rclone[,variations] ` + +*or* + +`cmr "get rclone"` + +`cmr "get rclone [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,rclone',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+

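+
+A specific release can also be pinned with the standard `version` input key (a sketch; `1.65.2` is the default version documented below):
+
+```python
+import cmind
+
+# 'version' maps to CM_VERSION, which the install scripts use to
+# pick the rclone release to download.
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,rclone',
+                  'version':'1.65.2',
+                  'out':'con'})
+```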
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,rclone"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,rclone) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get rclone[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_gdrive` + - Environment variables: + - *CM_RCLONE_GDRIVE*: `yes` + - Workflow: + * `_system` + - Environment variables: + - *CM_RCLONE_SYSTEM*: `yes` + - Workflow: + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
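+
+As an example of the `_gdrive` variation listed above, a sketch of enabling the preconfigured Google Drive remote (per `customize.py` later in this patch, CM_RCLONE_GDRIVE=yes merges the bundled `configs/rclone.conf` into the user's rclone config):
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,rclone,_gdrive',
+                  'out':'con'})
+```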
+ +#### Versions +Default version: `1.65.2` + +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rclone/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rclone/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rclone/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rclone/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rclone/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rclone/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rclone/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rclone/_cm.json) + +___ +### Script output +`cmr "get rclone [,variations]" -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_RCLONE_BIN_WITH_PATH` +* `CM_RCLONE_CACHE_TAGS` +* `CM_RCLONE_VERSION` +#### New environment keys auto-detected from customize + +* `CM_RCLONE_BIN_WITH_PATH` +* `CM_RCLONE_CACHE_TAGS` \ No newline at end of file diff --git a/script/get-rclone/_cm.json b/script/get-rclone/_cm.json new file mode 100644 index 0000000000..31e1e0a9a5 --- /dev/null +++ b/script/get-rclone/_cm.json @@ -0,0 +1,39 @@ +{ + "alias": "get-rclone", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "deps": [ + { + "tags": "detect,os" + } + ], + "category": "Detection or installation of tools and artifacts", + "default_version": "1.65.2", + "new_env_keys": [ + "CM_RCLONE_CACHE_TAGS", + "CM_RCLONE_BIN_WITH_PATH", + "CM_RCLONE_VERSION", + "+PATH" + ], + "tags": [ + "get", + "rclone" + ], + "uid": "22ffb43c49c9419e", + "variations": { + "gdrive": { + "env": { + "CM_RCLONE_GDRIVE": "yes" + } + }, + "system": { + "env": { + "CM_RCLONE_SYSTEM": "yes" + }, + "warnings": [ + "This CM script will install rclone using sudo/brew!" 
+ ] + } + } +} diff --git a/script/get-rclone/configs/rclone.conf b/script/get-rclone/configs/rclone.conf new file mode 100644 index 0000000000..45699a0a29 --- /dev/null +++ b/script/get-rclone/configs/rclone.conf @@ -0,0 +1,8 @@ +[cm-team] +type = drive +scope = drive.readonly +service_account_file = +team_drive = 0AN8R_ThwUNY8Uk9PVA +shared_with_me = true +root_folder_id = 0AN8R_ThwUNY8Uk9PVA + diff --git a/script/get-rclone/customize.py b/script/get-rclone/customize.py new file mode 100644 index 0000000000..c0c05ca1fe --- /dev/null +++ b/script/get-rclone/customize.py @@ -0,0 +1,124 @@ +from cmind import utils +import os +import configparser + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'rclone.exe' if os_info['platform'] == 'windows' else 'rclone' + env['FILE_NAME'] = file_name + + run_script_input = i['run_script_input'] + automation = i['automation'] + + need_version = env.get('CM_VERSION','') + + host_os_machine = '' + if os_info['platform'] != 'windows': + host_os_machine = env['CM_HOST_OS_MACHINE'] # ABI + + r = automation.detect_version_using_script({ + 'env': env, + 'run_script_input': run_script_input, + 'recursion_spaces':recursion_spaces}) + + if r['return'] >0: + if r['return'] == 16: + install_script = 'install' + if os_info['platform'] != 'windows' and env.get('CM_RCLONE_SYSTEM','')=='yes': + install_script += '-system' + else: + if os_info['platform'] != 'windows': + x1 = 'arm64' if host_os_machine.startswith('arm') or host_os_machine.startswith('aarch') else 'amd64' + + filebase = 'rclone-v{}-{}-{}' + urlbase = 'https://downloads.rclone.org/v{}/{}' + + if os_info['platform'] == 'darwin': + filename = filebase.format(need_version, 'osx', x1) + elif os_info['platform'] == 'linux': + filename = filebase.format(need_version, 'linux', x1) + + env['CM_RCLONE_URL'] = urlbase.format(need_version, filename+'.zip') + env['CM_RCLONE_ARCHIVE'] = filename + env['CM_RCLONE_ARCHIVE_WITH_EXT'] = filename+'.zip' + + print(recursion_spaces + 'Downloading {}'.format(env['CM_RCLONE_URL'])) + + cur_dir = os.getcwd() + path_bin = os.path.join(cur_dir, file_name) + env['CM_RCLONE_BIN_WITH_PATH'] = path_bin + env['+PATH']=[cur_dir] + + r = automation.run_native_script({'run_script_input':run_script_input, + 'env':env, + 'script_name':install_script}) + if r['return']>0: return r + else: + return r + + return {'return':0} + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'rclone v([\d.]+)', + 'group_number': 1, + 'env_key':'CM_RCLONE_VERSION', + 'which_env':i['env']}) + if r['return'] >0: return r + + version = r['version'] + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return':0, 'version':version} + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + + gdrive = env.get('CM_RCLONE_GDRIVE', '') + if gdrive == "yes": + config = configparser.ConfigParser() + config_file_path = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "configs", "rclone.conf") + + config.read(config_file_path) + config['cm-team']['service_account_file'] = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "accessfiles", "rclone-gdrive.json") + + default_config_path = os.path.join(os.path.expanduser( '~' ), ".config", "rclone", "rclone.conf") + + default_config = configparser.ConfigParser() + default_config.read(default_config_path) + + for section in config.sections(): + if section not in default_config.sections(): + default_config[section] = config[section] + + 
with open(default_config_path, 'w') as configfile: + default_config.write(configfile) + print({section: dict(default_config[section]) for section in default_config.sections()}) + + r = detect_version(i) + + if r['return'] >0: return r + + version = r['version'] + + env['CM_RCLONE_CACHE_TAGS'] = 'version-'+version + + file_name = 'rclone.exe' if os_info['platform'] == 'windows' else 'rclone' + + if os_info['platform'] == 'windows' or env.get('CM_RCLONE_SYSTEM','')!='yes': + cur_dir = os.getcwd() + path_bin = os.path.join(cur_dir, file_name) + if os.path.isfile(path_bin): + # Was downloaded and extracted by CM + env['CM_RCLONE_BIN_WITH_PATH'] = path_bin + env['+PATH']=[cur_dir] + + return {'return':0, 'version': version} diff --git a/script/get-rclone/install-system-macos.sh b/script/get-rclone/install-system-macos.sh new file mode 100644 index 0000000000..97f8f41ee9 --- /dev/null +++ b/script/get-rclone/install-system-macos.sh @@ -0,0 +1,3 @@ +#!/bin/bash +brew install rclone +test $? -eq 0 || exit 1 diff --git a/script/get-rclone/install-system.sh b/script/get-rclone/install-system.sh new file mode 100644 index 0000000000..a08dd54fb6 --- /dev/null +++ b/script/get-rclone/install-system.sh @@ -0,0 +1,3 @@ +#!/bin/bash +sudo -v ; curl -k https://rclone.org/install.sh | sudo bash +test $? -eq 0 || exit 1 diff --git a/script/get-rclone/install.bat b/script/get-rclone/install.bat new file mode 100644 index 0000000000..0c12f5c1b5 --- /dev/null +++ b/script/get-rclone/install.bat @@ -0,0 +1,12 @@ +del /Q /S rclone-v%CM_VERSION%-windows-amd64.zip > NUL 2>&1 + +wget --no-check-certificate https://downloads.rclone.org/v%CM_VERSION%/rclone-v%CM_VERSION%-windows-amd64.zip +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +unzip -o rclone-v%CM_VERSION%-windows-amd64.zip +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +move /Y rclone-v%CM_VERSION%-windows-amd64\* . + +del /Q /S rclone-v%CM_VERSION%-windows-amd64.zip > NUL 2>&1 + diff --git a/script/get-rclone/install.sh b/script/get-rclone/install.sh new file mode 100644 index 0000000000..d3f6ede34e --- /dev/null +++ b/script/get-rclone/install.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +rm -rf ${CM_RCLONE_ARCHIVE_WITH_EXT} +rm -rf rclone + +wget ${CM_RCLONE_URL} --no-check-certificate +test $? -eq 0 || exit 1 + +unzip ${CM_RCLONE_ARCHIVE_WITH_EXT} +test $? -eq 0 || exit 1 + +mv ${CM_RCLONE_ARCHIVE}/rclone . +test $? -eq 0 || exit 1 diff --git a/script/get-rclone/run.bat b/script/get-rclone/run.bat new file mode 100644 index 0000000000..e8abbfd95c --- /dev/null +++ b/script/get-rclone/run.bat @@ -0,0 +1,5 @@ +where rclone.exe > NUL 2>&1 +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +rclone --version > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-rclone/run.sh b/script/get-rclone/run.sh new file mode 100644 index 0000000000..4eb6912d72 --- /dev/null +++ b/script/get-rclone/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +echo ${PATH} + +if ! command -v rclone &> /dev/null +then + echo "rclone was not detected" + exit 1 +fi +rclone --version > tmp-ver.out +test $? 
-eq 0 || exit 1 diff --git a/script/get-rocm/README.md b/script/get-rocm/README.md new file mode 100644 index 0000000000..50962358a2 --- /dev/null +++ b/script/get-rocm/README.md @@ -0,0 +1,127 @@ +Automatically generated README for this automation recipe: **get-rocm** + +Category: **AI/ML frameworks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-rocm,23a69f9477cb4dab) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rocm)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,rocm,get-rocm* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get rocm get-rocm" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,rocm,get-rocm` + +`cm run script --tags=get,rocm,get-rocm ` + +*or* + +`cmr "get rocm get-rocm"` + +`cmr "get rocm get-rocm " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,rocm,get-rocm',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+

+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,rocm,get-rocm"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,rocm,get-rocm) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get rocm get-rocm" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
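+
+For reference, `detect_version` in this script's `customize.py` (later in this patch) extracts the version with a simple regular expression; a standalone sketch with an illustrative version string:
+
+```python
+import re
+
+# Same pattern passed to parse_version in customize.py; the sample
+# string stands in for the contents of <rocm>/.info/version.
+m = re.search(r'([\d.]+[-\d+]*)', '5.7.1-93')
+if m:
+    print(m.group(1))  # -> 5.7.1-93
+```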
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rocm/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rocm/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rocm/_cm.json)*** + * install,rocm + * `if (CM_REQUIRE_INSTALL == yes)` + - CM script: [install-rocm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-rocm) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rocm/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rocm/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rocm/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-rocm/_cm.json) + +___ +### Script output +`cmr "get rocm get-rocm " -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_ROCM_*` +#### New environment keys auto-detected from customize + +* `CM_ROCM_CACHE_TAGS` +* `CM_ROCM_INSTALLED_PATH` \ No newline at end of file diff --git a/script/get-rocm/_cm.json b/script/get-rocm/_cm.json new file mode 100644 index 0000000000..cc5601d9d7 --- /dev/null +++ b/script/get-rocm/_cm.json @@ -0,0 +1,29 @@ +{ + "alias": "get-rocm", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML frameworks", + "clean_files": [], + "new_env_keys": [ + "CM_ROCM_*", + "+PATH" + ], + "prehook_deps": [ + { + "enable_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "reuse_version": true, + "tags": "install,rocm" + } + ], + "tags": [ + "get", + "rocm", + "get-rocm" + ], + "uid": "23a69f9477cb4dab" +} diff --git a/script/get-rocm/customize.py b/script/get-rocm/customize.py new file mode 100644 index 0000000000..667c29f4da --- /dev/null +++ b/script/get-rocm/customize.py @@ -0,0 +1,61 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'rocminfo.exe' if os_info['platform'] == 'windows' else 'rocminfo' + env['FILE_NAME'] = file_name + env['CM_TMP_PATH'] = "/opt/rocm/bin" + + if 'CM_ROCM_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': 'PATH', + 'detect_version':True, + 'env_path_key':'CM_ROCM_BIN_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + if r['return'] >0 : + if r['return'] == 16: + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return':0} + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'([\d.]+[-\d+]*)', + 'group_number': 1, + 'env_key':'CM_ROCM_VERSION', + 'which_env':i['env']}) + if r['return'] >0: return r + + version = r['version'] + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return':0, 'version':version} + +def postprocess(i): + env = i['env'] + + r = detect_version(i) + + if r['return'] >0: return r + + version = r['version'] + found_file_path = env['CM_ROCM_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + 
env['CM_ROCM_INSTALLED_PATH'] = found_path
+
+    env['CM_ROCM_CACHE_TAGS'] = 'version-'+version
+
+    return {'return':0, 'version': version}
diff --git a/script/get-rocm/run.sh b/script/get-rocm/run.sh
new file mode 100644
index 0000000000..f7c8e888cb
--- /dev/null
+++ b/script/get-rocm/run.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+dir="${CM_ROCM_BIN_WITH_PATH%/*}/../"
+cat ${dir}/.info/version > tmp-ver.out
+test $? -eq 0 || exit 1
diff --git a/script/get-spec-ptd/README-extra.md b/script/get-spec-ptd/README-extra.md
new file mode 100644
index 0000000000..4061851ca6
--- /dev/null
+++ b/script/get-spec-ptd/README-extra.md
@@ -0,0 +1,16 @@
+# Get SPEC Power Daemon
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones the [SPEC Power Daemon](https://github.com/mlcommons/power) used by MLPerf for power measurements.
+
+## Commands
+To install:
+```
+cm run script --tags=get,mlperf,power,src
+```
+
+## Exported Variables
+* `CM_SPEC_PTD_PATH`: Path to the PTDaemon
+* `CM_MLPERF_PTD_PATH`: Path to the PTDaemon (same as `CM_SPEC_PTD_PATH`)
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
diff --git a/script/get-spec-ptd/README.md b/script/get-spec-ptd/README.md
new file mode 100644
index 0000000000..a3d397d21d
--- /dev/null
+++ b/script/get-spec-ptd/README.md
@@ -0,0 +1,166 @@
+Automatically generated README for this automation recipe: **get-spec-ptd**
+
+Category: **MLPerf benchmark support**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-spec-ptd,7423a878e4524136) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-spec-ptd)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,spec,ptd,ptdaemon,power,daemon,power-daemon,mlperf,mlcommons*
+* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,spec,ptd,ptdaemon,power,daemon,power-daemon,mlperf,mlcommons`
+
+`cm run script --tags=get,spec,ptd,ptdaemon,power,daemon,power-daemon,mlperf,mlcommons [--input_flags]`
+
+*or*
+
+`cmr "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons"`
+
+`cmr "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons " [--input_flags]`
+
+
+
+#### Input Flags
+
+* --**input**=Path to SPEC PTDaemon (Optional)
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "input":...})
+```
+#### Run this script from Python
+
+<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,spec,ptd,ptdaemon,power,daemon,power-daemon,mlperf,mlcommons',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
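+
+The `input` flag documented above can be passed the same way (the path is illustrative; per `run.sh` later in this patch, a preinstalled PTDaemon makes the native script exit early):
+
+```python
+import cmind
+
+# 'input' maps to CM_INPUT (see input_mapping in _cm.json)
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,spec,ptd,ptdaemon,power,daemon,power-daemon,mlperf,mlcommons',
+                  'input':'/path/to/PTDaemon',
+                  'out':'con'})
+```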
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,spec,ptd,ptdaemon,power,daemon,power-daemon,mlperf,mlcommons"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,spec,ptd,ptdaemon,power,daemon,power-daemon,mlperf,mlcommons) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--input=value`  →  `CM_INPUT=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "input":...})
+```
+
+</details>
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_GIT_CHECKOUT: `main` +* CM_GIT_DEPTH: `--depth 1` +* CM_GIT_PATCH: `no` +* CM_GIT_RECURSE_SUBMODULES: ` ` +* CM_GIT_URL: `https://github.com/mlcommons/power.git` + +
+ +#### Versions +Default version: `main` + +* `custom` +* `main` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-spec-ptd/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,git,repo,_repo.https://github.com/mlcommons/power + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-spec-ptd/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-spec-ptd/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-spec-ptd/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-spec-ptd/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-spec-ptd/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-spec-ptd/_cm.json) + +___ +### Script output +`cmr "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons " [--input_flags] -j` +#### New environment keys (filter) + +* `CM_MLPERF_PTD_PATH` +* `CM_SPEC_PTD_PATH` +#### New environment keys auto-detected from customize + +* `CM_MLPERF_PTD_PATH` +* `CM_SPEC_PTD_PATH` \ No newline at end of file diff --git a/script/get-spec-ptd/_cm.json b/script/get-spec-ptd/_cm.json new file mode 100644 index 0000000000..2e7a2c1cb8 --- /dev/null +++ b/script/get-spec-ptd/_cm.json @@ -0,0 +1,71 @@ +{ + "alias": "get-spec-ptd", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "default_env": { + "CM_GIT_CHECKOUT": "main", + "CM_GIT_DEPTH": "--depth 1", + "CM_GIT_PATCH": "no", + "CM_GIT_RECURSE_SUBMODULES": " ", + "CM_GIT_URL": "https://github.com/mlcommons/power.git" + }, + "default_version": "main", + "input_mapping": { + "input": "CM_INPUT" + }, + "input_description": { + "input": "Path to SPEC PTDaemon (Optional)" + }, + "deps": [ + { + "tags": "detect,os" + }, + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "tags": "get,git,repo,_repo.https://github.com/mlcommons/power", + "extra_cache_tags": "mlperf,power,spec,ptdaemon,ptd", + "force_env_keys": [ + "CM_GIT_*" + ], + "env": { + "CM_GIT_AUTH": "yes", + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_MLPERF_POWER_SOURCE" + } + } + ], + "new_env_keys": [ + "CM_SPEC_PTD_PATH", + "CM_MLPERF_PTD_PATH" + ], + "tags": [ + "get", + "spec", + "ptd", + "ptdaemon", + "power", + "daemon", + "power-daemon", + "mlperf", + "mlcommons" + ], + "uid": "7423a878e4524136", + "versions": { + "custom": { + "env": { + } + }, + "main": { + "env": { + "CM_GIT_CHECKOUT": "main" + } + } + } +} diff --git a/script/get-spec-ptd/customize.py b/script/get-spec-ptd/customize.py new file mode 100644 index 0000000000..250ddd887b --- /dev/null +++ b/script/get-spec-ptd/customize.py @@ -0,0 +1,25 @@ +from cmind import utils +import os +import shutil + +def 
preprocess(i): + + os_info = i['os_info'] + + return {'return':0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + if env['CM_HOST_OS_TYPE'].lower() == "windows": + binary_name = "ptd-windows-x86.exe" + else: + binary_name = "ptd-linux-x86" + if 'CM_MLPERF_PTD_PATH' not in env: + env['CM_MLPERF_PTD_PATH'] = os.path.join(env['CM_MLPERF_POWER_SOURCE'], 'inference_v1.0', binary_name) + env['CM_SPEC_PTD_PATH'] = env['CM_MLPERF_PTD_PATH'] + + return {'return':0} diff --git a/script/get-spec-ptd/run.sh b/script/get-spec-ptd/run.sh new file mode 100644 index 0000000000..f0f2e7eaea --- /dev/null +++ b/script/get-spec-ptd/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +if [[ -n "${CM_INPUT}" ]]; then + exit 0 +fi + +cd ${CM_MLPERF_POWER_SOURCE} + +chmod +x "inference_v1.0/ptd-linux-x86" +chmod +x "inference_v1.0/ptd-windows-x86.exe" +cd - diff --git a/script/get-sys-utils-cm/README.md b/script/get-sys-utils-cm/README.md new file mode 100644 index 0000000000..4b1e6b0acb --- /dev/null +++ b/script/get-sys-utils-cm/README.md @@ -0,0 +1,158 @@ +Automatically generated README for this automation recipe: **get-sys-utils-cm** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-sys-utils-cm,bc90993277e84b8e) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-cm)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,sys-utils-cm* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get sys-utils-cm" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,sys-utils-cm` + +`cm run script --tags=get,sys-utils-cm[,variations] [--input_flags]` + +*or* + +`cmr "get sys-utils-cm"` + +`cmr "get sys-utils-cm [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,sys-utils-cm',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,sys-utils-cm"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,sys-utils-cm) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get sys-utils-cm[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_user` + - Environment variables: + - *CM_PYTHON_PIP_USER*: `--user` + - Workflow: + +
+ + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--skip=value`  →  `CM_SKIP_SYS_UTILS=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "skip":...})
+```
+
+</details>
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
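+
+For example, the `--skip` mapping shown above can be used to bypass the installation step entirely (a sketch; `customize.py` later in this patch returns early when CM_SKIP_SYS_UTILS is set):
+
+```python
+import cmind
+
+# 'skip' maps to CM_SKIP_SYS_UTILS via input_mapping in _cm.json
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,sys-utils-cm',
+                  'skip':'yes',
+                  'out':'con'})
+```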
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-cm/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-cm/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-cm/_cm.json) + 1. ***Run native script if exists*** + * [run-arch.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-cm/run-arch.sh) + * [run-debian.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-cm/run-debian.sh) + * [run-macos.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-cm/run-macos.sh) + * [run-rhel.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-cm/run-rhel.sh) + * [run-sles.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-cm/run-sles.sh) + * [run-ubuntu.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-cm/run-ubuntu.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-cm/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-cm/_cm.json) + +___ +### Script output +`cmr "get sys-utils-cm [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `+PATH` +#### New environment keys auto-detected from customize diff --git a/script/get-sys-utils-cm/_cm.json b/script/get-sys-utils-cm/_cm.json new file mode 100644 index 0000000000..a496f42c91 --- /dev/null +++ b/script/get-sys-utils-cm/_cm.json @@ -0,0 +1,38 @@ +{ + "alias": "get-sys-utils-cm", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "deps": [ + { + "tags": "detect,os" + } + ], + "env": { + "CM_CLEAN_DIRS": "bin", + "CM_PACKAGE_WIN_URL": "https://zenodo.org/record/6501550/files/cm-artifact-os-windows-32.zip ; https://www.dropbox.com/s/2y9r2mvtu8tpexk/zlib123dllx64-bin.zip?dl=1", + "CM_SUDO": "sudo" + }, + "input_mapping": { + "skip": "CM_SKIP_SYS_UTILS" + }, + "new_env_keys": [ + "+PATH" + ], + "tags": [ + "get", + "sys-utils-cm" + ], + "uid": "bc90993277e84b8e", + "variations": { + "user": { + "env": { + "CM_PYTHON_PIP_USER": "--user" + } + } + }, + "warnings": [ + "This CM script will install extra OS system utils required for CM automation workflows!" 
+ ] +} diff --git a/script/get-sys-utils-cm/customize.py b/script/get-sys-utils-cm/customize.py new file mode 100644 index 0000000000..e9a1890852 --- /dev/null +++ b/script/get-sys-utils-cm/customize.py @@ -0,0 +1,85 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + cm = automation.cmind + + if env.get('CM_HOST_OS_FLAVOR', '') == 'amzn': + env['CM_PACKAGE_TOOL'] = "yum" + i['run_script_input']['script_name'] = "run-rhel" + + # Test (not needed - will be removed) + if env.get('CM_SKIP_SYS_UTILS','').lower() in [True, 'yes', 'on']: + return {'return':0, 'skip':True} + + +# Windows has moved to get-sys-utils-min and will be always run with "detect,os"! + + if os_info['platform'] == 'windows': + print ('') + + # If windows, download here otherwise use run.sh + +# +# path = os.getcwd() +# +# clean_dirs = env.get('CM_CLEAN_DIRS','').strip() +# if clean_dirs!='': +# import shutil +# for cd in clean_dirs.split(','): +# if cd != '': +# if os.path.isdir(cd): +# print ('Clearning directory {}'.format(cd)) +# shutil.rmtree(cd) +# +# url = env['CM_PACKAGE_WIN_URL'] +# +# urls = [url] if ';' not in url else url.split(';') +# +# print ('') +# print ('Current directory: {}'.format(os.getcwd())) +# +# for url in urls: +# +# url = url.strip() +# +# print ('') +# print ('Downloading from {}'.format(url)) +# +# r = cm.access({'action':'download_file', +# 'automation':'utils,dc2743f8450541e3', +# 'url':url}) +# if r['return']>0: return r +# +# filename = r['filename'] +# +# print ('Unzipping file {}'.format(filename)) +# +# r = cm.access({'action':'unzip_file', +# 'automation':'utils,dc2743f8450541e3', +# 'filename':filename}) +# if r['return']>0: return r +# +# if os.path.isfile(filename): +# print ('Removing file {}'.format(filename)) +# os.remove(filename) +# +# print ('') +# +# # Add to path +# env['+PATH']=[os.path.join(path, 'bin')] +# + else: + print ('') + print ('***********************************************************************') + print ('This script will attempt to install minimal system dependencies for CM.') + print ('Note that you may be asked for your SUDO password ...') + print ('***********************************************************************') + + return {'return':0} diff --git a/script/get-sys-utils-cm/do_pip_installs.sh b/script/get-sys-utils-cm/do_pip_installs.sh new file mode 100644 index 0000000000..cbf7e58579 --- /dev/null +++ b/script/get-sys-utils-cm/do_pip_installs.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +PIP_EXTRA=`python3 -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"` +cmd="python3 -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${CM_PYTHON_PIP_USER} ${CM_PYTHON_PIP_COMMON_EXTRA} ${PIP_EXTRA}" +echo $cmd +eval $cmd diff --git a/script/get-sys-utils-cm/do_pip_installs.sh.old b/script/get-sys-utils-cm/do_pip_installs.sh.old new file mode 100644 index 0000000000..55a1492492 --- /dev/null +++ b/script/get-sys-utils-cm/do_pip_installs.sh.old @@ -0,0 +1,6 @@ +#!/bin/bash + +PIP_EXTRA=`python3 -c "import pkg_resources; print(' --break-system-packages ' if int(pkg_resources.get_distribution('pip').version.split('.')[0]) >= 23 else '')"` +cmd="python3 -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${CM_PYTHON_PIP_USER} ${CM_PYTHON_PIP_COMMON_EXTRA} ${PIP_EXTRA}" +echo $cmd +eval $cmd diff --git a/script/get-sys-utils-cm/requirements.txt 
b/script/get-sys-utils-cm/requirements.txt new file mode 100644 index 0000000000..bb2a50df88 --- /dev/null +++ b/script/get-sys-utils-cm/requirements.txt @@ -0,0 +1,5 @@ +requests +numpy +pandas +wheel +giturlparse diff --git a/script/get-sys-utils-cm/run-arch.sh b/script/get-sys-utils-cm/run-arch.sh new file mode 100644 index 0000000000..1a5338c58a --- /dev/null +++ b/script/get-sys-utils-cm/run-arch.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +echo "************************************************" +echo "Installing some system dependencies via package manager" + + +if [[ "$CM_QUIET" != "yes" ]]; then + echo "Enter skip to skip this step or press enter to continue:" + read DUMMY + + if [[ "$DUMMY" == "skip" ]]; then exit 0; fi +fi + +CM_PACKAGE_TOOL=${CM_PACKAGE_TOOL:-pacman} + +${CM_SUDO} ${CM_PACKAGE_TOOL} -Syu && \ + ${CM_SUDO} ${CM_PACKAGE_TOOL} -Sy \ + acl autoconf \ + bzip2 \ + ca-certificates curl cmake \ + gcc git g++ \ + libtool \ + zlib \ + patch python python-pip \ + rsync \ + sudo \ + tar \ + unzip \ + vim \ + wget which \ + xz \ + zip + +. ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh +test $? -eq 0 || exit $? diff --git a/script/get-sys-utils-cm/run-debian.sh b/script/get-sys-utils-cm/run-debian.sh new file mode 100644 index 0000000000..c7de244641 --- /dev/null +++ b/script/get-sys-utils-cm/run-debian.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +echo "************************************************" +echo "Installing some system dependencies via sudo apt" + + +if [[ "$CM_QUIET" != "yes" ]]; then + echo "Enter skip to skip this step or press enter to continue:" + read DUMMY + + if [[ "$DUMMY" == "skip" ]]; then exit 0; fi +fi + +CM_APT_TOOL=${CM_APT_TOOL:-apt-get} + +${CM_SUDO} ${CM_APT_TOOL} update && \ + ${CM_SUDO} ${CM_APT_TOOL} install -y --no-install-recommends \ + apt-utils \ + git \ + wget \ + curl \ + zip \ + unzip \ + bzip2 \ + zlib1g-dev \ + libbz2-dev \ + openssh-client \ + kmod \ + libmesa-dev \ + libssl-dev \ + vim \ + mc \ + tree \ + gcc \ + g++ \ + tar \ + autoconf \ + autogen \ + libtool \ + make \ + cmake \ + libc6-dev \ + build-essential \ + libbz2-dev \ + libffi-dev \ + liblzma-dev \ + python3 \ + python3-pip \ + python3-dev \ + libtinfo-dev \ + sudo \ + libgl1 \ + libncurses5 + +. ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh +test $? -eq 0 || exit $? diff --git a/script/get-sys-utils-cm/run-macos.sh b/script/get-sys-utils-cm/run-macos.sh new file mode 100644 index 0000000000..1e5eab4ad5 --- /dev/null +++ b/script/get-sys-utils-cm/run-macos.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +echo "***************************************************" +echo "Installing some system dependencies via brew" + +if [[ "$CM_QUIET" != "yes" ]]; then + echo "Enter skip to skip this step or press enter to continue:" + read DUMMY + + if [[ "$DUMMY" == "skip" ]]; then exit 0; fi +fi + +brew update && \ + brew install \ + git \ + wget \ + curl \ + zip \ + unzip \ + bzip2 \ + vim \ + mc \ + tree \ + gcc \ + autoconf \ + autogen \ + libtool \ + make \ + cmake \ + openssl \ + readline \ + sqlite3 \ + tar \ + xz \ + zlib \ + python3 + +. ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh +test $? -eq 0 || exit $? 
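
The `do_pip_installs.sh` helper above (sourced by every `run-*.sh` in this script) gates the `--break-system-packages` flag on the installed pip version: pip 23+ refuses to install into PEP 668 "externally managed" system environments without it, while older pips reject the unknown flag. A minimal Python restatement of that check, equivalent to the embedded one-liner:

```python
# Same logic as the one-liner in do_pip_installs.sh: only pip >= 23
# understands (and may require) --break-system-packages.
import importlib.metadata

def pip_extra_flags() -> str:
    pip_major = int(importlib.metadata.version('pip').split('.')[0])
    return ' --break-system-packages ' if pip_major >= 23 else ''

print(pip_extra_flags())
```

The retired `do_pip_installs.sh.old` performs the same check via the deprecated `pkg_resources` API; the replacement uses `importlib.metadata`, which requires Python 3.8+.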
diff --git a/script/get-sys-utils-cm/run-rhel.sh b/script/get-sys-utils-cm/run-rhel.sh new file mode 100644 index 0000000000..f247f807e2 --- /dev/null +++ b/script/get-sys-utils-cm/run-rhel.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +echo "************************************************" +echo "Installing some system dependencies via package manager" + + +if [[ "$CM_QUIET" != "yes" ]]; then + echo "Enter skip to skip this step or press enter to continue:" + read DUMMY + + if [[ "$DUMMY" == "skip" ]]; then exit 0; fi +fi + +if [[ "$CM_HOST_OS_FLAVOR" == "amzn" ]]; then + ${CM_SUDO} yum groupinstall "Development Tools" +fi + +CM_PACKAGE_TOOL=${CM_PACKAGE_TOOL:-dnf} + +${CM_SUDO} ${CM_PACKAGE_TOOL} update && \ + ${CM_SUDO} ${CM_PACKAGE_TOOL} --skip-broken install -y \ + acl autoconf \ + bzip2-devel bzip2 \ + ca-certificates curl cmake \ + gcc git g++ \ + libtool libffi-devel libssl-devel\ + zlib-devel \ + libbz2-devel \ + openssh-client \ + make mesa-libGL \ + patch python3 python3-pip python3-devel \ + openssl-devel \ + rsync \ + tar \ + unzip \ + vim \ + wget which \ + xz \ + zip + +. ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh +test $? -eq 0 || exit $? diff --git a/script/get-sys-utils-cm/run-sles.sh b/script/get-sys-utils-cm/run-sles.sh new file mode 100644 index 0000000000..32cfdbabc9 --- /dev/null +++ b/script/get-sys-utils-cm/run-sles.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +echo "************************************************" +echo "Installing some system dependencies via package manager" + + +if [[ "$CM_QUIET" != "yes" ]]; then + echo "Enter skip to skip this step or press enter to continue:" + read DUMMY + + if [[ "$DUMMY" == "skip" ]]; then exit 0; fi +fi + +CM_PACKAGE_TOOL=${CM_PACKAGE_TOOL:-zypper} + +${CM_SUDO} ${CM_PACKAGE_TOOL} install -t pattern devel_basis +${CM_SUDO} ${CM_PACKAGE_TOOL} update && \ + ${CM_SUDO} ${CM_PACKAGE_TOOL} install -y \ + bzip2-devel bzip2 \ + ca-certificates curl cmake \ + gcc git \ + libtool libffi-devel \ + zlib-devel \ + libbz2-devel \ + openssh-client \ + make \ + patch python3 python3-pip python3-devel \ + openssl-devel \ + rsync \ + tar \ + unzip \ + vim \ + wget which \ + xz \ + zip + +. ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh +test $? -eq 0 || exit $? diff --git a/script/get-sys-utils-cm/run-ubuntu.sh b/script/get-sys-utils-cm/run-ubuntu.sh new file mode 100644 index 0000000000..1c4561e89f --- /dev/null +++ b/script/get-sys-utils-cm/run-ubuntu.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +echo "************************************************" +echo "Installing some system dependencies via sudo apt" + + +if [[ "$CM_QUIET" != "yes" ]]; then + echo "Enter skip to skip this step or press enter to continue:" + read DUMMY + + if [[ "$DUMMY" == "skip" ]]; then exit 0; fi +fi + +CM_APT_TOOL=${CM_APT_TOOL:-apt-get} + +${CM_SUDO} ${CM_APT_TOOL} update && \ + ${CM_SUDO} DEBIAN_FRONTEND=noninteractive ${CM_APT_TOOL} install -y --no-install-recommends \ + apt-utils \ + git \ + wget \ + curl \ + zip \ + unzip \ + bzip2 \ + libz-dev \ + libbz2-dev \ + openssh-client \ + libssl-dev \ + vim \ + mc \ + tree \ + gcc \ + g++ \ + tar \ + autoconf \ + autogen \ + libtool \ + make \ + cmake \ + libc6-dev \ + build-essential \ + libbz2-dev \ + libffi-dev \ + liblzma-dev \ + python3 \ + python3-pip \ + python3-dev \ + python3-venv \ + libtinfo-dev \ + python-is-python3 \ + sudo \ + libgl1 \ + libncurses5 \ + libjpeg9-dev \ + unzip \ + libgl1-mesa-glx \ + zlib1g-dev + +. ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh +test $? -eq 0 || exit $? 
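
These per-distribution scripts are selected at run time. Note that Amazon Linux has no dedicated `run-*.sh`: the `customize.py` shown earlier reroutes flavor `amzn` to the RHEL script and forces `yum` as the package tool. A small sketch of that dispatch under the same env keys (illustrative, not part of the patch):

```python
# Mirrors the Amazon Linux special case in customize.py above:
# flavor "amzn" reuses run-rhel.sh with CM_PACKAGE_TOOL=yum.
def pick_native_script(env: dict) -> str:
    if env.get('CM_HOST_OS_FLAVOR', '') == 'amzn':
        env['CM_PACKAGE_TOOL'] = 'yum'
        return 'run-rhel'
    return 'run'  # CM then resolves the matching run-<os>.sh variant

env = {'CM_HOST_OS_FLAVOR': 'amzn'}
assert pick_native_script(env) == 'run-rhel' and env['CM_PACKAGE_TOOL'] == 'yum'
```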
diff --git a/script/get-sys-utils-min/README.md b/script/get-sys-utils-min/README.md new file mode 100644 index 0000000000..7197513dd9 --- /dev/null +++ b/script/get-sys-utils-min/README.md @@ -0,0 +1,119 @@ +Automatically generated README for this automation recipe: **get-sys-utils-min** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-sys-utils-min,a9af7714d3d94779) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-min)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,sys-utils-min* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get sys-utils-min" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,sys-utils-min` + +`cm run script --tags=get,sys-utils-min ` + +*or* + +`cmr "get sys-utils-min"` + +`cmr "get sys-utils-min " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,sys-utils-min',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
</details>
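+
+If the call succeeds, the environment produced by the script can be inspected from the result. This sketch assumes the script automation returns the filtered environment under `new_env`, which is an assumption rather than something documented here:
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,sys-utils-min',
+                  'out':'con'})
+if r['return']>0:
+    print (r['error'])
+else:
+    # '+PATH' is the declared new_env_keys filter; assumed to come back
+    # as a list of directories to prepend to PATH.
+    print (r.get('new_env', {}).get('+PATH', []))
+```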
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,sys-utils-min"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,sys-utils-min) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get sys-utils-min" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-min/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-min/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-min/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-min/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-sys-utils-min/_cm.json) + +___ +### Script output +`cmr "get sys-utils-min " -j` +#### New environment keys (filter) + +* `+PATH` +#### New environment keys auto-detected from customize diff --git a/script/get-sys-utils-min/_cm.json b/script/get-sys-utils-min/_cm.json new file mode 100644 index 0000000000..d033f8d1c5 --- /dev/null +++ b/script/get-sys-utils-min/_cm.json @@ -0,0 +1,22 @@ +{ + "alias": "get-sys-utils-min", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "deps": [ + ], + "env": { + "CM_CLEAN_DIRS": "bin", + "CM_PACKAGE_WIN_URL": "https://zenodo.org/records/10379926/files/cm-artifact-os-windows-32.zip ; https://www.dropbox.com/s/2y9r2mvtu8tpexk/zlib123dllx64-bin.zip?dl=1 ; https://cKnowledge.org/ai/data/xz-5.2.9-win64.zip", + "CM_SUDO": "sudo" + }, + "new_env_keys": [ + "+PATH" + ], + "tags": [ + "get", + "sys-utils-min" + ], + "uid": "a9af7714d3d94779" +} diff --git a/script/get-sys-utils-min/customize.py b/script/get-sys-utils-min/customize.py new file mode 100644 index 0000000000..a8b9020c50 --- /dev/null +++ b/script/get-sys-utils-min/customize.py @@ -0,0 +1,64 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + cm = automation.cmind + + # If windows, download here otherwise use run.sh + if os_info['platform'] == 'windows': + + path = os.getcwd() + + clean_dirs = env.get('CM_CLEAN_DIRS','').strip() + if clean_dirs!='': + import shutil + for cd in clean_dirs.split(','): + if cd != '': + if os.path.isdir(cd): + print ('Clearning directory {}'.format(cd)) + shutil.rmtree(cd) + + url = env['CM_PACKAGE_WIN_URL'] + + urls = [url] if ';' not in url else url.split(';') + + print ('') + print ('Current directory: {}'.format(os.getcwd())) + + for url in urls: + + url = url.strip() + + print ('') + print ('Downloading from {}'.format(url)) + + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', + 'url':url}) + if r['return']>0: return r + + filename = r['filename'] + + print ('Unzipping file {}'.format(filename)) + + r = cm.access({'action':'unzip_file', + 'automation':'utils,dc2743f8450541e3', + 'filename':filename}) + if r['return']>0: return r + + if os.path.isfile(filename): + print ('Removing file {}'.format(filename)) + os.remove(filename) + + print ('') + + # Add to path + env['+PATH']=[os.path.join(path, 'bin')] + + return {'return':0} diff --git a/script/get-tensorrt/README-extra.md b/script/get-tensorrt/README-extra.md new file mode 100644 index 0000000000..9254265113 --- /dev/null +++ b/script/get-tensorrt/README-extra.md @@ -0,0 +1,11 @@ +# Get 
TensorRT
+
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) installs TensorRT when the corresponding [tar file](https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing-tar) is provided as an input.
+
+## How to Use
+```
+cm run script --tags=get,tensorrt --tar_file=<path to the TensorRT tar file>
+```
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
diff --git a/script/get-tensorrt/README.md b/script/get-tensorrt/README.md
new file mode 100644
index 0000000000..d0562e558d
--- /dev/null
+++ b/script/get-tensorrt/README.md
@@ -0,0 +1,178 @@
+Automatically generated README for this automation recipe: **get-tensorrt**
+
+Category: **CUDA automation**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-tensorrt,2a84ca505e4c408d) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tensorrt)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,tensorrt,nvidia*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get tensorrt nvidia" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,tensorrt,nvidia`
+
+`cm run script --tags=get,tensorrt,nvidia[,variations] [--input_flags]`
+
+*or*
+
+`cmr "get tensorrt nvidia"`
+
+`cmr "get tensorrt nvidia [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+
+#### Input Flags
+
+* --**input**=Full path to the installed TensorRT library (nvinfer)
+* --**tar_file**=Full path to the TensorRT Tar file downloaded from the Nvidia website (https://developer.nvidia.com/tensorrt)
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "input":...})
+```
+#### Run this script from Python
+
+<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,tensorrt,nvidia',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
</details>
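+
+Since `tar_file` is mapped to `CM_TENSORRT_TAR_FILE_PATH` (see the input flags above and `_cm.json` below), the tar file can also be passed through the same Python call; the path here is a hypothetical example:
+
+```python
+import cmind
+
+# Hypothetical path: 'tar_file' is mapped to CM_TENSORRT_TAR_FILE_PATH
+# by this script's input_mapping.
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,tensorrt,nvidia',
+                  'out':'con',
+                  'tar_file':'/downloads/TensorRT-8.6.1.6.tar.gz'})
+if r['return']>0:
+    print (r['error'])
+```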
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,tensorrt,nvidia"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,tensorrt,nvidia) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get tensorrt nvidia[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_dev` + - Environment variables: + - *CM_TENSORRT_REQUIRE_DEV*: `yes` + - Workflow: + +
+ + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--input=value` → `CM_INPUT=value`
+* `--tar_file=value` → `CM_TENSORRT_TAR_FILE_PATH=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "input":...})
+```
+
</details>
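+
+Conversely, when TensorRT is already installed, `input` (mapped to `CM_INPUT`) can point the detector at the existing `nvinfer` library instead of a tar file; the path below is a hypothetical example:
+
+```python
+import cmind
+
+# Hypothetical install location with libnvinfer; 'input' maps to CM_INPUT.
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,tensorrt,nvidia',
+                  'out':'con',
+                  'input':'/opt/TensorRT-8.6.1.6/lib'})
+if r['return']>0:
+    print (r['error'])
+```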
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tensorrt/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tensorrt/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tensorrt/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tensorrt/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tensorrt/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tensorrt/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tensorrt/_cm.json) + +___ +### Script output +`cmr "get tensorrt nvidia [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `+ LDFLAGS` +* `+CPLUS_INCLUDE_PATH` +* `+C_INCLUDE_PATH` +* `+DYLD_FALLBACK_LIBRARY_PATH` +* `+LD_LIBRARY_PATH` +* `+PATH` +* `CM_TENSORRT_*` +#### New environment keys auto-detected from customize + +* `CM_TENSORRT_INSTALL_PATH` +* `CM_TENSORRT_LIB_PATH` +* `CM_TENSORRT_VERSION` \ No newline at end of file diff --git a/script/get-tensorrt/_cm.json b/script/get-tensorrt/_cm.json new file mode 100644 index 0000000000..b1aa40e06a --- /dev/null +++ b/script/get-tensorrt/_cm.json @@ -0,0 +1,51 @@ +{ + "alias": "get-tensorrt", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "CUDA automation", + "clean_files": [], + "default_env": { + }, + "deps": [ + { + "tags": "detect,os" + }, + { + "names": [ "python", "python3" ], + "tags": "get,python3" + } + ], + "input_mapping": { + "input": "CM_INPUT", + "tar_file": "CM_TENSORRT_TAR_FILE_PATH" + }, + "input_description": { + "input": "Full path to the installed TensorRT library (nvinfer)", + "tar_file": "Full path to the TensorRT Tar file downloaded from the Nvidia website (https://developer.nvidia.com/tensorrt)" + }, + "new_env_keys": [ + "CM_TENSORRT_*", + "+PATH", + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH", + "+ LDFLAGS" + ], + "tags": [ + "get", + "tensorrt", + "nvidia" + ], + "uid": "2a84ca505e4c408d", + "variations": { + "dev": { + "env": { + "CM_TENSORRT_REQUIRE_DEV": "yes" + } + } + }, + "docker": { + } +} diff --git a/script/get-tensorrt/customize.py b/script/get-tensorrt/customize.py new file mode 100644 index 0000000000..b18fe35c3a --- /dev/null +++ b/script/get-tensorrt/customize.py @@ -0,0 +1,142 @@ +from cmind import utils +import os +import tarfile + +def preprocess(i): + + recursion_spaces = i['recursion_spaces'] + + os_info = i['os_info'] + + env = i['env'] + + + #Not enforcing dev requirement for now + if env.get('CM_TENSORRT_TAR_FILE_PATH','')=='' and env.get('CM_TENSORRT_REQUIRE_DEV1', '') != 'yes' and env.get('CM_HOST_PLATFORM_FLAVOR', '') != 'aarch64': + + if os_info['platform'] == 'windows': + extra_pre='' + extra_ext='lib' + else: + 
extra_pre='lib' + extra_ext='so' + + libfilename = extra_pre + 'nvinfer.' +extra_ext + env['CM_TENSORRT_VERSION'] = 'vdetected' + + if env.get('CM_TMP_PATH', '').strip() != '': + path = env.get('CM_TMP_PATH') + if os.path.exists(os.path.join(path, libfilename)): + env['CM_TENSORRT_LIB_PATH'] = path + return {'return': 0} + + if not env.get('CM_TMP_PATH'): + env['CM_TMP_PATH'] = '' + + if os_info['platform'] == 'windows': + if env.get('CM_INPUT','').strip()=='' and env.get('CM_TMP_PATH','').strip()=='': + # Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA" + paths = [] + for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]: + if os.path.isdir(path): + dirs = os.listdir(path) + for dr in dirs: + path2 = os.path.join(path, dr, 'lib') + if os.path.isdir(path2): + paths.append(path2) + + if len(paths)>0: + tmp_paths = ';'.join(paths) + tmp_paths += ';'+os.environ.get('PATH','') + + env['CM_TMP_PATH'] = tmp_paths + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + else: + # paths to cuda are not always in PATH - add a few typical locations to search for + # (unless forced by a user) + + if env.get('CM_INPUT','').strip()=='': + if env.get('CM_TMP_PATH','').strip()!='': + env['CM_TMP_PATH']+=':' + + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + for lib_path in env.get('+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []): + if(os.path.exists(lib_path)): + env['CM_TMP_PATH']+=':'+lib_path + + r = i['automation'].find_artifact({'file_name': libfilename, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': 'LD_LIBRARY_PATH', + 'detect_version':False, + 'env_path_key':'CM_TENSORRT_LIB_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + if r['return'] >0 : + if os_info['platform'] == 'windows': + return r + else: + return {'return':0} + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is currently not supported!'} + + if env.get('CM_TENSORRT_TAR_FILE_PATH','')=='': + tags = [ "get", "tensorrt" ] + if env.get('CM_TENSORRT_REQUIRE_DEV', '') != 'yes': + tags.append("_dev") + return {'return': 1, 'error': 'Please envoke cmr "' + " ".join(tags) + '" --tar_file={full path to the TensorRT tar file}'} + + + print ('Untaring file - can take some time ...') + + file_name = "trtexec" + my_tar = tarfile.open(os.path.expanduser(env['CM_TENSORRT_TAR_FILE_PATH'])) + folder_name = my_tar.getnames()[0] + if not os.path.exists(os.path.join(os.getcwd(), folder_name)): + my_tar.extractall() + my_tar.close() + + import re + version_match = re.match(r'TensorRT-(\d.\d.\d.\d)', folder_name) + if not version_match: + return {'return': 1, 'error': 'Extracted TensorRT folder does not seem proper - Version information missing'} + version = version_match.group(1) + + env['CM_TENSORRT_VERSION'] = version + env['CM_TENSORRT_INSTALL_PATH'] = os.path.join(os.getcwd(), folder_name) + env['CM_TENSORRT_LIB_PATH'] = os.path.join(os.getcwd(), folder_name, "lib") + env['CM_TMP_PATH'] = os.path.join(os.getcwd(), folder_name, "bin") + env['+CPLUS_INCLUDE_PATH'] = [ os.path.join(os.getcwd(), folder_name, "include") ] + env['+C_INCLUDE_PATH'] = [ os.path.join(os.getcwd(), folder_name, "include") ] + env['+LD_LIBRARY_PATH'] = [ os.path.join(os.getcwd(), folder_name, "lib") ] + + return {'return':0} + +def postprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if '+LD_LIBRARY_PATH' not in env: + env['+LD_LIBRARY_PATH'] = [] + + if '+PATH' not in env: + env['+PATH'] = [] 
+ + if '+ LDFLAGS' not in env: + env['+ LDFLAGS'] = [] + + #if 'CM_TENSORRT_LIB_WITH_PATH' in env: + # tensorrt_lib_path = os.path.dirname(env['CM_TENSORRT_LIB_WITH_PATH']) + if 'CM_TENSORRT_LIB_PATH' in env: + env['+LD_LIBRARY_PATH'].append(env['CM_TENSORRT_LIB_PATH']) + env['+PATH'].append(env['CM_TENSORRT_LIB_PATH']) #for cmake + env['+ LDFLAGS'].append("-L"+env['CM_TENSORRT_LIB_PATH']) + + version = env['CM_TENSORRT_VERSION'] + + return {'return':0, 'version': version} diff --git a/script/get-tensorrt/run.sh b/script/get-tensorrt/run.sh new file mode 100644 index 0000000000..ac3b30a9d0 --- /dev/null +++ b/script/get-tensorrt/run.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +if [[ ${CM_TENSORRT_VERSION} == 'vdetected' ]]; then + exit 0; +fi + +PIP_EXTRA=`python3 -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"` + +version=${CM_TENSORRT_VERSION} +install_dir=${CM_TENSORRT_INSTALL_PATH} +python_version=${CM_PYTHON_VERSION} +python_version_info=(${python_version//./ }) +python_max_version=${python_version_info[0]} +python_min_version=${python_version_info[1]} + +cd ${install_dir}/python +${CM_PYTHON_BIN_WITH_PATH} -m pip install tensorrt-*-cp${python_max_version}${python_min_version}-none-${CM_HOST_OS_TYPE}_${CM_HOST_OS_MACHINE}.whl $PIP_EXTRA +test $? -eq 0 || exit $? + +cd ${install_dir}/uff +${CM_PYTHON_BIN_WITH_PATH} -m pip install uff-0.6.9-py2.py3-none-any.whl $PIP_EXTRA +test $? -eq 0 || exit $? + +cd ${install_dir}/graphsurgeon +${CM_PYTHON_BIN_WITH_PATH} -m pip install graphsurgeon-0.4.6-py2.py3-none-any.whl $PIP_EXTRA +test $? -eq 0 || exit $? + +cd ${install_dir}/onnx_graphsurgeon +${CM_PYTHON_BIN_WITH_PATH} -m pip install onnx_graphsurgeon-0.3.12-py2.py3-none-any.whl $PIP_EXTRA +test $? -eq 0 || exit $? + +#create softlinks for libnvinfer.so.7 and libnvinfer_plugin.so.7 +# https://forums.developer.nvidia.com/t/could-not-load-dynamic-library-libnvinfer-so-7/231606/5 +if [ ! -f "${install_dir}/lib/libnvinfer.so.7" ]; then + ln -s "${install_dir}/lib/libnvinfer.so" "${install_dir}/lib/libnvinfer.so.7" +fi +test $? -eq 0 || exit $? +if [ ! -f "${install_dir}/lib/libnvinfer_plugin.so.7" ]; then + ln -s "${install_dir}/lib/libnvinfer_plugin.so" "${install_dir}/lib/libnvinfer_plugin.so.7" +fi +test $? -eq 0 || exit $? diff --git a/script/get-terraform/README-extra.md b/script/get-terraform/README-extra.md new file mode 100644 index 0000000000..0fc57d5059 --- /dev/null +++ b/script/get-terraform/README-extra.md @@ -0,0 +1,9 @@ +# Get Terraform +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed Terraform on the system and if not found calls the [install script for Terraform](../script/install-terraform-from-src). + +## Exported Variables +* `CM_TERRAFORM_BIN_WITH_PATH` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. 
RHEL 9 diff --git a/script/get-terraform/README.md b/script/get-terraform/README.md new file mode 100644 index 0000000000..f15886e9a5 --- /dev/null +++ b/script/get-terraform/README.md @@ -0,0 +1,127 @@ +Automatically generated README for this automation recipe: **get-terraform** + +Category: **Cloud automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-terraform,66b33c38a4d7461e) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-terraform)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,terraform,get-terraform* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get terraform get-terraform" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,terraform,get-terraform` + +`cm run script --tags=get,terraform,get-terraform ` + +*or* + +`cmr "get terraform get-terraform"` + +`cmr "get terraform get-terraform " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,terraform,get-terraform',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
</details>
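+
+For reference, version detection works by running `terraform --version` (see `run.sh` below, which writes `tmp-ver.out`) and parsing the output in `customize.py` with the pattern `Terraform\s*v([\d.]+)`. A standalone sketch of that parse, using an example output string that is not taken from the patch:
+
+```python
+import re
+
+# Example `terraform --version` output; the real script reads it from
+# tmp-ver.out produced by run.sh.
+out = 'Terraform v1.6.2\non linux_amd64'
+m = re.search(r'Terraform\s*v([\d.]+)', out)
+print (m.group(1) if m else 'not detected')   # -> 1.6.2
+```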
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,terraform,get-terraform"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,terraform,get-terraform) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get terraform get-terraform" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-terraform/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-terraform/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-terraform/_cm.json)*** + * install,terraform + * `if (CM_REQUIRE_INSTALL == yes)` + - CM script: [install-terraform-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-terraform-from-src) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-terraform/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-terraform/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-terraform/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-terraform/_cm.json) + +___ +### Script output +`cmr "get terraform get-terraform " -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_TERRAFORM_*` +#### New environment keys auto-detected from customize + +* `CM_TERRAFORM_CACHE_TAGS` +* `CM_TERRAFORM_INSTALLED_PATH` \ No newline at end of file diff --git a/script/get-terraform/_cm.json b/script/get-terraform/_cm.json new file mode 100644 index 0000000000..ca0e9a4f15 --- /dev/null +++ b/script/get-terraform/_cm.json @@ -0,0 +1,29 @@ +{ + "alias": "get-terraform", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "clean_files": [], + "category": "Cloud automation", + "new_env_keys": [ + "CM_TERRAFORM_*", + "+PATH" + ], + "prehook_deps": [ + { + "enable_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "reuse_version": true, + "tags": "install,terraform" + } + ], + "tags": [ + "get", + "terraform", + "get-terraform" + ], + "uid": "66b33c38a4d7461e" +} diff --git a/script/get-terraform/customize.py b/script/get-terraform/customize.py new file mode 100644 index 0000000000..c091322bc5 --- /dev/null +++ b/script/get-terraform/customize.py @@ -0,0 +1,59 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'terraform.exe' if os_info['platform'] == 'windows' else 'terraform' + env['FILE_NAME'] = file_name + if 'CM_TERRAFORM_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': 'PATH', + 'detect_version':True, + 'env_path_key':'CM_TERRAFORM_BIN_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + if r['return'] >0 : + if r['return'] == 16: + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return':0} + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'Terraform\s*v([\d.]+)', + 'group_number': 1, + 'env_key':'CM_TERRAFORM_VERSION', + 'which_env':i['env']}) + if r['return'] >0: return r + + version = r['version'] + + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return':0, 'version':version} + +def postprocess(i): + env = i['env'] + + r = detect_version(i) + + if r['return'] >0: return 
r
+
+    version = r['version']
+    found_file_path = env['CM_TERRAFORM_BIN_WITH_PATH']
+
+    found_path = os.path.dirname(found_file_path)
+    env['CM_TERRAFORM_INSTALLED_PATH'] = found_path
+
+    env['CM_TERRAFORM_CACHE_TAGS'] = 'version-'+version
+
+    return {'return':0, 'version': version}
diff --git a/script/get-terraform/run.sh b/script/get-terraform/run.sh
new file mode 100644
index 0000000000..7e0438bb92
--- /dev/null
+++ b/script/get-terraform/run.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+terraform --version > tmp-ver.out
+test $? -eq 0 || exit 1
diff --git a/script/get-tvm-model/README-extra.md b/script/get-tvm-model/README-extra.md
new file mode 100644
index 0000000000..0815c5b53d
--- /dev/null
+++ b/script/get-tvm-model/README-extra.md
@@ -0,0 +1,21 @@
+# CM script
+
+This script optionally tunes and then compiles a model using Apache TVM.
+
+## How To
+```bash
+cm run script --tags=get,tvm-model,_[VARIATION]
+```
+where `[VARIATION]` is one of:
+1) Frontend framework name (`onnx`, `pytorch`, `tensorflow`, `tflite`)
+2) Precision (`fp32`, `int8`, `uint8`)
+3) TVM runtime (`virtual_machine` or `graph_executor`)
+4) `tune-model`, if you want to tune the model with the TVM MetaSchedule
+5) Model name (`model.#`)
+6) Batch size (`batch_size.#`)
+In 5) and 6) you can insert any suitable value in place of the symbol `#`, e.g. `model.bert` or `batch_size.8`.
+
+## Notes
+
+For the PyTorch and TensorFlow frontends you should set the environment variable `CM_ML_MODEL_INPUT_SHAPES` with the input shapes of the model you want to compile (e.g. `"input": (16, 3, 224, 224)`), or the separate variables `CM_ML_MODEL_IMAGE_NUM_CHANNELS`, `CM_ML_MODEL_IMAGE_WIDTH`, `CM_ML_MODEL_IMAGE_HEIGHT` for 2D CV models and `CM_ML_MODEL_MAX_SEQ_LENGTH` for language models.
+If your model is in ONNX format, all input shapes can be extracted automatically.
diff --git a/script/get-tvm-model/README.md b/script/get-tvm-model/README.md
new file mode 100644
index 0000000000..ddced43751
--- /dev/null
+++ b/script/get-tvm-model/README.md
@@ -0,0 +1,290 @@
+Automatically generated README for this automation recipe: **get-tvm-model**
+
+Category: **AI/ML models**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-tvm-model,c1b7b656b6224307) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm-model)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,ml-model-tvm,tvm-model*
+* Output cached?
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get ml-model-tvm tvm-model" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,ml-model-tvm,tvm-model`
+
+`cm run script --tags=get,ml-model-tvm,tvm-model[,variations] `
+
+*or*
+
+`cmr "get ml-model-tvm tvm-model"`
+
+`cmr "get ml-model-tvm tvm-model [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model-tvm,tvm-model',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
</details>
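+
+Variations are appended to the same tag string with an `_` prefix, so a concrete call mirroring the CLI form above might look like this (the variation choices are illustrative):
+
+```python
+import cmind
+
+# Frontend, precision and batch size selected via variation tags
+# (see the Variations section below).
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model-tvm,tvm-model,_onnx,_fp32,_batch_size.8',
+                  'out':'con'})
+if r['return']>0:
+    print (r['error'])
+```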
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,ml-model-tvm,tvm-model"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,ml-model-tvm,tvm-model) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get ml-model-tvm tvm-model[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_tune-model` + - Environment variables: + - *CM_TUNE_TVM_MODEL*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_xgboost + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_pandas + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_tornado + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
+ + + * Group "**batchsize**" +
+ Click here to expand this section. + + * `_batch_size.#` + - Environment variables: + - *CM_ML_MODEL_MAX_BATCH_SIZE*: `#` + - Workflow: + +
+ + + * Group "**frontend**" +
+ Click here to expand this section. + + * **`_onnx`** (default) + - Environment variables: + - *CM_TVM_FRONTEND_FRAMEWORK*: `onnx` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_onnx + * CM names: `--adr.['onnx']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_pytorch` + - Aliases: `_torch` + - Environment variables: + - *CM_TVM_FRONTEND_FRAMEWORK*: `pytorch` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_torch + * CM names: `--adr.['pytorch', 'torch']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torchvision + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_tensorflow` + - Aliases: `_tf` + - Environment variables: + - *CM_TVM_FRONTEND_FRAMEWORK*: `tensorflow` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_tensorflow + * CM names: `--adr.['tensorflow']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_tflite` + - Environment variables: + - *CM_TVM_FRONTEND_FRAMEWORK*: `tflite` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_tflite + * CM names: `--adr.['tflite']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
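+
+A note on the PyTorch and TensorFlow frontends: per README-extra above, they need input shapes supplied via `CM_ML_MODEL_INPUT_SHAPES` (ONNX can derive shapes from the model itself). A sketch with an illustrative model and shape string; the `BATCH_SIZE` placeholder is substituted by the script from `CM_ML_MODEL_MAX_BATCH_SIZE`:
+
+```python
+import cmind
+
+# Illustrative model/shape; the env dictionary is the documented way
+# to pass CM_ML_MODEL_* keys (see "Default environment" below).
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model-tvm,tvm-model,_pytorch,_model.resnet50',
+                  'env':{'CM_ML_MODEL_INPUT_SHAPES': '"input": (BATCH_SIZE, 3, 224, 224)'},
+                  'out':'con'})
+if r['return']>0:
+    print (r['error'])
+```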
+ + + * Group "**model**" +
+ Click here to expand this section. + + * `_model.#` + - Environment variables: + - *CM_ML_MODEL*: `#` + - Workflow: + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * **`_fp32`** (default) + - Workflow: + * `_int8` + - Workflow: + * `_uint8` + - Workflow: + +
+ + + * Group "**runtime**" +
+ Click here to expand this section. + + * `_graph_executor` + - Environment variables: + - *CM_TVM_USE_VM*: `no` + - Workflow: + * **`_virtual_machine`** (default) + - Environment variables: + - *CM_TVM_USE_VM*: `yes` + - Workflow: + +
+ + +#### Default variations + +`_fp32,_onnx,_virtual_machine` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_ML_MODEL_MAX_BATCH_SIZE: `1` +* CM_TUNE_TVM_MODEL: `no` +* CM_TVM_USE_VM: `yes` +* CM_TVM_FRONTEND_FRAMEWORK: `onnx` + +
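+
+A related key handled in `customize.py` below is `CM_TUNE_TVM_MODEL_WORKDIR`: if it points to an existing MetaSchedule work directory containing `database_workload.json` and `database_tuning_record.json`, compilation reuses those tuning records and the `_tune-model` variation is effectively disabled. A sketch with a hypothetical path:
+
+```python
+import cmind
+
+# Hypothetical workdir from an earlier _tune-model run; it must contain
+# database_workload.json and database_tuning_record.json.
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ml-model-tvm,tvm-model,_onnx',
+                  'env':{'CM_TUNE_TVM_MODEL_WORKDIR': '/experiments/metaschedule_workdir'},
+                  'out':'con'})
+if r['return']>0:
+    print (r['error'])
+```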
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm-model/_cm.json)*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,tvm + * CM names: `--adr.['tvm']...` + - CM script: [get-tvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-tvm) + * get,generic-python-lib,_decorator + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_psutil + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_scipy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_attrs + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm-model/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm-model/_cm.json)*** + * get,ml-model,raw + * CM names: `--adr.['original-model']...` + - CM script: [get-ml-model-3d-unet-kits19](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-3d-unet-kits19) + - CM script: [get-ml-model-bert-base-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-bert-base-squad) + - CM script: [get-ml-model-bert-large-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-bert-large-squad) + - CM script: [get-ml-model-dlrm-terabyte](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-dlrm-terabyte) + - CM script: [get-ml-model-efficientnet-lite](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-efficientnet-lite) + - CM script: [get-ml-model-gptj](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-gptj) + - CM script: [get-ml-model-huggingface-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-huggingface-zoo) + - CM script: [get-ml-model-llama2](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-llama2) + - CM script: [get-ml-model-mobilenet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-mobilenet) + - CM script: [get-ml-model-resnet50](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-resnet50) + - CM script: [get-ml-model-retinanet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet) + - CM script: [get-ml-model-rnnt](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-rnnt) + - CM script: [get-ml-model-stable-diffusion](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-stable-diffusion) + - CM script: [get-ml-model-tiny-resnet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-tiny-resnet) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm-model/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm-model/_cm.json) + 1. 
***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm-model/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm-model/_cm.json) + +___ +### Script output +`cmr "get ml-model-tvm tvm-model [,variations]" -j` +#### New environment keys (filter) + +* `CM_ML_MODEL_*` +* `CM_TUNE_TVM_*` +* `CM_TVM_*` +#### New environment keys auto-detected from customize + +* `CM_ML_MODEL_FILE` +* `CM_ML_MODEL_FILE_WITH_PATH` +* `CM_ML_MODEL_FRAMEWORK` +* `CM_ML_MODEL_INPUT_SHAPES` +* `CM_ML_MODEL_ORIGINAL_FILE_WITH_PATH` +* `CM_ML_MODEL_PATH` +* `CM_TUNE_TVM_MODEL` +* `CM_TVM_FRONTEND_FRAMEWORK` \ No newline at end of file diff --git a/script/get-tvm-model/_cm.json b/script/get-tvm-model/_cm.json new file mode 100644 index 0000000000..c9609efa86 --- /dev/null +++ b/script/get-tvm-model/_cm.json @@ -0,0 +1,202 @@ +{ + "alias": "get-tvm-model", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML models", + "deps": [ + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "names": [ + "tvm" + ], + "tags": "get,tvm" + }, + { + "tags": "get,generic-python-lib,_decorator" + }, + { + "tags": "get,generic-python-lib,_psutil" + }, + { + "tags": "get,generic-python-lib,_scipy" + }, + { + "tags": "get,generic-python-lib,_attrs" + } + ], + "default_env": { + "CM_ML_MODEL_MAX_BATCH_SIZE": "1", + "CM_TUNE_TVM_MODEL": "no", + "CM_TVM_USE_VM": "yes", + "CM_TVM_FRONTEND_FRAMEWORK": "onnx" + }, + "new_env_keys": [ + "CM_ML_MODEL_*", + "CM_TUNE_TVM_*", + "CM_TVM_*" + ], + "tags": [ + "get", + "ml-model-tvm", + "tvm-model" + ], + "uid": "c1b7b656b6224307", + "variations": { + "fp32": { + "add_deps_recursive": { + "original-model": { + "tags": "_fp32" + } + }, + "default": true, + "group": "precision" + }, + "int8": { + "add_deps_recursive": { + "original-model": { + "tags": "_int8" + } + }, + "group": "precision" + }, + "uint8": { + "add_deps_recursive": { + "original-model": { + "tags": "_uint8" + } + }, + "group": "precision" + }, + "tune-model": { + "env": { + "CM_TUNE_TVM_MODEL": "yes" + }, + "deps": [ + { + "tags": "get,generic-python-lib,_xgboost" + }, + { + "tags": "get,generic-python-lib,_pandas" + }, + { + "tags": "get,generic-python-lib,_tornado" + } + ] + }, + "virtual_machine": { + "default": true, + "env": { + "CM_TVM_USE_VM": "yes" + }, + "group": "runtime" + }, + "graph_executor": { + "env": { + "CM_TVM_USE_VM": "no" + }, + "group": "runtime" + }, + "onnx": { + "default": true, + "group": "frontend", + "env": { + "CM_TVM_FRONTEND_FRAMEWORK": "onnx" + }, + "deps": [ + { + "names": [ + "onnx" + ], + "tags": "get,generic-python-lib,_onnx" + } + ] + }, + "pytorch": { + "group": "frontend", + "env": { + "CM_TVM_FRONTEND_FRAMEWORK": "pytorch" + }, + "deps": [ + { + "names": [ + "pytorch", + "torch" + ], + "tags": "get,generic-python-lib,_torch" + }, + { + "tags": "get,generic-python-lib,_torchvision" + } + ] + }, + "tensorflow": { + "group": "frontend", + "env": { + "CM_TVM_FRONTEND_FRAMEWORK": "tensorflow" + }, + "deps": [ + { + "names": [ + "tensorflow" + ], + "tags": "get,generic-python-lib,_tensorflow" + } + ] + }, + "tflite": { + "group": "frontend", + "env": { + "CM_TVM_FRONTEND_FRAMEWORK": "tflite" + }, + "deps": [ + { + "names": [ + "tflite" + ], + "tags": "get,generic-python-lib,_tflite" + } + ] + }, + "tf": { + "alias": "tensorflow" + }, + "torch": { + "alias": "pytorch" + }, + 
"batch_size.#": { + "group": "batchsize", + "env": { + "CM_ML_MODEL_MAX_BATCH_SIZE": "#" + } + }, + "model.#": { + "group": "model", + "env": { + "CM_ML_MODEL": "#" + } + } + }, + "prehook_deps": [ + { + "names": [ + "original-model" + ], + "tags": "get,ml-model,raw", + "update_tags_from_env": [ + "CM_ML_MODEL" + ], + "update_tags_from_env_with_prefix": { + "_": [ + "CM_TVM_FRONTEND_FRAMEWORK" + ] + } + } + ] +} \ No newline at end of file diff --git a/script/get-tvm-model/customize.py b/script/get-tvm-model/customize.py new file mode 100644 index 0000000000..26732a279c --- /dev/null +++ b/script/get-tvm-model/customize.py @@ -0,0 +1,54 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + work_dir = env.get('CM_TUNE_TVM_MODEL_WORKDIR', '') + + if work_dir != '': + if not os.path.exists(work_dir): + raise FileNotFoundError( + f"Error: the specified path \"{work_dir}\"does not exist") + + if not os.path.exists(f"{work_dir}/database_workload.json"): + raise FileNotFoundError( + "Error: the found workdir does not contain database_workload.json") + + if not os.path.exists(f"{work_dir}/database_tuning_record.json"): + raise FileNotFoundError( + "Error: the found workdir does not contain database_tuning_record.json") + + if env.get('CM_TUNE_TVM_MODEL', '') != '': + print("The \"tune-model\" variation is selected, but at the same time the path to the existing \"work_dir\" is also specified. The compiled model will be based on the found existing \"work_dir\".") + env["CM_TUNE_TVM_MODEL"] = "no" + + + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env['CM_ML_MODEL_ORIGINAL_FILE_WITH_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + env['CM_ML_MODEL_FILE'] = 'model-tvm.so' + env['CM_ML_MODEL_PATH'] = os.path.join(os.getcwd()) + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + os.getcwd(), env['CM_ML_MODEL_FILE']) + env['CM_ML_MODEL_FRAMEWORK'] = "tvm-" + env['CM_ML_MODEL_FRAMEWORK'] + if 'CM_ML_MODEL_INPUT_SHAPES' in env.keys(): + env['CM_ML_MODEL_INPUT_SHAPES'] = env['CM_ML_MODEL_INPUT_SHAPES'].replace( + "BATCH_SIZE", env['CM_ML_MODEL_MAX_BATCH_SIZE']) + if 'CM_TVM_FRONTEND_FRAMEWORK' in env and env['CM_TVM_FRONTEND_FRAMEWORK'] == 'pytorch': + env['CM_PREPROCESS_PYTORCH'] = 'yes' + return {'return':0} diff --git a/script/get-tvm-model/process.py b/script/get-tvm-model/process.py new file mode 100644 index 0000000000..53543e0f83 --- /dev/null +++ b/script/get-tvm-model/process.py @@ -0,0 +1,252 @@ +import os +import tempfile +from typing import Dict, Tuple, Optional, List, Any, Union + +if os.environ.get("CM_TVM_FRONTEND_FRAMEWORK", None) == "pytorch": + import torch + import torchvision + +import tvm +from tvm import relay, meta_schedule +from tvm.driver.tvmc.frontends import load_model + +def get_shape_dict_from_onnx( + shape: List[int], + model_path: str +) -> Dict[str, List[int]]: + import onnx + onnx_model = onnx.load(model_path) + if len(shape) == 1: + for _input in onnx_model.graph.input: + tensor_type = _input.type.tensor_type + if (tensor_type.HasField("shape")): + for dimension in tensor_type.shape.dim: + if dimension.dim_value != 0: + shape.append(dimension.dim_value) + input_all = [node.name for node in onnx_model.graph.input] + input_initializer = [node.name for node in onnx_model.graph.initializer] + net_feed_input = list(set(input_all) - set(input_initializer)) + return {input_name: shape for input_name in 
net_feed_input} + +def get_mod_params( + model_path: str, + model_name: str, + batch_size: int, + frontend: str, + input_shapes_str: Optional[str] = None, + input_layer_name: Optional[str] = None, + num_channels: Optional[int] = None, + image_width: Optional[int] = None, + image_height: Optional[int] = None, + max_seq_length: Optional[int] = None +) -> Tuple[tvm.IRModule, Dict[str, tvm.nd.NDArray]]: + if not input_shapes_str and (not image_width or not image_height) and not max_seq_length and frontend != "onnx": + raise RuntimeError( + "Error: None of environment variables storing shape is set!" + ) + if input_shapes_str: + shape_dict = eval('{' + input_shapes_str.replace('BATCH_SIZE', str(batch_size)) + '}') + else: + shape = [] + if image_width and image_height: + shape = [batch_size, num_channels, image_height, image_width] + elif max_seq_length: + shape = [batch_size, max_seq_length] + if frontend == "onnx": + shape_dict = get_shape_dict_from_onnx(shape if len(shape) > 0 else [batch_size], model_path) + else: + raise RuntimeError( + "Error: Cannot find proper shapes in environment variables" + ) + print(f"Shape dict {shape_dict}") + if frontend == "pytorch": + torch_model = getattr(torchvision.models, model_name)(weights=None) + torch_model.load_state_dict(torch.load(model_path)) + torch_model.fc = torch.nn.Sequential( + torch_model.fc, + torch.nn.Softmax(dim=1) + ) + torch_model = torch_model.eval() + shape_list = list(shape_dict.items()) + input_data = torch.randn(shape_list[0][1]) + traced_model = torch.jit.trace(torch_model, input_data).eval() + mod, params = tvm.relay.frontend.from_pytorch(traced_model, shape_list) + else: + tvmc_model = load_model(path=model_path, shape_dict=shape_dict) + mod, params = tvm.relay.transform.DynamicToStatic()(tvmc_model.mod), tvmc_model.params + + input_layer_name_file = os.path.join(os.getcwd(), "input_layer_name") + if not input_layer_name: + input_layer_name = shape_dict.keys()[0] + with open(input_layer_name_file, 'w') as file: + file.write(input_layer_name) + + return mod, params + +def tune_model( + mod: tvm.IRModule, + params: Dict[str, tvm.nd.NDArray], + target: tvm.target.Target, +) -> Tuple[str, meta_schedule.database.Database]: + work_dir = os.path.join(os.getcwd(), "metaschedule_workdir") + if not os.path.exists(work_dir): + os.mkdir(work_dir) + print("Extracting tasks...") + extracted_tasks = meta_schedule.relay_integration.extract_tasks( + mod, target, params + ) + tasks, task_weights = meta_schedule.relay_integration.extracted_tasks_to_tune_contexts( + extracted_tasks, work_dir, strategy="evolutionary" + ) + + print("Begin tuning...") + evaluator_config = meta_schedule.runner.config.EvaluatorConfig( + number=1, + repeat=10, + enable_cpu_cache_flush=True + ) + database = meta_schedule.tune.tune_tasks( + tasks=tasks, + task_weights=task_weights, + work_dir=work_dir, + max_trials_global=10000, + num_trials_per_iter=64, + max_trials_per_task=512, + builder=meta_schedule.builder.LocalBuilder(), + runner=meta_schedule.runner.LocalRunner( + evaluator_config=evaluator_config + ), + ) + + return work_dir, database + + +def compile_model( + mod: tvm.IRModule, + params: Dict[str, tvm.nd.NDArray], + work_dir: str, + target: tvm.target.Target, + opt_level: int, + build_conf: Dict[str, Any], + use_vm: bool, + database: Optional[meta_schedule.database.Database] = None, +) -> Union[tvm.runtime.Module, tvm.runtime.vm.Executable]: + if work_dir != '': + if not database: + database = meta_schedule.database.JSONDatabase( + 
f"{work_dir}/database_workload.json", + f"{work_dir}/database_tuning_record.json", + allow_missing=False + ) + build_conf["relay.backend.use_meta_schedule"] = True + with tvm.transform.PassContext( + opt_level=opt_level, + config=build_conf + ): + lib = meta_schedule.relay_integration.compile_relay( + database=database, + mod=mod, + target=target, + params=params, + backend="vm" if use_vm else "graph" + ) + else: + with tvm.transform.PassContext( + opt_level=opt_level, + config=build_conf, + ): + if use_vm: + lib = tvm.relay.backend.vm.compile( + mod=mod, + target=target, + params=params + ) + else: + lib = tvm.relay.build( + mod, + target=target, + params=params + ) + return lib + +def serialize_vm( + vm_exec: tvm.runtime.vm.Executable +) -> tvm.runtime.Module: + path_consts = os.path.join( + tempfile.mkdtemp( + dir=os.getcwd(), + suffix="-tvm-tmp" + ), + "consts" + ) + code_path = os.path.join(os.getcwd(), "vm_exec_code.ro") + vm_exec.move_late_bound_consts(path_consts, byte_limit=256) + code, lib = vm_exec.save() + with open(code_path, "wb") as file: + file.write(code) + return lib + +def main() -> None: + model_path = os.environ.get('CM_ML_MODEL_FILE_WITH_PATH', None) + compiled_model = os.path.join(os.getcwd(), 'model-tvm.so') + print('TVM model: ' + model_path) + if model_path.endswith('.so') or model_path.endswith('.dylib'): + compiled_model = model_path + if not os.path.isfile(compiled_model): + print('') + raise RuntimeError( + f"Error: Model file {compiled_model} not found!" + ) + else: + mod, params = get_mod_params( + model_path=os.environ.get('CM_ML_MODEL_FILE_WITH_PATH', None), + model_name=os.environ.get('CM_ML_MODEL', '').strip().lower(), + batch_size=int(os.environ.get('CM_ML_MODEL_MAX_BATCH_SIZE', 1)), + frontend=os.environ.get("CM_TVM_FRONTEND_FRAMEWORK", None), + input_shapes_str=os.environ.get('CM_ML_MODEL_INPUT_SHAPES', None), + input_layer_name=os.environ.get('CM_ML_MODEL_INPUT_LAYER_NAME', None), + num_channels=int(os.environ.get('CM_ML_MODEL_IMAGE_NUM_CHANNELS', 3)), + image_width=int(os.environ.get('CM_ML_MODEL_IMAGE_WIDTH', 0)), + image_height=int(os.environ.get('CM_ML_MODEL_IMAGE_HEIGHT', 0)), + max_seq_length=int(os.environ.get('CM_ML_MODEL_MAX_SEQ_LENGTH', 0)), + ) + opt_level = int(os.environ.get('CM_MLPERF_TVM_OPT_LEVEL', 3)) + target = os.environ.get( + 'CM_MLPERF_TVM_TARGET', + f"llvm -num-cores {os.environ.get('CM_HOST_CPU_TOTAL_CORES', '1')}" + ) + build_conf = {} + target_host = None + tvm_target = tvm.target.Target(target, host=target_host) + tune_model_flag = os.environ.get('CM_TUNE_TVM_MODEL', 'no') == 'yes' + work_dir = '' + database = None + use_vm = os.environ.get('CM_TVM_USE_VM', 'no') == 'yes' + if tune_model_flag: + work_dir, database = tune_model( + mod=mod, + params=params, + target=tvm_target, + ) + lib = compile_model( + mod=mod, + params=params, + work_dir=work_dir if work_dir != '' else os.environ.get('CM_TUNE_TVM_MODEL_WORKDIR', ''), + target=tvm_target, + opt_level=opt_level, + build_conf=build_conf, + use_vm=use_vm, + database=database + ) + if use_vm: + lib = serialize_vm( + vm_exec=lib + ) + + with open(os.path.join(os.getcwd(), "tvm_executor"), "w") as file: + file.write("virtual_machine" if use_vm else "graph_executor") + lib.export_library(compiled_model) + print('TVM compiled model: ' + compiled_model) + +if __name__ == "__main__": + main() diff --git a/script/get-tvm-model/run.sh b/script/get-tvm-model/run.sh new file mode 100644 index 0000000000..6b18e39dfb --- /dev/null +++ b/script/get-tvm-model/run.sh @@ -0,0 +1,7 @@ 
+#!/bin/bash + +cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/process.py" + +echo $cmd + +eval $cmd diff --git a/script/get-tvm/README-extra.md b/script/get-tvm/README-extra.md new file mode 100644 index 0000000000..ae5cc929ee --- /dev/null +++ b/script/get-tvm/README-extra.md @@ -0,0 +1,5 @@ +```bash +cm run script "get llvm" --version=14.0.0 +cm run script "get tvm _llvm" --version=0.10.0 +cm run script "python app image-classification tvm-onnx" +``` diff --git a/script/get-tvm/README.md b/script/get-tvm/README.md new file mode 100644 index 0000000000..9c7fafd64d --- /dev/null +++ b/script/get-tvm/README.md @@ -0,0 +1,200 @@ +Automatically generated README for this automation recipe: **get-tvm** + +Category: **AI/ML frameworks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-tvm,93c89140e6224f4b) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,tvm,get-tvm* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get tvm get-tvm" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,tvm,get-tvm` + +`cm run script --tags=get,tvm,get-tvm[,variations] ` + +*or* + +`cmr "get tvm get-tvm"` + +`cmr "get tvm get-tvm [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +

 +Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,tvm,get-tvm', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,tvm,get-tvm"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,tvm,get-tvm) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get tvm get-tvm[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_cuda` + - Environment variables: + - *CM_TVM_USE_CUDA*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * `_openmp` + - Environment variables: + - *CM_TVM_USE_OPENMP*: `yes` + - Workflow: + +
+ + + * Group "**installation-type**" +
+ Click here to expand this section. + + * **`_llvm`** (default) + - Environment variables: + - *CM_TVM_USE_LLVM*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,llvm + * CM names: `--adr.['llvm']...` + - CM script: [get-llvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-llvm) + * `_pip-install` + - Environment variables: + - *CM_TVM_PIP_INSTALL*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_apache-tvm + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
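For example (a sketch; it assumes a CUDA toolchain is already present on the host when `_cuda` is selected), variations can be combined when invoking the script, following the `--tags=...[,variations]` pattern shown above:

```bash
# Build TVM with the default LLVM backend plus CUDA support
cm run script --tags=get,tvm,get-tvm,_llvm,_cuda

# Or skip the source build entirely and install the pip package
cm run script --tags=get,tvm,get-tvm,_pip-install
```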
+ + +#### Default variations + +`_llvm` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_GIT_CHECKOUT: `main` +* CM_GIT_URL: `https://github.com/apache/tvm` +* CM_TVM_PIP_INSTALL: `no` + +
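For example (a hypothetical override, using the `--env.KEY=VALUE` mechanism described above):

```bash
# Clone TVM from the default URL but pin the checkout to a release tag
cm run script --tags=get,tvm,get-tvm --env.CM_GIT_CHECKOUT=v0.10.0
```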

 + +#### Versions +* `main` +* `v0.10.0` +* `v0.7.0` +* `v0.8.0` +* `v0.9.0` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm/_cm.json)*** + * cmake,get-cmake + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,generic-python-lib,_typing_extensions + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_decorator + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_scipy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_attrs + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_psutil + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm/customize.py)*** + 1.
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-tvm/_cm.json) + +___ +### Script output +`cmr "get tvm get-tvm [,variations]" -j` +#### New environment keys (filter) + +* `+DYLD_FALLBACK_LIBRARY_PATH` +* `+LD_LIBRARY_PATH` +* `+PYTHONPATH` +* `CM_TVM_*` +* `TVM_HOME` +#### New environment keys auto-detected from customize + +* `CM_TVM_PATH_INCLUDE` +* `CM_TVM_PATH_LIB` \ No newline at end of file diff --git a/script/get-tvm/_cm.json b/script/get-tvm/_cm.json new file mode 100644 index 0000000000..dd81f21c4e --- /dev/null +++ b/script/get-tvm/_cm.json @@ -0,0 +1,124 @@ +{ + "alias": "get-tvm", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "AI/ML frameworks", + "cache": true, + "deps": [ + { + "tags": "cmake,get-cmake", + "version_min": "3.18" + }, + { + "tags": "detect,cpu" + }, + { + "tags": "get,generic-python-lib,_typing_extensions" + }, + { + "tags": "get,generic-python-lib,_decorator" + }, + { + "tags": "get,generic-python-lib,_scipy" + }, + { + "tags": "get,generic-python-lib,_attrs" + }, + { + "tags": "get,generic-python-lib,_psutil" + } + ], + "default_env": { + "CM_GIT_CHECKOUT": "main", + "CM_GIT_URL": "https://github.com/apache/tvm", + "CM_TVM_PIP_INSTALL": "no" + }, + "extra_cache_tags_from_env": [ + { + "env": "CM_LLVM_CACHE_TAGS", + "prefix": "llvm-" + } + ], + "new_env_keys": [ + "TVM_HOME", + "CM_TVM_*", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH", + "+PYTHONPATH" + ], + "tags": [ + "get", + "tvm", + "get-tvm" + ], + "uid": "93c89140e6224f4b", + "variations": { + "pip-install": { + "group": "installation-type", + "deps": [ + { + "tags": "get,generic-python-lib,_apache-tvm" + } + ], + "env": { + "CM_TVM_PIP_INSTALL": "yes" + } + }, + "cuda": { + "deps": [ + { + "tags": "get,cuda" + } + ], + "env": { + "CM_TVM_USE_CUDA": "yes" + } + }, + "llvm": { + "group": "installation-type", + "default": true, + "deps": [ + { + "names": [ "llvm" ], + "tags": "get,llvm", + "version_min": "14.0.0" + } + ], + "env": { + "CM_TVM_USE_LLVM": "yes" + } + }, + "openmp": { + "env": { + "CM_TVM_USE_OPENMP": "yes" + } + } + }, + "versions": { + "main": { + "env": { + "CM_GIT_CHECKOUT": "main" + } + }, + "v0.7.0": { + "env": { + "CM_GIT_CHECKOUT": "v0.7.0" + } + }, + "v0.8.0": { + "env": { + "CM_GIT_CHECKOUT": "v0.8.0" + } + }, + "v0.9.0": { + "env": { + "CM_GIT_CHECKOUT": "v0.9.0" + } + }, + "v0.10.0": { + "env": { + "CM_GIT_CHECKOUT": "v0.10.0" + } + } + } +} diff --git a/script/get-tvm/customize.py b/script/get-tvm/customize.py new file mode 100644 index 0000000000..fba65ec4b7 --- /dev/null +++ b/script/get-tvm/customize.py @@ -0,0 +1,50 @@ +from cmind import utils +import os + +def preprocess(i): + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + return {'return':0} + +def postprocess(i): + + os_info = i['os_info'] + + env = i['env'] + if env.get('CM_TVM_PIP_INSTALL', '') == "yes": + return {'return':0} + + + tvm_home = env['TVM_HOME'] + +# 20221024: we save and restore env in the main script and can clean env here for determinism +# if '+PYTHONPATH' not in env: env['+PYTHONPATH']=[] + env['+PYTHONPATH']=[] + + env['+PYTHONPATH'].append(os.path.join(tvm_home,'python')) + + + # Prepare paths + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + env[key] = [] + + ## Include + include_path = os.path.join(tvm_home, 'include') + if 
os.path.isdir(include_path): + if os_info['platform'] != 'windows': + env['+C_INCLUDE_PATH'].append(include_path) + env['+CPLUS_INCLUDE_PATH'].append(include_path) + + env['CM_TVM_PATH_INCLUDE'] = include_path + + ## Lib + lib_path = os.path.join(tvm_home, 'build') + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + env['CM_TVM_PATH_LIB'] = lib_path + + + return {'return':0} diff --git a/script/get-tvm/run.sh b/script/get-tvm/run.sh new file mode 100644 index 0000000000..e7c492058b --- /dev/null +++ b/script/get-tvm/run.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +CUR_DIR=$PWD + +if [ "${CM_TVM_PIP_INSTALL}" != "no" ]; then + exit 0; +fi + +echo "******************************************************" +echo "Path for TVM: ${CUR_DIR}" +echo "" + +if [ ! -d "tvm" ]; then + echo "git clone --recursive -b ${CM_GIT_CHECKOUT} ${CM_GIT_URL} tvm" + git clone --recursive -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} tvm + test $? -eq 0 || exit 1 +fi + +cd tvm +if [ "${CM_GIT_SHA}" != "" ]; then + echo "git checkout ${CM_GIT_SHA}" + git checkout ${CM_GIT_SHA} + test $? -eq 0 || exit 1 +fi + + +if [ ! -d "${CUR_DIR}/tvm/build" ]; then + echo "******************************************************" + echo "Configuring TVM ..." + echo "" + + mkdir -p "${CUR_DIR}/tvm/build" + + cp cmake/config.cmake ${CUR_DIR}/tvm/build + + cd ${CUR_DIR}/tvm/build + + if [[ ${CM_TVM_USE_LLVM} == "yes" ]]; then + if [[ -z "${CM_LLVM_INSTALLED_PATH}" ]]; then + llvm_version=$(echo "${CM_LLVM_CLANG_VERSION}" | cut -d. -f1) + sed -i.bak "s|set(USE_LLVM OFF)|set(USE_LLVM llvm-config-$llvm_version)|" config.cmake + else + sed -i.bak "s|set(USE_LLVM OFF)|set(USE_LLVM ${CM_LLVM_INSTALLED_PATH}/llvm-config)|" config.cmake + fi + fi + + if [[ ${CM_TVM_USE_OPENMP} == "yes" ]]; then + sed -i.bak 's/set(USE_OPENMP none)/set(USE_OPENMP gnu)/' config.cmake + fi + + if [[ ${CM_TVM_USE_CUDA} == "yes" ]]; then + # Enable CUDA in place (the original sed mistakenly rewrote USE_CUDA to USE_OPENMP) + sed -i.bak 's/set(USE_CUDA OFF)/set(USE_CUDA ON)/' config.cmake + fi + + cmake .. + test $? -eq 0 || exit 1 +fi + +CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} +CM_MAKE_CORES=${CM_MAKE_CORES:-2} + +echo "******************************************************" +echo "Building TVM using ${CM_MAKE_CORES} cores ..." +echo "" + +cd ${CUR_DIR}/tvm/build + +make -j${CM_MAKE_CORES} +test $? -eq 0 || exit 1 + +INSTALL_DIR=$PWD + +cd ../../ + +echo "TVM_HOME=$PWD/tvm" > tmp-run-env.out +echo "CM_TVM_INSTALLED_PATH=$PWD/tvm" >> tmp-run-env.out + +echo "******************************************************" +echo "TVM was built and installed to ${INSTALL_DIR} ..."
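Since `customize.py` above prepends `$TVM_HOME/python` to `+PYTHONPATH` and `run.sh` writes `TVM_HOME` to `tmp-run-env.out`, a quick sanity check of the finished build could look like this (a sketch, assuming the script completed successfully and `TVM_HOME` points at the cached checkout):

```bash
# Verify that the freshly built TVM can be imported
export PYTHONPATH=${TVM_HOME}/python:${PYTHONPATH}
python3 -c "import tvm; print(tvm.__version__)"
```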
diff --git a/script/get-xilinx-sdk/README.md b/script/get-xilinx-sdk/README.md new file mode 100644 index 0000000000..2b402b84cc --- /dev/null +++ b/script/get-xilinx-sdk/README.md @@ -0,0 +1,140 @@ +Automatically generated README for this automation recipe: **get-xilinx-sdk** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-xilinx-sdk,76d4d1bd09df4490) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-xilinx-sdk)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,xilinx,sdk* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get xilinx sdk" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,xilinx,sdk` + +`cm run script --tags=get,xilinx,sdk [--input_flags]` + +*or* + +`cmr "get xilinx sdk"` + +`cmr "get xilinx sdk " [--input_flags]` + + +#### Run this script from Python + +

 +Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,xilinx,sdk', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,xilinx,sdk"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,xilinx,sdk) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get xilinx sdk" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +

 +Click here to expand this section. + +* `--input=value` → `CM_XILINX_SDK_FILE_PATH=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "input":...}) +``` + +
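For instance (a hypothetical local path; the installer filename matches the 2019.1 release registered in `_cm.json` below):

```bash
# Point the script at a previously downloaded Xilinx SDK installer
cm run script --tags=get,xilinx,sdk --input=$HOME/Downloads/Xilinx_SDK_2019.1_0524_1430_Lin64.bin
```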
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +

 + +#### Versions +Default version: `2019.1` + +* `2019.1` +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-xilinx-sdk/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-xilinx-sdk/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-xilinx-sdk/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-xilinx-sdk/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-xilinx-sdk/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-xilinx-sdk/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-xilinx-sdk/_cm.json) + +___ +### Script output +`cmr "get xilinx sdk " [--input_flags] -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_XILINX_*` +#### New environment keys auto-detected from customize diff --git a/script/get-xilinx-sdk/_cm.json b/script/get-xilinx-sdk/_cm.json new file mode 100644 index 0000000000..b0b1cd02a6 --- /dev/null +++ b/script/get-xilinx-sdk/_cm.json @@ -0,0 +1,35 @@ +{ + "alias": "get-xilinx-sdk", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "deps": [], + "default_version": "2019.1", + "input_description": {}, + "input_mapping": { + "input": "CM_XILINX_SDK_FILE_PATH" + }, + "new_env_keys": [ + "CM_XILINX_*", + "+PATH" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "tags": [ + "get", + "xilinx", + "sdk" + ], + "uid": "76d4d1bd09df4490", + "variations": {}, + "versions": { + "2019.1": { + "env": { + "CM_DOWNLOAD_URL": "https://www.xilinx.com/member/forms/download/xef.html?filename=Xilinx_SDK_2019.1_0524_1430_Lin64.bin", + "CM_DOWNLOAD_CHECKSUM": "7ccb3840d36c305a7cb34b314db7d7f2" + } + } + } +} diff --git a/script/get-xilinx-sdk/customize.py b/script/get-xilinx-sdk/customize.py new file mode 100644 index 0000000000..02e31c620f --- /dev/null +++ b/script/get-xilinx-sdk/customize.py @@ -0,0 +1,32 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + file_path = env.get("CM_XILINX_SDK_BIN_PATH") + if not file_path or not os.path.exists(file_path): + return {'return':1, 'error': 'CM_XILINX_SDK_BIN_PATH does not exist'} + + bin_folder_path = os.path.dirname(file_path) + if '+PATH' in env: + env['+PATH'].append(bin_folder_path) + else: + env['+PATH'] = [ bin_folder_path ] + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/get-xilinx-sdk/run.sh b/script/get-xilinx-sdk/run.sh new file mode 100644 index 0000000000..3a584c10cf --- /dev/null +++ b/script/get-xilinx-sdk/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $?
-eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" diff --git a/script/get-zendnn/README.md b/script/get-zendnn/README.md new file mode 100644 index 0000000000..d3f560dcab --- /dev/null +++ b/script/get-zendnn/README.md @@ -0,0 +1,129 @@ +Automatically generated README for this automation recipe: **get-zendnn** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-zendnn,d1c6feb0ee684b09) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zendnn)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,zendnn,amd,from.src* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get zendnn amd from.src" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,zendnn,amd,from.src` + +`cm run script --tags=get,zendnn,amd,from.src ` + +*or* + +`cmr "get zendnn amd from.src"` + +`cmr "get zendnn amd from.src " ` + + +#### Run this script from Python + +

 +Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,zendnn,amd,from.src', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,zendnn,amd,from.src"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,zendnn,amd,from.src) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get zendnn amd from.src" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
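A convenient way to inspect what this recipe resolves and exports (for example the `ZENDNN_*` source and library paths wired up in `customize.py` below) is the JSON output flag shown in the Script output section (a sketch):

```bash
# Build ZenDNN from source and dump the resulting state as JSON
cmr "get zendnn amd from.src" -j
```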
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zendnn/_cm.json)*** + * get,amd,aocl + * CM names: `--adr.['aocl']...` + - CM script: [get-aocl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-aocl) + * get,lib,blis,_amd + - CM script: [get-blis](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-blis) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,git,_repo.https://github.com/amd/ZenDNN.git + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zendnn/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zendnn/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zendnn/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zendnn/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zendnn/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zendnn/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zendnn/_cm.json) + +___ +### Script output +`cmr "get zendnn amd from.src " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/get-zendnn/_cm.json b/script/get-zendnn/_cm.json new file mode 100644 index 0000000000..6575a37aed --- /dev/null +++ b/script/get-zendnn/_cm.json @@ -0,0 +1,41 @@ +{ + "alias": "get-zendnn", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "deps": [ + { + "names": [ + "aocl" + ], + "tags": "get,amd,aocl" + }, + { + "tags": "get,lib,blis,_amd" + }, + { + "tags": "detect,cpu" + }, + { + "tags":"get,git,_repo.https://github.com/amd/ZenDNN.git" + } + ], + "input_description": {}, + "input_mapping": {}, + "new_env_keys": [], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "get", + "zendnn", + "amd", + "from.src" + ], + "uid": "d1c6feb0ee684b09", + "variations": { + }, + "versions": {} +} diff --git a/script/get-zendnn/customize.py b/script/get-zendnn/customize.py new file mode 100644 index 0000000000..d9918a266d --- /dev/null +++ b/script/get-zendnn/customize.py @@ -0,0 +1,27 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + env['ZENDNN_BLIS_PATH'] = env['CM_BLIS_INSTALL_PATH'] + env['ZENDNN_LIBM_PATH'] = env['CM_AOCL_BUILD_PATH'] + + env['ZENDNN_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/get-zendnn/run.bat b/script/get-zendnn/run.bat new file mode 100644 index 0000000000..648302ca71 --- /dev/null +++ b/script/get-zendnn/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/script/get-zendnn/run.sh 
b/script/get-zendnn/run.sh new file mode 100644 index 0000000000..58026fcbb9 --- /dev/null +++ b/script/get-zendnn/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +cd ${ZENDNN_SRC_PATH} + +make clean +test $? -eq 0 || exit $? + +source scripts/zendnn_build.sh gcc +test $? -eq 0 || exit $? diff --git a/script/get-zephyr-sdk/README-extra.md b/script/get-zephyr-sdk/README-extra.md new file mode 100644 index 0000000000..3c139b6072 --- /dev/null +++ b/script/get-zephyr-sdk/README-extra.md @@ -0,0 +1,19 @@ +# GET-ZEPHYR-SDK +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) installs the [Zephyr-SDK](https://github.com/zephyrproject-rtos/sdk-ng/releases) from a prebuilt binary. + +## Install +```bash +cm run script --tags=get,zephyr-sdk --version=0.13.2 +``` +## Exported Variables +1. [ZEPHYR_SDK_INSTALL_DIR](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-zephyr-sdk/customize.py#L13): Location in CM cache where Zephyr SDK is installed. +2. [ZEPHYR_TOOLCHAIN_VARIANT](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-zephyr-sdk/customize.py#L12) + +## Supported Versions +1. 0.13.1 +2. 0.13.2 +3. 0.15.0 + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/script/get-zephyr-sdk/README.md b/script/get-zephyr-sdk/README.md new file mode 100644 index 0000000000..070b698cf7 --- /dev/null +++ b/script/get-zephyr-sdk/README.md @@ -0,0 +1,128 @@ +Automatically generated README for this automation recipe: **get-zephyr-sdk** + +Category: **TinyML automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-zephyr-sdk,c70ae1a7567f4a7b) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr-sdk)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,zephyr-sdk* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get zephyr-sdk" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,zephyr-sdk` + +`cm run script --tags=get,zephyr-sdk ` + +*or* + +`cmr "get zephyr-sdk"` + +`cmr "get zephyr-sdk " ` + + +#### Run this script from Python + +

 +Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,zephyr-sdk', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,zephyr-sdk"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,zephyr-sdk) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get zephyr-sdk" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
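For example (using the `--version` flag demonstrated in the notes above; `0.15.0` is one of the supported versions listed below):

```bash
# Install a specific Zephyr SDK release instead of the default 0.13.2
cm run script --tags=get,zephyr-sdk --version=0.15.0
```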
+ +#### Versions +Default version: `0.13.2` + +* `0.13.1` +* `0.13.2` +* `0.15.0` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr-sdk/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr-sdk/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr-sdk/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr-sdk/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr-sdk/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr-sdk/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr-sdk/_cm.json) + +___ +### Script output +`cmr "get zephyr-sdk " -j` +#### New environment keys (filter) + +* `ZEPHYR_*` +#### New environment keys auto-detected from customize diff --git a/script/get-zephyr-sdk/_cm.json b/script/get-zephyr-sdk/_cm.json new file mode 100644 index 0000000000..ee57aaaf3c --- /dev/null +++ b/script/get-zephyr-sdk/_cm.json @@ -0,0 +1,39 @@ +{ + "alias": "get-zephyr-sdk", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "TinyML automation", + "cache": true, + "clean_files": [], + "default_version": "0.13.2", + "deps": [ + { + "tags": "detect,os" + } + ], + "tags": [ + "get", + "zephyr-sdk" + ], + "new_env_keys": [ + "ZEPHYR_*" + ], + "uid": "c70ae1a7567f4a7b", + "versions": { + "0.13.1": { + "env": { + "CM_ZEPHYR_SDK_VERSION": "0.13.1" + } + }, + "0.13.2": { + "env": { + "CM_ZEPHYR_SDK_VERSION": "0.13.2" + } + }, + "0.15.0": { + "env": { + "CM_ZEPHYR_SDK_VERSION": "0.15.0" + } + } + } +} diff --git a/script/get-zephyr-sdk/customize.py b/script/get-zephyr-sdk/customize.py new file mode 100644 index 0000000000..87619e7a01 --- /dev/null +++ b/script/get-zephyr-sdk/customize.py @@ -0,0 +1,15 @@ +from cmind import utils +import os + +def preprocess(i): + env = i['env'] + return {'return':0} + + +def postprocess(i): + + env = i['env'] + env['ZEPHYR_TOOLCHAIN_VARIANT'] = "zephyr" + env['ZEPHYR_SDK_INSTALL_DIR'] = os.path.join(os.getcwd(), "zephyr-sdk-" + env['CM_ZEPHYR_SDK_VERSION']) + + return {'return':0} diff --git a/script/get-zephyr-sdk/run.sh b/script/get-zephyr-sdk/run.sh new file mode 100644 index 0000000000..07c55e078b --- /dev/null +++ b/script/get-zephyr-sdk/run.sh @@ -0,0 +1,21 @@ +#!/bin/bash +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} +version=${CM_ZEPHYR_SDK_VERSION} +os=${CM_HOST_OS_TYPE} +if [ $os == "darwin" ]; then + os=${CM_HOST_OS_FLAVOR} +fi +platform=${CM_HOST_OS_MACHINE} +if [ $platform == "arm64" ]; then + platform=aarch64 +fi + +file=zephyr-sdk-${version}-${os}-${platform}-setup.run +url=https://github.com/zephyrproject-rtos/sdk-ng/releases/download/v${version}/$file +wget -nc "${url}" +if [ "${?}" != "0" ]; then exit 1; fi +chmod +x $file +./$file -- -d $PWD/zephyr-sdk-$version -y + +if [ "${?}" != "0" ]; then exit 1; fi + diff --git a/script/get-zephyr/README-extra.md b/script/get-zephyr/README-extra.md new file 
mode 100644 index 0000000000..34aa511ce1 --- /dev/null +++ b/script/get-zephyr/README-extra.md @@ -0,0 +1,8 @@ +# GET-ZEPHYR +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) installs the [Zephyr](https://github.com/zephyrproject-rtos/zephyr) real-time OS including all the needed system and python dependencies using its own command line tool [west](https://docs.zephyrproject.org/latest/develop/west/index.html). +## Exported Variables +1. [ZEPHYR_DIR](https://github.com/octoml/ck/blob/master/cm-mlops/script/get-zephyr/customize.py#L15): Location in CM cache where Zephyr is installed. + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/script/get-zephyr/README.md b/script/get-zephyr/README.md new file mode 100644 index 0000000000..5b31dbb4f2 --- /dev/null +++ b/script/get-zephyr/README.md @@ -0,0 +1,134 @@ +Automatically generated README for this automation recipe: **get-zephyr** + +Category: **TinyML automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-zephyr,d4105c2cdb044276) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,zephyr* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get zephyr" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,zephyr` + +`cm run script --tags=get,zephyr ` + +*or* + +`cmr "get zephyr"` + +`cmr "get zephyr " ` + + +#### Run this script from Python + +

 +Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,zephyr', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,zephyr"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,zephyr) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get zephyr" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
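As a usage sketch (a hypothetical follow-up; `CM_ZEPHYR_DIR` is exported to dependent CM scripts rather than to the calling shell):

```bash
# Install Zephyr v2.7; the exported CM_ZEPHYR_DIR (see output keys below)
# then points at the 'zephyr' checkout inside the CM cache
cm run script --tags=get,zephyr --version=v2.7
```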
+ +#### Versions +Default version: `v2.7` + +* `v2.7` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr/_cm.json)*** + * get,python3 + * CM names: `--adr.['python3', 'python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,cmake + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * get,generic-python-lib,_west + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr/_cm.json) + 1. ***Run native script if exists*** + * [run-ubuntu.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr/run-ubuntu.sh) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-zephyr/_cm.json) + +___ +### Script output +`cmr "get zephyr " -j` +#### New environment keys (filter) + +* `CM_ZEPHYR_*` +#### New environment keys auto-detected from customize + +* `CM_ZEPHYR_DIR` \ No newline at end of file diff --git a/script/get-zephyr/_cm.json b/script/get-zephyr/_cm.json new file mode 100644 index 0000000000..60cc546ec2 --- /dev/null +++ b/script/get-zephyr/_cm.json @@ -0,0 +1,41 @@ +{ + "alias": "get-zephyr", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "TinyML automation", + "clean_files": [], + "default_version": "v2.7", + "deps": [ + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3", + "version_min": "3.7.0" + }, + { + "tags": "get,cmake", + "version_min": "3.20.0" + }, + { + "tags": "get,generic-python-lib,_west" + } + ], + "tags": [ + "get", + "zephyr" + ], + "new_env_keys": [ + "CM_ZEPHYR_*" + ], + "uid": "d4105c2cdb044276", + "versions": { + "v2.7": { + "env": { + "CM_ZEPHYR_VERSION": "v2.7" + } + } + } +} diff --git a/script/get-zephyr/customize.py b/script/get-zephyr/customize.py new file mode 100644 index 0000000000..c157b165ce --- /dev/null +++ b/script/get-zephyr/customize.py @@ -0,0 +1,17 @@ +from cmind import utils +import os + +def preprocess(i): + env = i['env'] + if '+PATH' not in env: + env['+PATH'] = [] + env['+PATH'].append("$HOME/.local/bin") + return {'return':0} + + +def postprocess(i): + + env = i['env'] + env['CM_ZEPHYR_DIR'] = os.path.join(os.getcwd(), "zephyr") + + return {'return':0} diff --git a/script/get-zephyr/run-ubuntu.sh b/script/get-zephyr/run-ubuntu.sh new file mode 100644 index 0000000000..a8cb216b47 --- /dev/null +++ b/script/get-zephyr/run-ubuntu.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +sudo apt-get install -y --no-install-recommends gcc-multilib g++-multilib libsdl2-dev +. 
${CM_TMP_CURRENT_SCRIPT_PATH}/run.sh diff --git a/script/get-zephyr/run.sh b/script/get-zephyr/run.sh new file mode 100644 index 0000000000..7d5b53245e --- /dev/null +++ b/script/get-zephyr/run.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} +CM_PYTHON_BIN_WITH_PATH=${CM_PYTHON_BIN_WITH_PATH:-python3} + +CUR=`pwd` + +if [ "${?}" != "0" ]; then exit 1; fi + +if [ ! -d "zephyr" ]; then + west init --mr ${CM_ZEPHYR_VERSION}-branch $CUR + if [ "${?}" != "0" ]; then exit 1; fi +fi + +cd $CUR/zephyr +west update +if [ "${?}" != "0" ]; then exit 1; fi +west zephyr-export +if [ "${?}" != "0" ]; then exit 1; fi +${CM_PYTHON_BIN_WITH_PATH} -m pip install -r $CUR/zephyr/scripts/requirements.txt +if [ "${?}" != "0" ]; then exit 1; fi + diff --git a/script/gui/README-about.md b/script/gui/README-about.md new file mode 100644 index 0000000000..064b0f257e --- /dev/null +++ b/script/gui/README-about.md @@ -0,0 +1,15 @@ +This CM script provides a unified GUI to run CM scripts using [Streamlit library](https://streamlit.io). + +If you want to run it in a cloud (Azure, AWS, GCP), you need to open some port and test that you can reach it from outside. + +By default, streamlit uses port 8501 but you can change it as follows: + +```bash +cm run script "cm gui" --port 80 +``` + +If you have troubles accessing this port, use this simple python module to test if your port is open: +```bash +python3 -m http.server 80 +``` + diff --git a/script/gui/README.md b/script/gui/README.md new file mode 100644 index 0000000000..20ee96c07c --- /dev/null +++ b/script/gui/README.md @@ -0,0 +1,245 @@ +Automatically generated README for this automation recipe: **gui** + +Category: **GUI** + +License: **Apache 2.0** + +Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=gui,605cac42514a4c69) ]* + +--- + +This CM script provides a unified GUI to run CM scripts using [Streamlit library](https://streamlit.io). + +If you want to run it in a cloud (Azure, AWS, GCP), you need to open some port and test that you can reach it from outside. + +By default, streamlit uses port 8501 but you can change it as follows: + +```bash +cm run script "cm gui" --port 80 +``` + +If you have troubles accessing this port, use this simple python module to test if your port is open: +```bash +python3 -m http.server 80 +``` + + + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/gui)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *cm,gui,cm-gui,script-gui,cm-script-gui,streamlit* +* Output cached? 
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "cm gui cm-gui script-gui cm-script-gui streamlit" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=cm,gui,cm-gui,script-gui,cm-script-gui,streamlit` + +`cm run script --tags=cm,gui,cm-gui,script-gui,cm-script-gui,streamlit[,variations] [--input_flags]` + +*or* + +`cmr "cm gui cm-gui script-gui cm-script-gui streamlit"` + +`cmr "cm gui cm-gui script-gui cm-script-gui streamlit [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + + +#### Input Flags + +* --**script**=script tags +* --**app**=gui app + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "script":...} +``` +#### Run this script from Python + +

 +Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'cm,gui,cm-gui,script-gui,cm-script-gui,streamlit', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
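In practice the GUI is usually opened for one concrete script; the `--script` flag (mapped to `CM_GUI_SCRIPT_TAGS` below) pre-selects it, for example (hypothetical tags borrowed from the TVM notes earlier in this patch):

```bash
# Launch the GUI pre-configured for an image-classification script
cm run script "cm gui" --script="app,image-classification,tvm-onnx"
```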
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="cm,gui,cm-gui,script-gui,cm-script-gui,streamlit"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=cm,gui,cm-gui,script-gui,cm-script-gui,streamlit) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "cm gui cm-gui script-gui cm-script-gui streamlit[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * Group "**app**" +
+ Click here to expand this section. + + * `_chatgpt` + - Environment variables: + - *CM_GUI_APP*: `chatgpt` + - Workflow: + * `_graph` + - Environment variables: + - *CM_GUI_APP*: `graph` + - Workflow: + 1. ***Read "prehook_deps" on other CM scripts*** + * get,generic-python-lib,_matplotlib + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_mpld3 + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_main` + - Environment variables: + - *CM_GUI_APP*: `app` + - Workflow: + * `_playground` + - Environment variables: + - *CM_GUI_APP*: `playground` + - Workflow: + 1. ***Read "prehook_deps" on other CM scripts*** + * get,generic-python-lib,_matplotlib + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_mpld3 + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_streamlit_option_menu + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_pandas + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.plotly + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.streamlit-aggrid + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
+ + +#### Script flags mapped to environment +

 +Click here to expand this section. + +* `--address=value` → `CM_GUI_ADDRESS=value` +* `--app=value` → `CM_GUI_APP=value` +* `--exp_key_c=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_C=value` +* `--exp_key_s=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_S=value` +* `--exp_key_x=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_X=value` +* `--exp_key_y=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_Y=value` +* `--exp_max_results=value` → `CM_GUI_GRAPH_EXPERIMENT_MAX_RESULTS=value` +* `--exp_name=value` → `CM_GUI_GRAPH_EXPERIMENT_NAME=value` +* `--exp_tags=value` → `CM_GUI_GRAPH_EXPERIMENT_TAGS=value` +* `--exp_title=value` → `CM_GUI_GRAPH_EXPERIMENT_TITLE=value` +* `--exp_uid=value` → `CM_GUI_GRAPH_EXPERIMENT_RESULT_UID=value` +* `--no_browser=value` → `CM_GUI_NO_BROWSER=value` +* `--no_run=value` → `CM_GUI_NO_RUN=value` +* `--port=value` → `CM_GUI_PORT=value` +* `--prefix=value` → `CM_GUI_SCRIPT_PREFIX_LINUX=value` +* `--script=value` → `CM_GUI_SCRIPT_TAGS=value` +* `--title=value` → `CM_GUI_TITLE=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "address":...}) +``` + +
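A hypothetical invocation of the `_graph` app combining several of these flags (the axis key names depend on which fields actually exist in the stored experiment results):

```bash
cm run script "cm gui _graph" \
    --exp_tags=mlperf-inference \
    --exp_key_x=Result --exp_key_y=Accuracy \
    --no_browser=yes --port=8501
```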
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_GUI_EXTRA_CMD: `` +* CM_GUI_SCRIPT_PREFIX_LINUX: `gnome-terminal --` +* CM_GUI_APP: `app` + +
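On a headless server the defaults above can be overridden so that no terminal prefix is used and streamlit does not try to open a browser (a sketch; `customize.py` below adds `--server.headless true` when `no_browser` is set):

```bash
cm run script "cm gui" --env.CM_GUI_SCRIPT_PREFIX_LINUX="" --no_browser=yes
```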
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/gui/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,python + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_cmind + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_streamlit + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/gui/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/gui/_cm.yaml) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/gui/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/gui/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/gui/_cm.yaml) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/gui/_cm.yaml) + +___ +### Script output +`cmr "cm gui cm-gui script-gui cm-script-gui streamlit [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/gui/_cm.yaml b/script/gui/_cm.yaml new file mode 100644 index 0000000000..efa578d659 --- /dev/null +++ b/script/gui/_cm.yaml @@ -0,0 +1,106 @@ +# Identification of this CM script +alias: gui +uid: 605cac42514a4c69 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "GUI" + +developers: "[Grigori Fursin](https://cKnowledge.org/gfursin)" + +# User-friendly tags to find this CM script +tags: + - cm + - gui + - cm-gui + - script-gui + - cm-script-gui + - streamlit + +# Map script inputs to environment variables +input_mapping: + script: CM_GUI_SCRIPT_TAGS + prefix: CM_GUI_SCRIPT_PREFIX_LINUX + port: CM_GUI_PORT + address: CM_GUI_ADDRESS + title: CM_GUI_TITLE + no_browser: CM_GUI_NO_BROWSER + no_run: CM_GUI_NO_RUN + app: CM_GUI_APP + exp_tags: CM_GUI_GRAPH_EXPERIMENT_TAGS + exp_name: CM_GUI_GRAPH_EXPERIMENT_NAME + exp_max_results: CM_GUI_GRAPH_EXPERIMENT_MAX_RESULTS + exp_uid: CM_GUI_GRAPH_EXPERIMENT_RESULT_UID + exp_title: CM_GUI_GRAPH_EXPERIMENT_TITLE + exp_key_x: CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_X + exp_key_y: CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_Y + exp_key_c: CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_C + exp_key_s: CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_S + +default_env: + CM_GUI_EXTRA_CMD: "" + CM_GUI_SCRIPT_PREFIX_LINUX: "gnome-terminal --" + CM_GUI_APP: app + +# Dependencies on other CM scripts +deps: + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect/install python + 
- tags: get,python + names: + - python + - python3 + + - tags: get,generic-python-lib,_cmind + - tags: get,generic-python-lib,_streamlit + +# Variations to customize dependencies +variations: + main: + group: + app + env: + CM_GUI_APP: 'app' + + graph: + group: + app + env: + CM_GUI_APP: 'graph' + prehook_deps: + - tags: get,generic-python-lib,_matplotlib + - tags: get,generic-python-lib,_mpld3 + + playground: + group: + app + env: + CM_GUI_APP: 'playground' + prehook_deps: + - tags: get,generic-python-lib,_matplotlib + - tags: get,generic-python-lib,_mpld3 + - tags: get,generic-python-lib,_streamlit_option_menu + - tags: get,generic-python-lib,_numpy + - tags: get,generic-python-lib,_pandas + - tags: get,generic-python-lib,_package.plotly + - tags: get,generic-python-lib,_package.streamlit-aggrid + + chatgpt: + group: + app + env: + CM_GUI_APP: 'chatgpt' + + +input_description: + script: "script tags" + app: "gui app" diff --git a/script/gui/app.py b/script/gui/app.py new file mode 100644 index 0000000000..0f4f93d21f --- /dev/null +++ b/script/gui/app.py @@ -0,0 +1,72 @@ +# Developer(s): Grigori Fursin + +import streamlit as st +import os +import cmind + +import misc + +def main(): + + query_params = misc.get_params(st) + + script_path = os.environ.get('CM_GUI_SCRIPT_PATH','') + script_alias = os.environ.get('CM_GUI_SCRIPT_ALIAS','') + title = os.environ.get('CM_GUI_TITLE', '') + + # Check if script tags are specified from CMD + script_tags = os.environ.get('CM_GUI_SCRIPT_TAGS','').strip() + + script_tags_from_url = query_params.get('tags',['']) + if len(script_tags_from_url)>0: + x_script_tags_from_url = script_tags_from_url[0].strip() + if x_script_tags_from_url != '': + script_tags = x_script_tags_from_url + + meta = {} + + if script_tags !='': + # Check type of tags + if ' ' in script_tags: + script_tags = script_tags.replace(' ',',') + + print ('Searching CM scripts using tags "{}"'.format(script_tags)) + + r = cmind.access({'action':'find', + 'automation':'script,5b4e0237da074764', + 'tags':script_tags}) + if r['return']>0: return r + + lst = r['list'] + + if len(lst)==1: + script = lst[0] + meta = script.meta + script_path = script.path + script_alias = meta['alias'] + + + + # Read meta + if len(meta)==0 and script_path!='' and os.path.isdir(script_path): + fn = os.path.join(script_path, '_cm') + r = cmind.utils.load_yaml_and_json(fn) + if r['return'] == 0: + meta = r['meta'] + # 'script_path' already points at this directory ('script' is not defined here) + script_alias = meta['alias'] + + import script + + ii = {'st': st, + 'params': query_params, + 'script_path': script_path, + 'script_alias': script_alias, + 'script_tags': script_tags, + 'script_meta': meta, + 'skip_bottom': False} + + return script.page(ii) + +if __name__ == "__main__": + main() diff --git a/script/gui/customize.py b/script/gui/customize.py new file mode 100644 index 0000000000..9c920ab2c4 --- /dev/null +++ b/script/gui/customize.py @@ -0,0 +1,64 @@ +# Developer(s): Grigori Fursin + +from cmind import utils + +import os +import json +import shutil +import subprocess + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + script_path = i['run_script_input']['path'] + + cm = i['automation'].cmind + + script_tags = env.get('CM_GUI_SCRIPT_TAGS','') + + if script_tags != '': + # Check type of tags + if ' ' in script_tags: + script_tags = script_tags.replace(' ',',') + + print ('Searching CM scripts using tags "{}"'.format(script_tags)) + + r = cm.access({'action':'find', + 'automation':'script', + 'tags':script_tags}) + if
r['return']>0: return r + + lst = r['list'] + + if len(lst)==1: + script = lst[0] + env['CM_GUI_SCRIPT_PATH'] = script.path + env['CM_GUI_SCRIPT_ALIAS'] = script.meta['alias'] + + print ('Script found in path {}'.format(script.path)) + + env['CM_GUI_SCRIPT_TAGS'] = script_tags + + # Check other vars and assemble extra CMD + extra_cmd = env.get('CM_GUI_EXTRA_CMD','') + + port = env.get('CM_GUI_PORT', '') + address = env.get('CM_GUI_ADDRESS', '') + no_browser = env.get('CM_GUI_NO_BROWSER', '') + + if no_browser!='': + extra_cmd+=' --server.headless true' + + if address!='': + extra_cmd+=' --server.address='+address + + if port!='': + extra_cmd+=' --server.port='+port + + env['CM_GUI_EXTRA_CMD'] = extra_cmd + + print ('Prepared extra CMD for streamlit: {}'.format(extra_cmd)) + + return {'return':0} diff --git a/script/gui/graph.py b/script/gui/graph.py new file mode 100644 index 0000000000..f06109c78f --- /dev/null +++ b/script/gui/graph.py @@ -0,0 +1,778 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import misc + +import streamlit.components.v1 as components + +import streamlit as st + +import matplotlib +import matplotlib.pyplot as plt +import matplotlib.colors as mcolors + +import numpy as np +import pandas as pd + +import mpld3 +from mpld3 import plugins +from mpld3 import utils + +security = ['os.', 'streamlit.', 'matplotlib.', 'numpy.', 'pandas.', 'mpld3.'] + + +repro_badges={ + 'acm_ctuning_repro_badge_functional':{'img':'https://cTuning.org/images/artifacts_evaluated_functional_v1_1_small.png'}, + 'acm_ctuning_repro_badge_reproduce':{'img':'https://cTuning.org/images/results_reproduced_v1_1_small.png'}, + 'acm_ctuning_repro_badge_support_docker':{'img':'https://cTuning.org/images/docker_logo2_small.png'}, + 'acm_ctuning_repro_badge_cm_interface':{'img':'https://cTuning.org/images/logo-ck-single-tr4.png'} + } + + +class OpenBrowserOnClick(mpld3.plugins.PluginBase): + + JAVASCRIPT=""" + + mpld3.register_plugin("openbrowseronclick", PointClickableHTMLTooltip); + + PointClickableHTMLTooltip.prototype = Object.create(mpld3.Plugin.prototype); + PointClickableHTMLTooltip.prototype.constructor = PointClickableHTMLTooltip; + PointClickableHTMLTooltip.prototype.requiredProps = ["id"]; + PointClickableHTMLTooltip.prototype.defaultProps = {targets:null}; + + function PointClickableHTMLTooltip(fig, props){ + mpld3.Plugin.call(this, fig, props); + }; + + PointClickableHTMLTooltip.prototype.draw = function(){ + var obj = mpld3.get_element(this.props.id); + var targets = this.props.targets; + obj.elements() + .on("mousedown", function(d, i){ + window.open(targets[i]); + }); + }; + + """ + + def __init__(self, points, targets=None): + self.points = points + self.targets = targets + self.dict_ = {"type": "openbrowseronclick", + "id": mpld3.utils.get_id(points, None), + "targets": targets} + + + + + + +def main(): + + params = misc.get_params(st) + + # Set title + st.title('CM experiment visualization') + + return visualize(st, params) + + + + +def visualize(st, query_params, action = ''): + + # Query experiment + result_uid = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_RESULT_UID','') + q_result_uid = query_params.get('result_uid',['']) + if len(q_result_uid)>0: + if q_result_uid[0]!='': + result_uid = q_result_uid[0] + + v_experiment_name = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_NAME','') + q_experiment_name = query_params.get('name',['']) + if len(q_experiment_name)>0: + if q_experiment_name[0]!='': + v_experiment_name = q_experiment_name[0] + + v_experiment_tags='' + if 
v_experiment_name=='': + v_experiment_tags = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_TAGS','') + q_experiment_tags = query_params.get('tags',['']) + if len(q_experiment_tags)>0: + if q_experiment_tags[0]!='': + v_experiment_tags = q_experiment_tags[0] + v_experiment_tags = v_experiment_tags.replace(',',' ') + + # Check default +# if v_experiment_tags == '' and v_experiment_name == '': +# v_experiment_tags = 'mlperf-inference v4.0' + + v_experiment_tags = st.text_input('Select CM experiment tags separated by space:', value=v_experiment_tags, key='v_experiment_tags').strip() + v_experiment_tags = v_experiment_tags.replace(',',' ') + + # Get all experiment names + ii = {'action':'find', + 'automation':'experiment,a0a2d123ef064bcb'} + + # If name is given, do not use tags + if v_experiment_name!='': + ii['artifact']=v_experiment_name + elif v_experiment_tags!='': + ii['tags']=v_experiment_tags.replace(' ',',') + + r = cmind.access(ii) + if r['return']>0: return r + + lst_all = r['list'] + + experiments = [''] + + selection = 0 + index = 1 + for l in sorted(lst_all, key=lambda x: ( + ','.join(x.meta.get('tags',[])), + x.meta.get('alias',''), + x.meta['uid'] + )): + + meta = l.meta + + if v_experiment_name!='' and (v_experiment_name == meta['alias'] or v_experiment_name == meta['uid']): + selection = index + + name = ' '.join(meta.get('tags',[])) + if name =='': name = meta.get('alias', '') + if name =='': name = meta['uid'] + + + experiments.append(name) + + index+=1 + + if len(lst_all) == 1: + selection = 1 + + # Show experiment artifacts + experiment = st.selectbox('Select experiment from {} found:'.format(len(experiments)-1), + range(len(experiments)), + format_func=lambda x: experiments[x], + index=selection, + key='experiment') + + + lst = [lst_all[experiment-1]] if experiment > 0 else lst_all + + if len(lst)>8: + st.markdown('Too many experiments - continue pruning ...') + return {'return':0} + + + # Check experiments + results = [] + results_with_password = [] + passwords = [] + results_meta = {} + + for experiment in lst: + path = experiment.path + + for d in os.listdir(path): + path2 = os.path.join(path, d) + if os.path.isdir(path2): + path_to_result = os.path.join(path, d, 'cm-result.json') + + if os.path.isfile(path_to_result): + emeta = experiment.meta + + desc = {'path':path_to_result, + 'experiment_dir': d, + 'experiment_uid':emeta['uid'], + 'experiment_alias':emeta['alias'], + 'experiment_tags':','.join(emeta.get('tags',[]))} + + add = True + if result_uid!='': + add = False + r = cmind.utils.load_json(path_to_result) + if r['return'] == 0: + meta = r['meta'] + + results_meta[path_to_result] = meta + + for m in meta: + if m.get('uid','') == result_uid: + add = True + break + + if add: + pwd = experiment.meta.get('password_hash','') + if pwd=='': + results.append(desc) + else: + desc['password_hash'] = pwd + + if pwd not in passwords: + passwords.append(pwd) + + results_with_password.append(desc) + + # Check if password + if len(passwords)>0: + password = st.text_input('Some results are protected by password. 
Enter password to unlock them:', value='', key='v_experiment_pwd').strip() + + if password!='': + import bcrypt + # salt = bcrypt.gensalt() + # TBD: temporal hack to demo password protection for experiments + # salt = bcrypt.gensalt() + password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e' + password_hash2 = bcrypt.hashpw(password.encode('utf-8'), password_salt).decode('utf-8') + + for result in results_with_password: + if result['password_hash'] == password_hash2: + results.append(result) + + # How to visualize selection + if len(results)==0: + st.markdown('No results found!') + return {'return':0} + + + if st.session_state.get('tmp_cm_results','')=='': + st.session_state['tmp_cm_results']=len(results) + elif int(st.session_state['tmp_cm_results'])!=len(results): + st.session_state['tmp_cm_results']=len(results) + st.session_state['how']=0 + + + how = '' + + if result_uid=='': + v_max_results = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_MAX_RESULTS','') + + if v_max_results!='' and len(results)>int(v_max_results): + st.markdown('Too many results - continue pruning ...') + return {'return':0} + + v_how = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_HOW','') + q_how = query_params.get('type',['']) + if len(q_how)>0: + if q_how[0]!='': + v_how = q_how[0] + + how_selection = ['', '2d-static', '2d', 'bar'] + how_selection_desc = ['', 'Scatter plot (static)', 'Scatter plot (interactive, slow - to be improved)', 'Bar plot (static)'] + + how_index = 0 + if v_how!='' and v_how in how_selection: + how_index = how_selection.index(v_how) + + how2 = st.selectbox('Select how to visualize {} CM experiment set(s):'.format(len(results)), + range(len(how_selection_desc)), + format_func=lambda x: how_selection_desc[x], + index = how_index, + key = 'how') + + + if how2 == '' or how2 == 0: + return {'return':0} + + how = how_selection[how2] + + how = how.strip() + + # Continue visualizing + all_values = [] + keys = [] + all_data = [] + + + derived_metrics_value = query_params.get('derived_metrics',[''])[0].strip() + derived_metrics_value = st.text_input("Optional: add derived metrics in Python. Example: result['Accuracy2'] = result['Accuracy']*2", + value = derived_metrics_value).strip() + + for x in security: + if x in derived_metrics_value: + derived_metrics_value='' + break + + error_shown2 = False + for desc in results: + path_to_result = desc['path'] + + if path_to_result in results_meta: + result_meta = results_meta[path_to_result] + else: + r = cmind.utils.load_json_or_yaml(path_to_result) + if r['return']>0: return r + + result_meta = r['meta'] + + for result in result_meta: + # Add extra info + for k in ['experiment_dir', 'experiment_alias', 'experiment_uid', 'experiment_tags']: + if k in desc: + result[k]=desc[k] + + if derived_metrics_value!='': + try: + exec(derived_metrics_value) + except Exception as e: + if not error_shown2: + st.markdown('*Syntax error in derived metrics: {}*'.format(e)) + error_shown2 = True + + all_values.append(result) + + for k in result.keys(): + if k not in keys: + keys.append(k) + + first_keys = ['Organization', 'Model', 'Scenario', 'SystemName', 'notes', 'framework', 'Result', 'Result_Units', 'Accuracy'] + sorted_keys = [k for k in first_keys if k in keys] + [k for k in sorted(keys, key=lambda s: s.lower()) if k not in first_keys] + + filter_value = query_params.get('filter',[''])[0].strip() + if result_uid=='': # and filter_value!='': + filter_value = st.text_input("Optional: add result filter in Python. 
Examples: result['Accuracy']>75 or 'llama2' in result['Model']", value = filter_value).strip() + + st.markdown('---') + + for x in security: + if x in filter_value: + filter_value='' + break + + # all_values is a list of dictionaries with all keys + error_shown=False + for result in all_values: + + if filter_value!='': + try: + if not eval(filter_value): + continue + except Exception as e: + if not error_shown: + st.markdown('*Syntax error in filter: {}*'.format(e)) + error_shown = True + + # Check if 1 result UID is selected + if result_uid!='' and result.get('uid','')!=result_uid: + continue + + data = [] + for k in sorted_keys: + data.append(result.get(k)) + + all_data.append(data) + + if result_uid!='': break + + ################################################### + if len(all_data)==0: + st.markdown('No results found for your selection.') + return {'return':0, 'end_html':end_html} + + + + + + ################################################### + # If experiment found and 1 UID, print a table + if result_uid!='': + st.markdown('---') + st.markdown('# Result summary') + + + data = all_data[0] + + + result = {} + + j=0 + for k in sorted_keys: + result[k] = data[j] + j+=1 + + + # Check badges + x = '' + + for k in repro_badges: + if result.get(k, False): + img = repro_badges[k]['img'] + + x += '\n'.format(img) + + if x!='': + st.write('
<center>\n'+x+'\n</center>
\n', unsafe_allow_html = True) + + + x = '' + for k in sorted_keys: + x+='* **{}**: {}\n'.format(k,str(result[k])) + + st.markdown(x) + + # Check associated reports + r=cmind.access({'action':'find', + 'automation':'report,6462ecdba2054467', + 'tags':'result-{}'.format(result_uid)}) + if r['return']>0: return r + + lst = r['list'] + + for l in lst: + report_path = l.path + + f1 = os.path.join(report_path, 'README.md') + if os.path.isfile(f1): + report_meta = l.meta + + report_alias = report_meta['alias'] + report_title = report_meta.get('title','') + + report_name = report_title if report_title!='' else report_alias + + r = cmind.utils.load_txt(f1) + if r['return']>0: return r + + s = r['string'] + + st.markdown('---') + st.markdown('### '+report_name) + + st.markdown(s) + + + # Create self link + st.markdown("""---""") + + experiment_alias_or_uid = result['experiment_uid'] + + end_html=''' +
+ <a href="{}&result_uid={}">Self link</a> +
+ '''.format(misc.make_url(experiment_alias_or_uid, action=action, md=False), result_uid) + + st.write(end_html, unsafe_allow_html=True) + + + return {'return':0} + + + + + + + ################################################### + # Select 2D keys + axis_key_x='' + axis_key_y='' + axis_key_c='' + + if len(keys)>0: + keys = [''] + sorted_keys + + axis_key_x = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_X','') + q_axis_key_x = query_params.get('x',['']) + if len(q_axis_key_x)>0: + if q_axis_key_x[0]!='': + axis_key_x = q_axis_key_x[0] + i_axis_key_x = 0 + if axis_key_x != '' and axis_key_x in keys: i_axis_key_x = keys.index(axis_key_x) + if axis_key_x == '' and 'Result' in keys: i_axis_key_x = keys.index('Result') + axis_key_x = st.selectbox('Select X key', keys, index=i_axis_key_x, key='x') + + axis_key_y = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_Y','') + q_axis_key_y = query_params.get('y',['']) + if len(q_axis_key_y)>0: + if q_axis_key_y[0]!='': + axis_key_y = q_axis_key_y[0] + i_axis_key_y = 0 + if axis_key_y != '' and axis_key_y in keys: i_axis_key_y = keys.index(axis_key_y) + if axis_key_y == '' and 'Accuracy' in keys: i_axis_key_y = keys.index('Accuracy') + axis_key_y = st.selectbox('Select Y key', keys, index=i_axis_key_y, key='y') + + axis_key_c = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_C','') + q_axis_key_c = query_params.get('c',['']) + if len(q_axis_key_c)>0: + if q_axis_key_c[0]!='': + axis_key_c = q_axis_key_c[0] + i_axis_key_c = 0 + if axis_key_c != '' and axis_key_c in keys: i_axis_key_c = keys.index(axis_key_c) + if axis_key_c == '' and 'version' in keys: i_axis_key_c = keys.index('version') + axis_key_c = st.selectbox('Select Color key', keys, index=i_axis_key_c, key='c') + + axis_key_s = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_S','') + q_axis_key_s = query_params.get('s',['']) + if len(q_axis_key_s)>0: + axis_key_s = q_axis_key_s[0] + i_axis_key_s = 0 + if axis_key_s != '' and axis_key_s in keys: i_axis_key_s = keys.index(axis_key_s) + axis_key_s = st.selectbox('Select Style key', keys, index=i_axis_key_s, key='s') + + + # Select values + values = [] + + if axis_key_x!='' and axis_key_y!='': + for v in all_values: + x = v.get(axis_key_x, None) + y = v.get(axis_key_y, None) + + if x!=None and y!=None: + values.append(v) + + if len(values)>0: + + #fig, ax = plt.subplots(figsize=(12,6)) + fig, ax = plt.subplots() #figsize=(6,4)) + + ax.set_xlabel(axis_key_x) + ax.set_ylabel(axis_key_y) + + title = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_TITLE', '') + q_title = query_params.get('title',['']) + if len(q_title)>0: + if q_title[0]!='': + title = q_title[0] + ax.set_title(title, size=16) + + if how == 'bar': + ax.set_title('Under development ...', size=16) + ax.yaxis.grid(linestyle = 'dotted') + else: + ax.grid(linestyle = 'dotted') + #https://matplotlib.org/stable/api/markers_api.html + + unique_color_values = {} +# unique_colors = list(mcolors.CSS4_COLORS.keys()) + unique_colors = list(mcolors.TABLEAU_COLORS.keys()) + i_unique_color_values = 0 + + unique_style_values = {} +# unique_styles = ['o','v','^','<','>','1','2','3','4','8','s','p','P','*','+','D'] + unique_styles = ['circle', 'square', 'diamond', 'cross', 'x', 'triangle', 'pentagon', 'hexagram', + 'star', 'hourglass', 'bowtie', 'asterisk', 'hash'] + i_unique_style_values = 0 + + # If Bar, use Style to separate results + unique_x_values = [] + unique_s_values = [] + + experiment_uids = [] + + # Filter values + values2 = [] + + for result in values: + if filter_value!='': + try: + if not 
eval(filter_value): + continue + except Exception as e: + if not error_shown: + st.markdown('*Syntax error in filter: {}*'.format(e)) + error_shown = True + + values2.append(result) + + if how == 'bar': + x = result.get(axis_key_x, None) + if x != None and x!='' and x not in unique_x_values: + unique_x_values.append(x) + + s = result.get(axis_key_s, None) + if s != None and s!='' and s not in unique_s_values: + unique_s_values.append(s) + + ############################################################################ + # Continue visualizing + if how == '2d-static' or how == 'bar': + + xx = [] + yy = [] + cc = [] + ss = [] + io = [] + + t = 0 + for result in values2: + v = result + + t+=1 + + x = v.get(axis_key_x, None) + y = v.get(axis_key_y, None) + + xx.append(x) + yy.append(y) + + color = 'blue' + if axis_key_c!='': + c = v.get(axis_key_c, None) + if c!=None: + if c in unique_color_values: + color = unique_color_values[c] + else: + color = unique_colors[i_unique_color_values] + unique_color_values[c] = color + if i_unique_color_values<(len(unique_colors)-1): + i_unique_color_values+=1 + + cc.append(color) + + style = 'o' + if axis_key_s!='': + s = v.get(axis_key_s, None) + if s!=None: + if s in unique_style_values: + style = unique_style_values[s] + else: + style = unique_styles[i_unique_style_values] + unique_style_values[s] = style + if i_unique_style_values<(len(unique_styles)-1): + i_unique_style_values+=1 + + ss.append(style) + + info='' + for key in sorted(v.keys(), key=lambda x: x.lower()): + value = v[key] + info+=str(key)+': '+str(value)+'
\n' + + io.append(info) + + import plotly.express as px + + dd = {axis_key_x:xx,axis_key_y:yy,axis_key_c:cc,axis_key_s:ss,'info':io} + + # https://docs.streamlit.io/library/api-reference/charts/st.bar_chart + # https://docs.streamlit.io/library/api-reference/charts/st.plotly_chart + # https://plotly.com/python/line-and-scatter/ + + df = pd.DataFrame(dd) + + if how == 'bar': + st.bar_chart(df, x=axis_key_x, y=axis_key_y) + else: + fig = px.scatter(df, x=axis_key_x, y=axis_key_y, color=axis_key_c, symbol=axis_key_s, hover_name='info', height=1000) + + st.plotly_chart(fig, theme="streamlit", use_container_width=True) + + + + elif how == '2d': + ##################################################################### + # 2D interactive graph - very slow - need to be updated + width = 1 + + t = 0 + for result in values2: + v = result + + t+=1 + + x = v.get(axis_key_x, None) + y = v.get(axis_key_y, None) + + url = v.get('url','') + if url=='': url = v.get('git_url','') + + color = 'blue' + if axis_key_c!='': + c = v.get(axis_key_c, None) + if c!=None: + if c in unique_color_values: + color = unique_color_values[c] + else: + color = unique_colors[i_unique_color_values] + unique_color_values[c] = color + if i_unique_color_values<(len(unique_colors)-1): + i_unique_color_values+=1 + + style = 'o' + if axis_key_s!='': + s = v.get(axis_key_s, None) + if s!=None: + if s in unique_style_values: + style = unique_style_values[s] + else: + style = unique_styles[i_unique_style_values] + unique_style_values[s] = style + if i_unique_style_values<(len(unique_styles)-1): + i_unique_style_values+=1 + + graph = ax.scatter(x, y, color=color, marker=style) + + info='' + for key in sorted(v.keys(), key=lambda x: x.lower()): + value = v[key] + info+=''+str(key)+': '+str(value)+'
\n' + + info2 = '
'+info+'
' + + label = [info2] + plugins.connect(fig, plugins.PointHTMLTooltip(graph, label)) + + experiment_uid = v.get('experiment_uid','') + if experiment_uid!='' and experiment_uid not in experiment_uids: + experiment_uids.append(experiment_uid) + + uid = v.get('uid','') + if uid!='': + xaction = 'action={}&'.format(action) if action!='' else '' + url = '?{}name={}&result_uid={}'.format(xaction, experiment_uid, uid) + + if url!='': + targets = [url] + plugins.connect(fig, OpenBrowserOnClick(graph, targets = targets)) + + # Render graph + fig_html = mpld3.fig_to_html(fig) + components.html(fig_html, width=1100, height=500) + + #fig_html = '
'+fig_html+'
' + + #components.html(fig_html, width=1000, height=800) + #st.markdown('---') + + ######################################################################## + # Show all data + df = pd.DataFrame( + all_data, + columns=(k for k in sorted_keys if k!='') + ) + + st.markdown('---') + st.dataframe(df) + + # Check if can create self link + if len(experiment_uids)==1: + st.markdown("""---""") + + xtype = '&type={}'.format(how) if how!='' else '' + + end_html=''' +
+ <a href="{}{}">Self link</a> +
+ '''.format(misc.make_url(experiment_uids[0], action=action, md=False), xtype) + + st.write(end_html, unsafe_allow_html=True) + + + return {'return':0} + + + + +if __name__ == "__main__": + r = main() + + if r['return']>0: + + st.markdown("""---""") + st.markdown('**Error detected by CM:** {}'.format(r['error'])) diff --git a/script/gui/install/linux.md b/script/gui/install/linux.md new file mode 100644 index 0000000000..4ee277a7b3 --- /dev/null +++ b/script/gui/install/linux.md @@ -0,0 +1,10 @@ +```bash +sudo apt update && sudo apt upgrade +sudo apt install python3 python3-pip python3-venv git wget curl +``` + +*Note that you must set up virtual env on Ubuntu 23+ before using any Python project including CM:* +```bash +python3 -m venv cm +source cm/bin/activate +``` diff --git a/script/gui/install/macos.md b/script/gui/install/macos.md new file mode 100644 index 0000000000..bc36c37756 --- /dev/null +++ b/script/gui/install/macos.md @@ -0,0 +1,22 @@ +*Note that CM currently does not work with Python installed from the Apple Store. + Please install Python via brew as described below.* + +If `brew` package manager is not installed, please install it as follows (see details [here](https://brew.sh/)): +```bash +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +``` + +Don't forget to add brew to PATH environment as described in the end. + +Then install python, pip, git and wget: + +```bash +brew install python3 git wget curl + +python3 -m pip install cmind +``` + +*Sometimes python does not add `cm` and `cmr` binaries to the `PATH` environment variable. + You may need to find these files and add their path to `PATH` variable. + We plan to simplify this installation in the future.* + diff --git a/script/gui/install/redhat.md b/script/gui/install/redhat.md new file mode 100644 index 0000000000..13f1c6a0e9 --- /dev/null +++ b/script/gui/install/redhat.md @@ -0,0 +1,7 @@ +*We have successfully tested CM on Red Hat 9 and CentOS 8:* + +```bash +sudo dnf update +sudo dnf install python3 python-pip git wget curl + +``` diff --git a/script/gui/install/windows.md b/script/gui/install/windows.md new file mode 100644 index 0000000000..8e1d50bfc1 --- /dev/null +++ b/script/gui/install/windows.md @@ -0,0 +1,15 @@ +* Configure Windows 10+ to [support long paths](https://learn.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=registry#enable-long-paths-in-windows-10-version-1607-and-later) from command line as admin: + ```bash + reg add "HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\FileSystem" /v LongPathsEnabled /t REG_DWORD /d 1 /f + ``` +* Download and install Git from [git-for-windows.github.io](https://git-for-windows.github.io). + * Configure Git to accept long file names: `git config --system core.longpaths true` +* Download and install Python 3+ from [www.python.org/downloads/windows](https://www.python.org/downloads/windows). + * Don't forget to select option to add Python binaries to PATH environment! + * Configure Windows to accept long fie names during Python installation! + +*Note that we [have reports](https://github.com/mlcommons/ck/issues/844) + that CM does not work when Python was first installed from the Microsoft Store. + If CM fails to run, you can find a fix [here](https://stackoverflow.com/questions/57485491/python-python3-executes-in-command-prompt-but-does-not-run-correctly)*. 
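*If the steps above complete without errors, a quick sanity check of the CM installation (a minimal sketch: it assumes `pip` placed the `cm` binary on `PATH`, and uses `cm test core`, the usual CM smoke test) is:*

```bash
python -m pip install cmind
cm test core
```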
+ +*We plan to provide a self-sustained package in the future to simplify CM installation on Windows.* diff --git a/script/gui/misc.py b/script/gui/misc.py new file mode 100644 index 0000000000..a2b00ad233 --- /dev/null +++ b/script/gui/misc.py @@ -0,0 +1,220 @@ +# Support functions + +########################################################## +def make_url(name, alias='', action='contributors', key='name', md=True, skip_url_quote=False): + + import urllib + + if alias == '': alias = name + + x = urllib.parse.quote_plus(alias) if not skip_url_quote else alias + + xaction = '' + if action != '': + xaction = 'action={}'.format(action) + if key!='': + xaction+='&' + + + url = '?{}'.format(xaction) + + if key!='': + url+='{}={}'.format(key,x) + + if md: + md = '[{}]({})'.format(name, url) + else: + md = url + + return md + +########################################################## +def convert_date(date): + # date: format YYYYMMDD to YYYY month day + + import calendar + + try: + year = date[0:4] + month = calendar.month_abbr[int(date[4:6])] + day = str(int(date[6:8])) + except Exception as e: + return {'return':1, 'error':'date "{}" is not of format YYYYMMDD: {}'.format(date, format(e))} + + return {'return':0, 'string':year+' '+month+' '+day} + +########################################################## +def get_params(st): + compatibility = False + + try: + params2 = st.query_params + # Convert to old style + params = {} + for k in params2: + v = params2[k] + if type(v)!=list: + params[k]=[v] + except: + compatibility = True + + if compatibility: + params = st.experimental_get_query_params() + + return params + +########################################################## +def get_all_deps_tags(i): + meta = i['meta'] + all_deps_tags = i.get('all_deps_tags', []) + + for k in meta: + v = meta[k] + + if k == 'tags': + if type(v) == list: + v = ','.join(v) + + if v not in all_deps_tags: + all_deps_tags.append(v) + + elif type(v) == dict: + r = get_all_deps_tags({'meta':v, 'all_deps_tags':all_deps_tags}) + all_deps_tags = r['all_deps_tags'] + + elif type(v) == list: + for vv in v: + if type(vv) == dict: + r = get_all_deps_tags({'meta':vv, 'all_deps_tags':all_deps_tags}) + all_deps_tags = r['all_deps_tags'] + + return {'return':0, 'all_deps_tags':all_deps_tags} + +########################################################## +def make_selector(i): + + key = i['key'] + value = i['desc'] + + params = i['params'] + + st = i['st'] + st_inputs = i['st_inputs'] + + hide = i.get('hide', False) + + key2 = '@'+key + + value2 = None + + if type(value) == dict: + desc = value['desc'] + + choices = value.get('choices', []) + boolean = value.get('boolean', False) + default = value.get('default', '') + force = value.get('force', None) + + if force != None: + value2 = force + if not hide: + st.markdown('**{}:** {}'.format(desc, str(force))) + + else: + if boolean: + v = default + x = params.get(key2, None) + if x!=None and len(x)>0 and x[0]!=None: + if x[0].lower()=='true': + v = True + elif x[0].lower()=='false': + v = False + if hide: + value2 = v + else: + value2 = st.checkbox(desc, value=v, key=key2) + elif len(choices)>0: + x = params.get(key2, None) + if x!=None and len(x)>0 and x[0]!=None: + x = x[0] + if x in choices: + selected_index = choices.index(x) if x in choices else 0 + else: + selected_index = choices.index(default) if default!='' else 0 + else: + selected_index = choices.index(default) if default!='' else 0 + if hide: + value2 = choices[selected_index] + else: + value2 = st.selectbox(desc, choices, 
index=selected_index, key=key2) + else: + v = default + x = params.get(key2, None) + if x!=None and len(x)>0 and x[0]!=None: + v = x[0] + if hide: + value2 = v + else: + value2 = st.text_input(desc, value=v, key=key2) + + st_inputs[key2] = value2 + + else: + desc = value + if hide: + value2 = desc + else: + value2 = st.text_input(desc) + st_inputs[key2] = value2 + + return {'return':0, 'key2': key2, 'value2': value2} + +########################################################## +def make_selection(st, selection, param_key, text, x_uid, force_index=0): + + x_meta = {} + + if len(selection)>0: + selection = sorted(selection, key = lambda v: v['name']) + + if x_uid != '': + x_meta = selection[0] + st.markdown('**Selected {}:** {}'.format(text, x_meta['name'])) + else: + x_selection = [{'name':''}] + x_selection += selection + + x_id = st.selectbox('Select {}:'.format(text), + range(len(x_selection)), + format_func=lambda x: x_selection[x]['name'], + index = force_index, + key = param_key) + + if x_id>0: + x_meta = x_selection[x_id] + + return {'return':0, 'meta':x_meta} + +################################################################################## +def get_with_complex_key_safe(meta, key): + v = get_with_complex_key(meta, key) + + if v == None: v='' + + return v + +################################################################################## +def get_with_complex_key(meta, key): + + j = key.find('.') + + if j<0: + return meta.get(key) + + key0 = key[:j] + + if key0 not in meta: + return None + + return get_with_complex_key(meta[key0], key[j+1:]) + diff --git a/script/gui/playground.py b/script/gui/playground.py new file mode 100644 index 0000000000..9c1a2f40cc --- /dev/null +++ b/script/gui/playground.py @@ -0,0 +1,203 @@ +# Developer(s): Grigori Fursin + +import streamlit as st +from streamlit.components.v1 import html +from streamlit_option_menu import option_menu + +import os +import cmind +import misc + +def main(): + + st.set_page_config(layout="wide", + menu_items={}) + + params = misc.get_params(st) + + # Set style + # Green: background:#7fcf6f; + hide_streamlit_style = """ + + """ + + st.markdown(hide_streamlit_style, unsafe_allow_html=True) + + # Set title (check extra user HTML to embed before title if needed) + extra = os.environ.get('CM_GUI_EXTRA_HTML','') + + if extra!='': + url = '' + for p in params: + v=str(','.join(params[p])) + if url!='': url+=';' + url+=p+'='+v + extra=extra.replace('{{CM_URL}}', url)+'\n\n' + + st.write(''' +
+

Collective Knowledge Playground

+
+ {} +
+
+ '''.format(extra), + unsafe_allow_html=True + ) + + extra_file = os.environ.get('CM_GUI_EXTRA_HTML_FILE','') + if extra_file!='': + r = cmind.utils.load_txt(extra_file) + if r['return']>0: return r + + s = '\n\n'+r['string']+'\n\n' + + st.write(s, unsafe_allow_html=True) + + + # Check action and basic menu + action = params.get('action',['scripts'])[0].lower() + + style_action_scripts='font-style:italic;font-weight:bold;color:#ffffff' if action=='scripts' else '' + style_action_howtorun='font-style:italic;font-weight:bold;color:#ffffff' if action=='howtorun' else '' + style_action_challenges='font-style:italic;font-weight:bold;color:#ffffff' if action=='challenges' else '' + style_action_contributors='font-style:italic;font-weight:bold;color:#ffffff' if action=='contributors' else '' + style_action_experiments='font-style:italic;font-weight:bold;color:#ffffff' if action=='experiments' else '' + style_action_reproduce='font-style:italic;font-weight:bold;color:#ffffff' if action=='reproduce' else '' + style_action_apps='font-style:italic;font-weight:bold;color:#ffffff' if action=='apps' else '' + style_action_reports='font-style:italic;font-weight:bold;color:#ffffff' if action=='reports' else '' + style_action_beta='font-style:italic;font-weight:bold;color:#ffffff' if action=='beta' else '' + style_action_install='font-style:italic;font-weight:bold;color:#ffffff' if action=='install' else '' + + st.write(''' +
+ + + + +
+ + + +
+ + + +
+ '''.format( + style_action_scripts, + style_action_howtorun, + style_action_challenges, + style_action_experiments, + style_action_reproduce, + style_action_contributors, + style_action_reports, + style_action_beta, + style_action_apps, + style_action_install + ), + unsafe_allow_html=True + ) + + # Check actions +# st.markdown("""---""") + st.markdown('') + + r={'return':0} + + if action == 'challenges': + from playground_challenges import page + r = page(st, params) + elif action == 'howtorun': + from playground_howtorun import page + r = page(st, params) + elif action == 'experiments': + from graph import visualize + r = visualize(st, params, action = 'experiments') + elif action == 'contributors': + from playground_contributors import page + r = page(st, params) + elif action == 'scripts' or action == 'recipes' or action == 'automation-recipes' or action == 'components': + from playground_scripts import page + r = page(st, params) + elif action == 'reproduce' or action == 'repro' or action == 'reproducibility': + from playground_reproduce import page + r = page(st, params) + elif action == 'apps' or action == 'optimized-apps': + from playground_apps import page + r = page(st, params) + elif action == 'reports': + from playground_reports import page + r = page(st, params) + elif action == 'beta': + from playground_beta import page + r = page(st, params) + elif action == 'install' or action == 'setup': + from playground_install import page + r = page(st, params, {}) + + if r['return']>0: + st.markdown('**CM error:** {} . Please report [here](https://github.com/mlcommons/ck/issues)'.format(r['error'])) + + end_html=r.get('end_html','') + + + # Finalize all pages + st.markdown("""---""") + + if end_html!='': + st.write(end_html, unsafe_allow_html=True) + + st.write(""" +
+ Powered by MLCommons Collective Mind +
+ """, + unsafe_allow_html=True) + + +def make_url(name, alias='', action='contributors', key='name', md=True): + + import urllib + + if alias == '': alias = name + + url = '?action={}&{}={}'.format(action, key, urllib.parse.quote_plus(alias)) + + if md: + md = '[{}]({})'.format(name, url) + else: + md = url + + return md + + +def convert_date(date): + # date: format YYYYMMDD to YYYY month day + + import calendar + + try: + year = date[0:4] + month = calendar.month_abbr[int(date[4:6])] + day = str(int(date[6:8])) + except Exception as e: + return {'return':1, 'error':'date "{}" is not of format YYYYMMDD: {}'.format(date, format(e))} + + return {'return':0, 'string':year+' '+month+' '+day} + + +if __name__ == "__main__": + main() diff --git a/script/gui/playground_apps.py b/script/gui/playground_apps.py new file mode 100644 index 0000000000..9af5fca444 --- /dev/null +++ b/script/gui/playground_apps.py @@ -0,0 +1,40 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import misc + +import streamlit.components.v1 as components + +import streamlit as st + +announcement = 'Under development - please get in touch via [Discord](https://discord.gg/JjWNWXKxwT) for more details ...' + +initialized = False +external_module_path = '' +external_module_meta = {} + +def main(): + params = misc.get_params(st) + + # Set title + st.title('How to run benchmarks') + + st.markdown(announcement) + + return page(st, params) + + + + +def page(st, params, action = ''): + + global initialized, external_module_path, external_module_meta + + end_html = '' + + st.markdown('----') + st.markdown(announcement) + + + return {'return':0, 'end_html':end_html} diff --git a/script/gui/playground_beta.py b/script/gui/playground_beta.py new file mode 100644 index 0000000000..f5636404d2 --- /dev/null +++ b/script/gui/playground_beta.py @@ -0,0 +1,35 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import datetime +import misc + +def page(st, params): + + current_script_path = os.environ.get('CM_TMP_CURRENT_SCRIPT_PATH', '') + + url_prefix = st.config.get_option('server.baseUrlPath')+'/' + + name = params.get('name',[''])[0].strip() + tags = params.get('tags',[''])[0].lower() + + readme = os.path.join(current_script_path, 'playground_beta_README.md') + + md = '' + + if os.path.isfile(readme): + + r = cmind.utils.load_txt(readme) + if r['return']>0: return r + + md += r['string'] + + md = md.replace('{{URL_PREFIX}}', url_prefix) + +# st.markdown(md) + st.write(md, unsafe_allow_html = True) + + end_html='' + + return {'return':0, 'end_html':end_html} diff --git a/script/gui/playground_beta_README.md b/script/gui/playground_beta_README.md new file mode 100644 index 0000000000..77712f3c43 --- /dev/null +++ b/script/gui/playground_beta_README.md @@ -0,0 +1,10 @@ +## Beta features (under development) + +Here are a few on-going projects to extend the Collective Knowledge Playground +being developed by [cKnowledge.org](https://cKnowledge.org) and [cTuning.org](https://cTuning.org) +using [Collective Mind automation recipes (CM scripts)]({{URL_PREFIX}}?action=scripts): + +* [MLPerf results explorer](https://access.cknowledge.org/mlperf-explorer) +* [LLM-based assistant to run MLPerf benchmarks](https://access.cknowledge.org/assistant) + +Feel free to suggest your projects and extensions using [GitHub issues](https://github.com/mlcommons/ck/issues)! 
diff --git a/script/gui/playground_challenges.py b/script/gui/playground_challenges.py new file mode 100644 index 0000000000..0a840403d5 --- /dev/null +++ b/script/gui/playground_challenges.py @@ -0,0 +1,502 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import datetime +import misc + +def page(st, params): + + url_prefix = st.config.get_option('server.baseUrlPath')+'/' + + url_scripts = url_prefix + '?action=scripts' + url_contributors = url_prefix + '?action=contributors' + + + name = params.get('name',[''])[0].strip() + tags = params.get('tags',[''])[0].lower() + + ii = {'action':'find', + 'automation':'challenge,3d84abd768f34e08'} + + if name!='': + ii['artifact']=name + if tags!='': + ii['tags']=tags + + r = cmind.access(ii) + if r['return']>0: return r + + lst = r['list'] + + end_html = '' + + if len(lst)==0: + st.markdown('Challenges were not found!') + else: + artifact = None + + if len(lst)==1: + artifact = lst[0] + else: + challenges = [] + + date_now = datetime.datetime.now().isoformat() + date_now2 = int(date_now[0:4]+date_now[5:7]+date_now[8:10]) + + ongoing = [] + + for l in sorted(lst, key=lambda x: ( + -int(x.meta.get('date_open','0')), + -int(x.meta.get('date_close','0')), + x.meta.get('title','') + )): + + row = {} + + meta = l.meta + row['uid']= meta['uid'] + + name = meta.get('title', meta['alias']) + + row['name']=name + + if meta.get('hot', False): row['hot']=True + + for k in ['date_close_extension', 'points', 'trophies', 'prize', 'prize_short', 'skip', 'sort']: + if k in meta: + row[k]=meta[k] + + under_preparation = meta.get('under_preparation', False) + row['under_preparation']=under_preparation + + date_open = meta.get('date_open','') + date_close = meta.get('date_close','') + + s_date_open = '' + if date_open!='': + r = misc.convert_date(date_open) + s_date_open = r['string'] if r['return']==0 else '' + + row['orig_date_open']=date_open + row['date_open']=s_date_open + + s_date_close = '' + if date_close!='': + r = misc.convert_date(date_close) + s_date_close = r['string'] if r['return']==0 else '' + + row['orig_date_close']=date_close + row['date_close']=s_date_close + + diff1 = 0 + diff2 = 0 + + if date_open!='': + diff1 = int(date_open)-int(date_now2) + + if date_close!='': + diff2 = int(date_close)-int(date_now2) + + + prefix = '' + if under_preparation: + prefix = 'Under preparation: ' + else: + if date_open!='' and diff1>0: + prefix = 'Opens on {}: '.format(s_date_open) + else: + if date_close!='': + if diff2<0: + prefix = 'Finished on {}: '.format(s_date_close) + else: + prefix = 'Open and finishes on {}: '.format(s_date_close) + else: + prefix = 'Open: '.format(s_date_close) + + + # Check if open challenge even if under preparation + if date_open and (date_close=='' or (diff1<=0 and diff2>0)): + ongoing.append(row) + else: + challenges.append({'prefix':prefix, 'name':name, 'uid':l.meta['uid']}) + + + + + # Show ongoing if open + if len(ongoing)>0: + + # Check hot + hot = [] + ongoing_without_hot = [] + + for row in ongoing: + if row.get('hot', False): + hot.append(row) + else: + ongoing_without_hot.append(row) + + # Some info + x = ''' + + + Collaborative benchmarking and optimization of AI applications and systems + (latency, throughput, power consumption, accuracy, costs ...) + is organized by MLCommons, + cKnowledge + and the cTuning foundation + and powered by Collective Mind automation recipes. + We deeply thank all our participants and contributors! + + +
+
+ '''.format(url_scripts, url_contributors) + st.write(x, unsafe_allow_html = True) + + + # Check if hot + if len(hot)>0: + st.markdown('#### Hot challenges') + + md_tmp = '' + + for row in sorted(hot, key=lambda row: (int(row.get('orig_date_close', 9999999999)), + row.get('sort', 0), + row.get('name', ''), + row.get('under_preparation', False) + )): + x = row['name'] + x = x[0].upper() + x[1:] + + url = url_prefix + '?action=challenges&name={}'.format(row['uid']) + + + date_close = row.get('date_close','').strip() + y = ' (Closing date: **{}**)'.format(date_close) if date_close !='' else '' + + md_tmp += '* [{}]({}){}\n'.format(x, url, y) + + st.markdown(md_tmp) + + st.markdown('#### On-going challenges') + + + # Continue all + ind = 1 + + data = [] + + for row in sorted(ongoing_without_hot, key=lambda row: (int(row.get('orig_date_close', 9999999999)), + row.get('sort', 0), + row.get('name', ''), + row.get('under_preparation', False) + )): + if row.get('skip',False): continue + + xrow = [] + + md = '' + up = row.get('under_preparation', False) + + x = row['name'] + y = '' + if up: + x = x[0].lower() + x[1:] + y = 'Under preparation: ' + + url = url_prefix + '?action=challenges&name={}'.format(row['uid']) +# md += '###### {}) {}[{}]({})\n'.format(str(ind), y, x, url) + + x = ''' +
+ + {}{} + +
+ '''.format(y, url, x).replace('\n','') +# st.write(x, unsafe_allow_html = True) + + xrow.append(x) + + # Assemble info + x='' + + date_close = row.get('date_close','') + y = '' + if date_close!='' and date_close!=None: + x += '   Closing date: **{}**\n'.format(date_close) + y = date_close.replace(' ',' ') + + xrow.append(y) + + y = '' + if row.get('date_close_extension',False): + y = 'until done' + + xrow.append(y) + +# points = row.get('points',0) +# y = '' +# if points>0: +# x += '   Points: **{}**\n'.format(str(points)) +# y = str(points) +# +# xrow.append(y) + + + + awards = '' + + trophies = row.get('trophies',False) + if trophies: + x += '   Trophy: **Yes**\n' + awards += '🏆' + + +# prize = row.get('prize_short','') +# if prize!='': +# x += '   Prizes from [MLCommons organizations](https://mlcommons.org), [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https:/cKnowledge.org): **{}**\n'.format(prize) +# if awards!='': awards+=' , ' +# awards += prize +# +# xrow.append(awards) + + + if x!='': + md += '     '+x + +# st.markdown(md) + + + data.append(xrow) + ind+=1 + + + import pandas as pd + import numpy as np + + df = pd.DataFrame(data, + columns=['Challenge', 'Closing date', 'Extension']) + + df.index+=1 + +# st.table(df) + st.write(df.to_html(escape=False, justify='left'), unsafe_allow_html=True) + + # Show selector for all +# challenge = st.selectbox('View past benchmarking, optimization, reproducibility and replicability challenges:', +# range(len(challenges)), +# format_func=lambda x: challenges[x], +# index=0, key='challenge') +# +# if challenge>0: +# artifact = artifacts[challenge] + + + + + # Process 1 challenge + if artifact is None: +# st.markdown('#### Past or future challenges:') + + st.markdown('#### Future or past challenges') + + + for c in challenges: + + prefix = c['prefix'] + name = c['name'] + uid = c['uid'] + + url = url_prefix + '?action=challenges&name={}'.format(uid) + + x = ''' +
+ {}) {}{} +
+ '''.format(str(ind), prefix, url, name) + + st.write(x, unsafe_allow_html = True) + + ind+=1 + + + + + + + + + + + + else: + meta = artifact.meta + + name = meta.get('title', meta['alias']) + uid = meta['uid'] + + st.write(''' +
+

Challenge: {}

+
+ '''.format(name), + unsafe_allow_html=True + ) + + end_html='
<a href="{}">Self link</a>
'.format(misc.make_url(meta['uid'], action='challenges', md=False)) + + + # Check basic password + password_hash = meta.get('password_hash','') + view = True + if password_hash!='': + view = False + + password = st.text_input("Enter password", type="password", key="password") + + if password!='': + import bcrypt + # TBD: temporal hack to demo password protection for experiments + password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e' + password_hash2 = bcrypt.hashpw(password.encode('utf-8'), password_salt) + + if password_hash.encode('utf-8')==password_hash2: + view=True + else: + st.markdown('**Warning:** wrong password') + + if not view: + return {'return':0, 'end_html':end_html} + + + + z = '' + date_open = meta.get('date_open','') + if date_open!='': + # Format YYYYMMDD + r = misc.convert_date(date_open) + if r['return']>0: return r + z+='* **Open date:** {}\n'.format(r['string']) + + date_close = meta.get('date_close','') + if date_close!='': + # Format YYYYMMDD + r = misc.convert_date(date_close) + if r['return']>0: return r + z+='* **Closing date:** {}\n'.format(r['string']) + + if meta.get('trophies', False): + z+='* **MLCommons Collective Knowledge Contributor award:** Yes\n' + + prize_short = meta.get('prize_short','') + if prize_short!='': + z+='* **Prizes:** {}\n'.format(prize_short) + +# prize = meta.get('prize','') +# if prize!='': +# z+='* **Student prizes:** {}\n'.format(prize) + + + urls = meta.get('urls',[]) + url = meta.get('url', '') + + if url!='': urls.append(url) + + if len(urls)>0: + x = '* **External link:** ' + md = '' + if len(urls)>1: + md = '* **External links:**\n' + x=' * ' + + for u in urls: + md+=x+'[{}]({})\n'.format(u,u) + z+=md+'\n' + + + # Check if has linked experiments + experiments = meta.get('experiments',[]) + + if len(experiments)>0: + md = '* **Shared experiments:**\n' + + for e in experiments: + tags = e.get('tags','') + name = e.get('name','') + + if tags!='': + md+=' * '+misc.make_url(tags, action='experiments', key='tags')+'\n' + elif name!='': + md+=' * '+misc.make_url(name, action='experiments')+'\n' + + z+=md+'\n' + + st.markdown(z) + + + # Check if has text + path = artifact.path + + for f in ['README.md', 'info.html']: + f1 = os.path.join(path, f) + if os.path.isfile(f1): + r = cmind.utils.load_txt(f1) + if r['return']>0: return r + + s = r['string'] + + st.markdown('---') + + if f.endswith('.html'): + y = s.split('\n') + ss = '' + for x in y: + ss+=x.strip()+'\n' + + st.write(ss, unsafe_allow_html=True) + else: + st.markdown(s) + + break + + # Check associated reports + r=cmind.access({'action':'find', + 'automation':'report,6462ecdba2054467', + 'tags':'challenge-{}'.format(uid)}) + if r['return']>0: return r + + lst = r['list'] + + for l in lst: + report_path = l.path + + f1 = os.path.join(report_path, 'README.md') + if os.path.isfile(f1): + report_meta = l.meta + + report_alias = report_meta['alias'] + report_title = report_meta.get('title','') + + report_name = report_title if report_title!='' else report_alias + + r = cmind.utils.load_txt(f1) + if r['return']>0: return r + + s = r['string'] + + st.markdown('---') + st.markdown('### '+report_name) + + st.markdown(s, unsafe_allow_html=True) + + + + + + + + + return {'return':0, 'end_html':end_html} diff --git a/script/gui/playground_challenges_with_prizes.py b/script/gui/playground_challenges_with_prizes.py new file mode 100644 index 0000000000..5e8d2a1b57 --- /dev/null +++ b/script/gui/playground_challenges_with_prizes.py @@ -0,0 +1,456 @@ +# Developer(s): Grigori Fursin + +import cmind 
+import os +import datetime +import misc + +def page(st, params): + + url_prefix = st.config.get_option('server.baseUrlPath')+'/' + + name = params.get('name',[''])[0].strip() + tags = params.get('tags',[''])[0].lower() + + ii = {'action':'find', + 'automation':'challenge,3d84abd768f34e08'} + + if name!='': + ii['artifact']=name + if tags!='': + ii['tags']=tags + + r = cmind.access(ii) + if r['return']>0: return r + + lst = r['list'] + + end_html = '' + + if len(lst)==0: + st.markdown('Challenges were not found!') + else: + artifact = None + + if len(lst)==1: + artifact = lst[0] + else: + challenges = [] + + date_now = datetime.datetime.now().isoformat() + date_now2 = int(date_now[0:4]+date_now[5:7]+date_now[8:10]) + + ongoing = [] + + for l in sorted(lst, key=lambda x: ( + -int(x.meta.get('date_open','0')), + -int(x.meta.get('date_close','0')), + x.meta.get('title','') + )): + + row = {} + + meta = l.meta + row['uid']= meta['uid'] + + name = meta.get('title', meta['alias']) + + row['name']=name + + for k in ['date_close_extension', 'points', 'trophies', 'prize', 'prize_short', 'skip', 'sort']: + if k in meta: + row[k]=meta[k] + + under_preparation = meta.get('under_preparation', False) + row['under_preparation']=under_preparation + + date_open = meta.get('date_open','') + date_close = meta.get('date_close','') + + s_date_open = '' + if date_open!='': + r = misc.convert_date(date_open) + s_date_open = r['string'] if r['return']==0 else '' + + row['orig_date_open']=date_open + row['date_open']=s_date_open + + s_date_close = '' + if date_close!='': + r = misc.convert_date(date_close) + s_date_close = r['string'] if r['return']==0 else '' + + row['orig_date_close']=date_close + row['date_close']=s_date_close + + diff1 = 0 + diff2 = 0 + + if date_open!='': + diff1 = int(date_open)-int(date_now2) + + if date_close!='': + diff2 = int(date_close)-int(date_now2) + + + prefix = '' + if under_preparation: + prefix = 'Under preparation: ' + else: + if date_open!='' and diff1>0: + prefix = 'Opens on {}: '.format(s_date_open) + else: + if date_close!='': + if diff2<0: + prefix = 'Finished on {}: '.format(s_date_close) + else: + prefix = 'Open and finishes on {}: '.format(s_date_close) + else: + prefix = 'Open: '.format(s_date_close) + + + # Check if open challenge even if under preparation + if date_open and (date_close=='' or (diff1<=0 and diff2>0)): + ongoing.append(row) + else: + challenges.append({'prefix':prefix, 'name':name, 'uid':l.meta['uid']}) + + + + + # Show ongoing if open + if len(ongoing)>0: + ind = 1 + + x = ''' +
+

Ongoing reproducibility and optimization challenges

+ +
+ ''' + st.write(x, unsafe_allow_html = True) + + data = [] + + for row in sorted(ongoing, key=lambda row: (int(row.get('orig_date_close', 9999999999)), + row.get('sort', 0), + row.get('name', ''), + row.get('under_preparation', False) + )): + if row.get('skip',False): continue + + xrow = [] + + md = '' + up = row.get('under_preparation', False) + + x = row['name'] + y = '' + if up: + x = x[0].lower() + x[1:] + y = 'Under preparation: ' + + url = url_prefix + '?action=challenges&name={}'.format(row['uid']) +# md += '###### {}) {}[{}]({})\n'.format(str(ind), y, x, url) + + x = ''' +
+ + {}{} + +
+ '''.format(y, url, x).replace('\n','') +# st.write(x, unsafe_allow_html = True) + + xrow.append(x) + + # Assemble info + x='' + + date_close = row.get('date_close','') + y = '' + if date_close!='' and date_close!=None: + x += '   Closing date: **{}**\n'.format(date_close) + y = date_close.replace(' ',' ') + + xrow.append(y) + + y = '' + if row.get('date_close_extension',False): + y = 'until done' + + xrow.append(y) + +# points = row.get('points',0) +# y = '' +# if points>0: +# x += '   Points: **{}**\n'.format(str(points)) +# y = str(points) +# +# xrow.append(y) + + + + awards = '' + + trophies = row.get('trophies',False) + if trophies: + x += '   Trophy: **Yes**\n' + awards += '🏆' + + + prize = row.get('prize_short','') + if prize!='': + x += '   Prizes from [MLCommons organizations](https://mlcommons.org), [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https:/cKnowledge.org): **{}**\n'.format(prize) + if awards!='': awards+=' , ' + awards += prize + + xrow.append(awards) + + + if x!='': + md += '     '+x + +# st.markdown(md) + + + data.append(xrow) + ind+=1 + + + import pandas as pd + import numpy as np + + df = pd.DataFrame(data, + columns=['Challenge', 'Closing date', 'Extension', 'Contributor award and prizes from MLCommons organizations, cTuning foundation and cKnowledge.org']) + + df.index+=1 + +# st.table(df) + st.write(df.to_html(escape=False, justify='left'), unsafe_allow_html=True) + + # Show selector for all +# challenge = st.selectbox('View past benchmarking, optimization, reproducibility and replicability challenges:', +# range(len(challenges)), +# format_func=lambda x: challenges[x], +# index=0, key='challenge') +# +# if challenge>0: +# artifact = artifacts[challenge] + + + + + # Process 1 challenge + if artifact is None: +# st.markdown('#### Past or future challenges:') + + x = ''' +
+

Future or past challenges

+
+ ''' + st.write(x, unsafe_allow_html = True) + + + for c in challenges: + + prefix = c['prefix'] + name = c['name'] + uid = c['uid'] + + url = url_prefix + '?action=challenges&name={}'.format(uid) + + x = ''' +
+ {}) {}{} +
+ '''.format(str(ind), prefix, url, name) + + st.write(x, unsafe_allow_html = True) + + ind+=1 + + + + + + + + + + + + else: + meta = artifact.meta + + name = meta.get('title', meta['alias']) + uid = meta['uid'] + + st.write(''' +
+

Challenge: {}

+
+ '''.format(name), + unsafe_allow_html=True + ) + + end_html='
<a href="{}">Self link</a>
'.format(misc.make_url(meta['uid'], action='challenges', md=False)) + + + # Check basic password + password_hash = meta.get('password_hash','') + view = True + if password_hash!='': + view = False + + password = st.text_input("Enter password", type="password", key="password") + + if password!='': + import bcrypt + # TBD: temporal hack to demo password protection for experiments + password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e' + password_hash2 = bcrypt.hashpw(password.encode('utf-8'), password_salt) + + if password_hash.encode('utf-8')==password_hash2: + view=True + else: + st.markdown('**Warning:** wrong password') + + if not view: + return {'return':0, 'end_html':end_html} + + + + z = '' + date_open = meta.get('date_open','') + if date_open!='': + # Format YYYYMMDD + r = misc.convert_date(date_open) + if r['return']>0: return r + z+='* **Open date:** {}\n'.format(r['string']) + + date_close = meta.get('date_close','') + if date_close!='': + # Format YYYYMMDD + r = misc.convert_date(date_close) + if r['return']>0: return r + z+='* **Closing date:** {}\n'.format(r['string']) + + if meta.get('trophies', False): + z+='* **MLCommons Collective Knowledge Contributor award:** Yes\n' + + prize_short = meta.get('prize_short','') + if prize_short!='': + z+='* **Prizes:** {}\n'.format(prize_short) + +# prize = meta.get('prize','') +# if prize!='': +# z+='* **Student prizes:** {}\n'.format(prize) + + + urls = meta.get('urls',[]) + url = meta.get('url', '') + + if url!='': urls.append(url) + + if len(urls)>0: + x = '* **External link:** ' + md = '' + if len(urls)>1: + md = '* **External links:**\n' + x=' * ' + + for u in urls: + md+=x+'[{}]({})\n'.format(u,u) + z+=md+'\n' + + + # Check if has linked experiments + experiments = meta.get('experiments',[]) + + if len(experiments)>0: + md = '* **Shared experiments:**\n' + + for e in experiments: + tags = e.get('tags','') + name = e.get('name','') + + if tags!='': + md+=' * '+misc.make_url(tags, action='experiments', key='tags') + elif name!='': + md+=' * '+misc.make_url(name, action='experiments') + + z+=md+'\n' + + st.markdown(z) + + + # Check if has text + path = artifact.path + + for f in ['README.md', 'info.html']: + f1 = os.path.join(path, f) + if os.path.isfile(f1): + r = cmind.utils.load_txt(f1) + if r['return']>0: return r + + s = r['string'] + + st.markdown('---') + + if f.endswith('.html'): + y = s.split('\n') + ss = '' + for x in y: + ss+=x.strip()+'\n' + + st.write(ss, unsafe_allow_html=True) + else: + st.markdown(s) + + break + + # Check associated reports + r=cmind.access({'action':'find', + 'automation':'report,6462ecdba2054467', + 'tags':'challenge-{}'.format(uid)}) + if r['return']>0: return r + + lst = r['list'] + + for l in lst: + report_path = l.path + + f1 = os.path.join(report_path, 'README.md') + if os.path.isfile(f1): + report_meta = l.meta + + report_alias = report_meta['alias'] + report_title = report_meta.get('title','') + + report_name = report_title if report_title!='' else report_alias + + r = cmind.utils.load_txt(f1) + if r['return']>0: return r + + s = r['string'] + + st.markdown('---') + st.markdown('### '+report_name) + + st.markdown(s, unsafe_allow_html=True) + + + + + + + + + return {'return':0, 'end_html':end_html} diff --git a/script/gui/playground_contributors.py b/script/gui/playground_contributors.py new file mode 100644 index 0000000000..1c44f417bb --- /dev/null +++ b/script/gui/playground_contributors.py @@ -0,0 +1,358 @@ +# Developer(s): Grigori Fursin + +import cmind +import misc +import os + +def page(st, 
params): + + url_prefix = st.config.get_option('server.baseUrlPath')+'/' + + name = params.get('name',[''])[0].lower() + + list_all = False + + if name!='': + r=cmind.access({'action':'load', + 'automation':'contributor,68eae17b590d4f8f', + 'artifact':name}) + if r['return']>0 and r['return']!=16: + return r + + end_html = '' + + if r['return']==0: + meta = r['meta'] + + path = r['path'] + + name = meta.get('name',meta.get('organization','')) + if name!='': + st.markdown("#### "+name) + + x='' + for t in meta.get('trophies',[]): + url = t.get('url','') + if url != '': + x+='🏆 '.format(url) + + if x!='': + st.write('

'+x+'

', unsafe_allow_html = True) + + end_html=''' +
+ <a href="{}">Self link</a> +
+ '''.format(misc.make_url(meta['uid'], action='contributors', md=False)) + + org = meta.get('organization','') + if org!='': + st.markdown("* **Organization:** "+org) + + urls = meta.get('urls',[]) + + url = meta.get('url', '') + if url!='': urls.append(url) + + if len(urls)>0: + x = '* **Web page:** ' + md = '' + if len(urls)>1: + md = '* **Web pages:**\n' + x=' * ' + + for u in urls: + md+=x+'[{}]({})\n'.format(u,u) + + st.markdown(md) + + ongoing = meta.get('ongoing',[]) + + x = str(calculate_points(meta)) + y1 ='' + y2 = '' + if len(ongoing)>0: + y1 = '*' + y2 = ' (ongoing)*' + st.markdown("* **Points: {}{}{}**".format(y1,x,y2)) +# st.write('

'+x+'

', unsafe_allow_html = True) + + if len(ongoing)>0: + x = "* **Ongoing challenges:**\n" + + for t in ongoing: + if t != '': + x+=" - {}\n".format(misc.make_url(t, action='challenges', key='tags')) + + st.markdown(x) + + challenges = meta.get('challenges',[]) + if len(challenges)>0: + md = "* **Contributions:**\n" + + for c in sorted(challenges): + md+=" * {}\n".format(misc.make_url(c, action='challenges', key='tags')) + + st.markdown(md) + + # Check if README + md = '' + + readme = os.path.join(path, 'README.md') + if os.path.isfile(readme): + + r = cmind.utils.load_txt(readme) + if r['return']>0: return r + + md += r['string'] + + st.markdown('---') + st.markdown(md) + + + else: + st.markdown('**Warning:** Contributor "{}" not found!'.format(name)) + + return {'return':0, 'end_html':end_html} + + + return page_list(st, params) + + +def page_list(st, params): + import pandas as pd + import numpy as np + + # Read all contributors + r = cmind.access({'action':'find', + 'automation':'contributor,68eae17b590d4f8f'}) + if r['return']>0: return r + + lst = r['list'] + + # Prepare the latest contributors + all_data = [] + keys = [ + ('name', 'Name', 400, 'leftAligned'), + ('points', 'Points', 80,'rightAligned'), +# ('ongoing_number', 'Ongoing challenges', 80, 'rightAligned'), + ('trophies', 'Trophies', 80,'rightAligned') + ] + + + url_prefix = st.config.get_option('server.baseUrlPath')+'/' + + md_people = '' + md_org = '' +# for l in sorted(lst, key=lambda x: (-int(x.meta.get('last_participation_date','0')), +# for l in sorted(lst, key=lambda x: x.meta.get('name', x.meta.get('organization','')).lower()): + + for l in lst: + + row = {} + + m = l.meta + + # Skip from stats + if m.get('skip', False): + continue + + lpd = m.get('last_participation_date', '') + trophies = m.get('trophies', []) + ongoing = m.get('ongoing', []) + +# if lpd=='-' or (lpd!='' and int(lpd)<2023) : +# continue +# +# if len(ongoing)==0 and len(trophies)==0: +# continue + +# if lpd!='': + if True: + uid = m['uid'] + alias = m['alias'] + name = m.get('name', '') + org = m.get('organization', '') + + row['name_to_print'] = name if name!='' else org + + + # Registration in the CK challenges gives 1 point + y1 ='' + y2 = '' + if len(ongoing)>0: + y1 = '*' + y2 = ' (ongoing)*' + + row['points'] = calculate_points(m) + + row['ongoing_number'] = len(ongoing) + x = '' + for t in ongoing: + if t != '': + url = url_prefix + '?action=challenges&tags={}'.format(t) + x+='{}
'.format(url,t.replace('-', ' ').replace(',',' ')) + + row['ongoing'] = x + + name2 = '' + + if name!='': + url = misc.make_url(name, alias=uid, md = False) + md_people += '* '+ misc.make_url(name, alias=uid) +'\n' + + if org!='': + name2 = ' ({})'.format(org) + + elif org!='': + url = misc.make_url(org, alias=alias, md = False) + md_org += '* '+ misc.make_url(org, alias=alias) +'\n' + name = org + + row['name'] = '{}{}'.format(url_prefix + url, name, name2) + + row['trophies_number'] = len(trophies) + x = '' + for t in trophies: + url = t.get('url','') + if url != '': + x+='🏆 '.format(url) + + row['trophies'] = x + + + all_data.append(row) + + + # Visualize table + pd_keys = [v[0] for v in keys] + pd_key_names = [v[1] for v in keys] + pd_all_data = [] + for row in sorted(all_data, key=lambda row: (row.get('ongoing_number',0)<=0, + -row.get('points',0), + -row.get('trophies_number',0), + name_to_sort(row))): + pd_row=[] + for k in pd_keys: + pd_row.append(row.get(k)) + pd_all_data.append(pd_row) + + df = pd.DataFrame(pd_all_data, columns = pd_key_names) + + df.index+=1 + + x = ''' +
+ + + Check on-going challenges + and register here + to be added to this leaderboard. + + +
+
+ '''.format(url_prefix) + + st.write(x, unsafe_allow_html = True) + + st.write('
'+df.to_html(escape=False, justify='left')+'
', unsafe_allow_html=True) + + + +# from st_aggrid import AgGrid, GridOptionsBuilder, GridUpdateMode +# from st_aggrid.shared import JsCode +# +# gb = GridOptionsBuilder.from_dataframe(df, editable=False) +# +# for k in keys: +# gb.configure_column( +# k[1], +# headerName=k[1], +# width=k[2], +# type=k[3], +# cellRenderer=JsCode(""" +# class UrlCellRenderer { +# init(params) { +# this.eGui = document.createElement('a'); +# this.eGui.innerHTML = params.value; +# } +# getGui() { +# return this.eGui; +# } +# } +# """) +# ) +# +# AgGrid(df, +# gridOptions=gb.build(), +# updateMode=GridUpdateMode.VALUE_CHANGED, +# enable_enterprise_modules=False, +# allow_unsafe_jscode=True) + +# st.write(grid) #, unsafe_allow_html = True) + +# st.dataframe(df) +# st.write(df.to_html(escape = False), unsafe_allow_html = True) + + +# if md_people!='': +# st.markdown("### The latest contributors (individuals)") +# st.markdown('Huge thanks to all our contributors for supporing this community project:') +# st.markdown(md_people) + + +# if md_org!='': +# st.markdown("### The latest contributors (organizations)") +# st.markdown(md_org) + + # Prepare list of all contributors + +# md = '' +# for l in sorted(lst, key=lambda x: x.meta.get('name',x.meta.get('organization','')).lower()): +# md += prepare_name(l.meta) +# +# if md!='': +# st.markdown("### All contributors (individuals and orgs)") +# st.markdown(md) + + return {'return':0} + + +def name_to_sort(meta): + name = meta.get('name_to_print', '') + + xname = name.split(' ') + + sname = xname[-1].lower() + + return sname + + +def calculate_points(meta): + + points = 1 + + xpoints = meta.get('points',[]) + for x in xpoints: + points += int(x.get('point',0)) + + # Automatic challenges + points += len(meta.get('challenges',[])) + points += len(meta.get('ongoing',[])) + + return points + + +def prepare_name(meta): + alias = meta['alias'] + name = meta.get('name', '') + org = meta.get('organization', '') + + md = '' + if name!='': + md = '* '+misc.make_url(name, alias=alias)+'\n' + elif org!='': + md = '* *'+misc.make_url(org, alias=alias)+'*\n' + + return md diff --git a/script/gui/playground_howtorun.py b/script/gui/playground_howtorun.py new file mode 100644 index 0000000000..e533ac37fb --- /dev/null +++ b/script/gui/playground_howtorun.py @@ -0,0 +1,301 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import misc + +import streamlit.components.v1 as components + +import streamlit as st + +announcement = 'Under development - please get in touch via [Discord](https://discord.gg/JjWNWXKxwT) for more details ...' + +def main(): + params = misc.get_params(st) + + # Set title + st.title('How to run benchmarks') + + st.markdown(announcement) + + return page(st, params) + + + + +def page(st, params, action = ''): + + end_html = '' + + # Announcement +# st.markdown('----') + + url_script = misc.make_url('', key='', action='scripts', md=False) + + # Some info + x = ''' + + + This interface will help you generate a command line or Python API + to run modular benchmarks composed from + automation recipes (CM scripts). + Note that this is a collaborative engineering effort + to make sure that they work across all possible versions and configurations of models, data sets, software and hardware + - please report encountered issues and provide feedback + here + and get in touch via Discord! + + +
+
+ '''.format(url_script) + + st.write(x, unsafe_allow_html = True) + +# st.markdown(announcement) + + + ############################################################################################ + # Select target hardware + compute_uid = '' + x = params.get('compute_uid',['']) + if len(x)>0 and x[0]!='': compute_uid = x[0].strip() + + ii = {'action':'load_cfg', + 'automation':'utils', + 'tags':'benchmark,compute', + 'skip_files':False} + + if compute_uid!='': + ii['prune']={'uid':compute_uid} + + r = cmind.access(ii) + if r['return']>0: return r + + r = misc.make_selection(st, r['selection'], 'compute', 'target hardware', compute_uid) + if r['return']>0: return r + + compute_meta = r['meta'] +# st.markdown(compute_meta) + + ############################################################################################ + # Select benchmark + bench_uid = '' + x = params.get('bench_uid',['']) + if len(x)>0 and x[0]!='': bench_uid = x[0].strip() + + ii = {'action':'load_cfg', + 'automation':'utils', + 'tags':'benchmark,list', + 'skip_files':False} + + if bench_uid!='': + ii['prune']={'uid':bench_uid} + + r = cmind.access(ii) + if r['return']>0: return r + + # Prune by supported compute + selection = r['selection'] + pruned_selection = [] + + if len(compute_meta)==0 or compute_meta.get('tags','')=='': + pruned_selection = selection + else: + xtags = set(compute_meta['tags'].split(',')) + +# st.markdown(str(xtags)) + + for s in selection: + add = True + + supported_compute = s.get('supported_compute',[]) + if len(supported_compute)>0: + add = False + + for c in supported_compute: + cc = set(c.split(',')) + if cc.issubset(xtags): + add = True + break + + if add: + pruned_selection.append(s) + + # Make default selection of MLPerf inference + force_bench_index = 0 + if bench_uid == '': + j = 0 + for q in sorted(pruned_selection, key = lambda v: v['name']): + j += 1 + if q['uid'] == '39877bb63fb54725': + force_bench_index = j + + r = misc.make_selection(st, pruned_selection, 'benchmark', 'benchmark', bench_uid, force_index = force_bench_index) + if r['return']>0: return r + + bench_meta = r['meta'] +# st.markdown(bench_meta) + + if len(bench_meta)>0: + ############################################################################################ + # Check common CM interface + +# st.markdown('---') + + urls = bench_meta.get('urls',[]) + + script_path = '' + script_name = bench_meta.get('script_name','') + script_meta = {} + script_obj = None + script_url = '' + if script_name!='': + ii = {'action':'find', + 'automation':'script', + 'artifact':script_name} + r = cmind.access(ii) + if r['return']>0: return r + + lst = r['list'] + + if len(lst)>0: + + script_obj = lst[0] + + script_meta = script_obj.meta + script_path = script_obj.path + script_repo_meta = script_obj.repo_meta + + script_alias = script_meta['alias'] + + repo_meta = script_obj.repo_meta + + url = repo_meta.get('url','') + if url=='' and repo_meta.get('git', False): + url = 'https://github.com/'+repo_meta['alias'].replace('@','/') + + if url!='': + # Recreate GitHub path + if not url.endswith('/'): url=url+'/' + + url += 'tree/master/' + + if repo_meta.get('prefix','')!='': + url += repo_meta['prefix'] + + if not url.endswith('/'): url=url+'/' + + url += 'script/'+script_alias + + script_url = url + + if not bench_meta.get('skip_extra_urls', False): + url_script = misc.make_url(script_name, key='name', action='scripts', md=False) + url_script += '&gui=true' + + urls.append({'name': 'Universal CM GUI to run this benchmark', + 'url': 
url_script}) + + # Check if extra README + script_path_readme_extra = os.path.join(script_path, 'README-extra.md') + + if os.path.isfile(script_path_readme_extra): + # Check README.extra.md + url_readme_extra = url+'/README-extra.md' + + urls.append({'name': 'Notes about how to run this benchmark from the command line', + 'url': url_readme_extra}) + + + # Check URLS + if len(urls)>0: + x = '\n' + for u in urls: + name = u['name'] + url = u['url'] + + x+='* [{}]({})\n'.format(name, url) + x+='\n' + + st.markdown(x) + + ############################################################################################ + # Check if has customization + extra = {} + skip = False + + script_tags = script_meta.get('tags_help','') + if script_tags =='': + script_tags = ','.join(script_meta.get('tags',[])) + + if script_obj!=None: + ii = {'st': st, + 'params': params, + 'meta': script_obj.meta, + 'misc_module': misc, + 'compute_meta':compute_meta, + 'bench_meta':bench_meta, + 'script_path':script_path, + 'script_tags':script_tags, + 'script_url':script_url} + + import sys + import importlib + + full_module_path = os.path.join(script_obj.path, 'customize.py') + + tmp_module = None + try: + found_automation_spec = importlib.util.spec_from_file_location('customize', full_module_path) + if found_automation_spec != None: + tmp_module = importlib.util.module_from_spec(found_automation_spec) + found_automation_spec.loader.exec_module(tmp_module) +# tmp_module=importlib.import_module('customize') + except Exception as e: + st.markdown('WARNING: {}'.format(e)) + pass + + if tmp_module!=None: + if hasattr(tmp_module, 'gui'): + try: + func = getattr(tmp_module, 'gui') + except Exception as e: + return {'return':1, 'error':format(e)} + + r = func(ii) + if r['return'] > 0 : return r + + extra = r.get('extra', {}) + skip = r.get('skip', False) + + ############################################################################################ + # Show official GUI + if script_path!='' and not skip: + import script + + ii = {'st': st, + 'params': params, + 'script_path': script_path, + 'script_alias': script_alias, + 'script_tags': script_tags, + 'script_meta': script_meta, + 'script_repo_meta': script_repo_meta, + 'skip_bottom': True, + 'extra': extra} + + rr = script.page(ii) + if rr['return']>0: return rr + + end_html += '\n'+rr.get('end_html','') + + ############################################################################################ + self_url = misc.make_url(bench_meta['uid'], action='howtorun', key='bench_uid', md=False) + + if len(compute_meta)>0: + self_url += '&compute_uid='+compute_meta['uid'] + + end_html='
Self link
'.format(self_url) + + + return {'return':0, 'end_html':end_html} diff --git a/script/gui/playground_install.py b/script/gui/playground_install.py new file mode 100644 index 0000000000..479ab09857 --- /dev/null +++ b/script/gui/playground_install.py @@ -0,0 +1,141 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import datetime +import misc + +def page(st, params, extra): + + end_html = '' + + url_prefix = st.config.get_option('server.baseUrlPath')+'/' + + if not extra.get('skip_header', False): + st.markdown('---') + st.markdown('**Install [MLCommons Collective Mind automation framework](https://github.com/mlcommons/ck):**') + + + + md = '' + + ################################################################### + # Select OS + choices = [('Ubuntu, Debian and similar Linux', 'linux'), + ('Red Hat and CentOS', 'redhat'), + ('MacOS', 'macos'), + ('Windows', 'windows')] + + host_os_selection = 0 + + if extra.get('run_on_windows', False): + host_os_selection = 3 + + host_os = st.selectbox('Select your host OS:', + range(len(choices)), + format_func = lambda x: choices[x][0], + index = host_os_selection, + key = 'install_select_host_os') + + host_os_index = choices[host_os][1] + + + cur_script_file = __file__ + cur_script_path = os.path.dirname(cur_script_file) + + + notes = os.path.join(cur_script_path, 'install', host_os_index+'.md') + + if os.path.isfile(notes): + r = cmind.utils.load_txt(notes) + if r['return']>0: return r + s = r['string'] + if s != '': + show = st.toggle('Show system dependencies?', value = True) + if show: + md += s + + + need_user = '' + python = 'python3' + if host_os_index == 'redhat': + need_user = ' --user' + elif host_os_index == 'windows': + python = 'python' + + + ################################################################### + # Select repository + + choices = [('Stable Git version from GitHub: mlcommons@ck', 'stable'), + ('Dev Git version from GitHub: ctuning@mlcommons-ck', 'ctuning'), + ('Small and stable ZIP from Zenodo: 20240223', 'zenodo')] + + repo = st.selectbox('Select repository with [automation recipes (CM scripts)](https://access.cknowledge.org/playground/?action=scripts):', + range(len(choices)), + format_func = lambda x: choices[x][0], + index=0, + key='select_repo') + + repo_index = choices[repo][1] + + + # Add stable repo from Zenodo + if repo_index == 'ctuning': + cm_repo = 'ctuning@mlcommons-ck' + elif repo_index == 'zenodo': + cm_repo = '--url=https://zenodo.org/records/10787459/files/cm-mlops-repo-20240306.zip' + else: + cm_repo = 'mlcommons@ck' + + x = '{} -m pip install cmind -U {}\n\n'.format(python, need_user) + x += 'cm pull repo {}\n\n'.format(cm_repo) + + clean_cm_cache = st.toggle('Clean CM cache', value=True, key = 'install_clean_cm_cache') + + cm_clean_cache = 'cm rm cache -f\n\n' if clean_cm_cache else '' + + x += cm_clean_cache + + + + python_venv_name=params.get('@adr.python.name', '') + python_ver_min=params.get('@adr.python.version_min', '') + python_ver=params.get('@adr.python.version', '') + + if python_venv_name == '': + use_python_venv = st.toggle('Use Python Virtual Environment for CM scripts?', value = False) + if use_python_venv: + python_venv_name = st.text_input('Enter some CM python venv name for your project:', value = "mlperf-v4.0") + + if python_ver_min == '': + python_ver_min = st.text_input('[Optional] Specify min version such as 3.8:') + + y = '' + if python_venv_name!='':# or python_ver!='' or python_ver_min!='': + y = 'cm run script "get sys-utils-cm"\n' + + if python_venv_name!='': + y+='cm 
run script "install python-venv" --name='+str(python_venv_name) + else: + y+='cm run script "get python"' + + if python_ver!='': + y+=' --version='+str(python_ver) + + if python_ver_min!='': + y+=' --version_min='+str(python_ver_min) + + if y!='': + x+=y + + + md += '```bash\n{}\n```\n'.format(x) + + st.markdown('---') + st.markdown(md) + st.markdown('*Check [more CM installation notes at GitHub](https://github.com/mlcommons/ck/blob/master/docs/installation.md)*.') + + + + return {'return':0, 'end_html':end_html} diff --git a/script/gui/playground_reports.py b/script/gui/playground_reports.py new file mode 100644 index 0000000000..43aed1f04d --- /dev/null +++ b/script/gui/playground_reports.py @@ -0,0 +1,136 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import datetime +import misc + +def page(st, params): + + url_prefix = st.config.get_option('server.baseUrlPath')+'/' + + name = params.get('name',[''])[0].strip() + tags = params.get('tags',[''])[0].lower() + + ii = {'action':'find', + 'automation':'report,6462ecdba2054467'} + + if name!='': + ii['artifact']=name + if tags!='': + ii['tags']=tags + + r = cmind.access(ii) + if r['return']>0: return r + + lst = r['list'] + + end_html = '' + + ############################################################################## + if len(lst)==0: + st.markdown('Reports were not found!') + + ############################################################################## + elif len(lst)==1: + l = lst[0] + + meta = l.meta + + uid = meta['uid'] + + title = meta.get('title', meta['alias']) + + path = l.path + + x = ''' +
+

Community report

+
{}
+
+ '''.format(title) + + st.write(x, unsafe_allow_html = True) + + end_html='
Self link
'.format(misc.make_url(meta['uid'], action='reports', md=False)) + + + # Check basic password + password_hash = meta.get('password_hash','') + view = True + if password_hash!='': + view = False + + password = st.text_input("Enter password", type="password", key="password") + + if password!='': + import bcrypt + # TBD: temporal hack to demo password protection for experiments + password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e' + password_hash2 = bcrypt.hashpw(password.encode('utf-8'), password_salt) + + if password_hash.encode('utf-8')==password_hash2: + view=True + else: + st.markdown('**Warning:** wrong password') + + if not view: + return {'return':0, 'end_html':end_html} + + # Check if has text + for f in ['README.md']: + f1 = os.path.join(path, f) + if os.path.isfile(f1): + r = cmind.utils.load_txt(f1) + if r['return']>0: return r + + s = r['string'] + + st.markdown('---') + + if f.endswith('.html'): + y = s.split('\n') + ss = '' + for x in y: + ss+=x.strip()+'\n' + + st.write(ss, unsafe_allow_html=True) + else: + st.markdown(s) + + break + + + ############################################################################## + else: + reports = [] + + md = '' + + for l in sorted(lst, key=lambda x: x.meta.get('date',''), reverse=True): + + meta = l.meta + + if meta.get('private',False): + continue + + uid = meta['uid'] + + title = meta.get('title', meta['alias']) + + url = meta.get('redirect','') + if url == '': + url = url_prefix + '?action=reports&name={}'.format(uid) + + md += '* ['+title+']('+url+')\n' + + x = ''' +
+

Community reports

+
+ ''' + st.write(x, unsafe_allow_html = True) + + st.markdown(md) + + return {'return':0, 'end_html':end_html} diff --git a/script/gui/playground_reproduce.py b/script/gui/playground_reproduce.py new file mode 100644 index 0000000000..28396b8961 --- /dev/null +++ b/script/gui/playground_reproduce.py @@ -0,0 +1,436 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import misc + +import streamlit.components.v1 as components + +import streamlit as st + +import json + +announcement = 'Under development - please get in touch via [Discord](https://discord.gg/JjWNWXKxwT) for more details ...' + +badges={ + 'functional':{'url':'https://cTuning.org/images/artifacts_evaluated_functional_v1_1_small.png'}, + 'reproduced':{'url':'https://cTuning.org/images/results_reproduced_v1_1_small.png'}, + 'support_docker':{'url':'https://cTuning.org/images/docker_logo2_small.png'}, + 'support_cm':{'url':'https://cTuning.org/images/logo-ck-single-tr4.png'} + } + + +def main(): + params = misc.get_params(st) + + # Set title + st.title('Reproducibility studies') + + st.markdown(announcement) + + return page(st, params) + + + + +def page(st, params, action = ''): + + end_html = '' + + +# st.markdown('----') + + self_url = misc.make_url('', key='', action='reproduce', md=False) + url_benchmarks = misc.make_url('', key='', action='howtorun', md=False) + url_challenges = misc.make_url('', key='', action='challenges', md=False) + + # Some info + x = ''' + + + [Under development] This is a new project to reproduce modular benchmarks + across different models, data sets, software and hardware + via open challenges + based on the ACM/cTuning reproducibility methodology and badges + and automatically compose + High-Performance and Cost-Efficient AI Systems with MLCommons' Collective Mind and MLPerf. + Note that this is a collaborative engineering effort + - please report encountered issues and provide feedback + here + and get in touch via Discord! + + +
+
+ '''.format(url_benchmarks, url_challenges) + + st.write(x, unsafe_allow_html = True) + + + return {'return':0} + + +# st.markdown(announcement) + + # Check if test is selected + test_uid = '' + x = params.get('test_uid',['']) + if len(x)>0 and x[0]!='': test_uid = x[0].strip() + + + ############################################################################################ + # Select target hardware + compute_uid = '' + compute_meta = {} + compute_selection = [] + + if test_uid == '': + x = params.get('compute_uid',['']) + if len(x)>0 and x[0]!='': compute_uid = x[0].strip() + + ii = {'action':'load_cfg', + 'automation':'utils', + 'tags':'benchmark,compute', + 'skip_files':False} + + if compute_uid!='': + ii['prune']={'uid':compute_uid} + + r = cmind.access(ii) + if r['return']>0: return r + compute_selection = r['selection'] + + if test_uid == '': + r = misc.make_selection(st, r['selection'], 'compute', 'target hardware', compute_uid) + if r['return']>0: return r + compute_meta = r['meta'] + compute_uid = compute_meta.get('uid','') + + + ############################################################################################ + # Select benchmark + bench_meta = {} + + bench_name = '' + x = params.get('bench_name',['']) + if len(x)>0 and x[0]!='': bench_name = x[0].strip() + + if test_uid == '': + ii = {'action':'load_cfg', + 'automation':'utils', + 'tags':'benchmark,run', + 'skip_files':True} + + if bench_name!='': + ii['artifact']=bench_name + + r = cmind.access(ii) + if r['return']>0: return r + + # Prune by supported compute + selection = r['selection'] + pruned_selection = [] + + if compute_uid == '': + pruned_selection = selection + else: + for s in selection: + add = True + + if compute_uid in s.get('supported_compute',[]): + pruned_selection.append(s) + + r = misc.make_selection(st, pruned_selection, 'benchmark', 'benchmark', bench_name) + if r['return']>0: return r + + bench_meta = r['meta'] + + ############################################################################################ + # Select tests + if test_uid == '' and compute_uid == '' and len(bench_meta) == 0: + st.markdown('*Please prune search by device and/or benchmark ...*') + + else: + ii = {'action':'load_cfg', + 'automation':'utils', + 'tags':'benchmark,run', + 'key':'run-', + 'key_end':['-meta.json', '-meta.yaml'], + 'skip_files':False} + + if len(bench_meta)>0 or bench_name!='': + if len(bench_meta)>0: + ii['artifact']=bench_meta['uid'] + else: + ii['artifact']=bench_name + elif compute_uid !='' : + ii['prune']={'meta_key':'supported_compute', + 'meta_key_uid':compute_uid} + + if compute_uid != '': + if 'prune' not in ii: ii['prune']={} + ii['prune']['key'] = 'compute_uid' + ii['prune']['key_uid'] = compute_uid + + if test_uid!='': + if 'prune' not in ii: ii['prune']={} + ii['prune']['uid']=test_uid + + r = cmind.access(ii) + if r['return']>0: return r + + # Prune by supported compute + selection = r['selection'] + + if len(selection)==0: + st.markdown('*WARNING: No tests found!*') + else: + if len(selection)==1: + ################################################################### + # Show individual test + s = selection[0] + + full_path = s['full_path'] + test_uid = s['uid'] + + st.markdown('---') + st.markdown('**Test {}**'.format(test_uid)) + + # Check badges + x = '' + + for b in badges: + if s.get(b, False) or b=='support_cm': + x += '\n'.format(badges[b]['url']) + + if x!='': + st.write(x, unsafe_allow_html = True) + + # Check benchmark + bench_uid = s.get('bench_uid','') + if bench_uid != '': + 
url_bench = url_benchmarks + '&bench_uid='+bench_uid + st.markdown('[Link to benchmark GUI]({})'.format(url_bench)) + + # Check notes + test_md = full_path[:-10]+'.md' + if os.path.isfile(test_md): + + r = cmind.utils.load_txt(test_md) + if r['return']>0: return r + + x = r['string'] + + if x!='': + st.markdown('**Notes:**') + st.markdown(x) + + inp = {} + input_file = full_path[:-10]+'-input' + r = cmind.utils.load_yaml_and_json(input_file) + if r['return']==0: + inp = r['meta'] + + out = {} + output_file = full_path[:-10]+'-output' + r = cmind.utils.load_yaml_and_json(output_file) + if r['return']==0: + out = r['meta'] + + cmd = inp.get('cmd',[]) + if len(cmd)>0: + xcmd = ' \\\n '.join(cmd) + + st.markdown(""" +**CM command line:** +```bash +cm run script {} +``` + """.format(xcmd)) + + + st.markdown(""" +**CM input dictionary:** +```json +{} +``` + """.format(json.dumps(inp, indent=2))) + + + st.markdown(""" +**CM output dictionary:** +```json +{} +``` + """.format(json.dumps(out, indent=2))) + + + st.markdown(""" + +**Test meta:** +```json +{} +``` + """.format(json.dumps(s, indent=2))) + + + else: + ################################################################### + # Show tables + import pandas as pd + import numpy as np + + html = '' + + all_data = [] + + + # TBD: should be taken from a given benchmark + dimensions = [] + + if len(bench_meta)>0: + dimensions = bench_meta.get('view_dimensions', []) + + dimension_values = {} + dimension_keys = [] + + if len(dimensions) == 0: + keys = [('test', 'CM test', 400, 'leftAligned')] + else: + keys = [('test', 'CM test', 50, 'leftAligned')] + + for k in dimensions: + key = k[0] + + keys.append((k[0], k[1], 100, 'leftAligned')) + + dimension_values[key] = [] + dimension_keys.append(key) + + # If dimensions, sort by dimensions + for d in list(reversed(dimension_keys)): + selection = sorted(selection, key = lambda x: misc.get_with_complex_key_safe(x, d)) + + keys += [ + ('functional', 'Functional', 80, ''), + ('reproduced', 'Reproduced', 80, ''), + ('support_docker', 'Support Docker', 80, ''), + ('support_cm', 'Has unified CM interface', 80, ''), + ('notes', 'Notes', 200, 'leftAligned'), + ] + + j = 0 + + + for s in selection: + + row = {} + + full_path = s['full_path'] + test_uid = s['uid'] + + uid = s['uid'] + + url_test = misc.make_url(uid, key='test_uid', action='reproduce', md=False) + + bench_meta = s['main_meta'] + + inp = {} + input_file = full_path[:-10]+'-input' + r = cmind.utils.load_yaml_and_json(input_file) + if r['return']==0: + inp = r['meta'] + + out = {} + output_file = full_path[:-10]+'-output' + r = cmind.utils.load_yaml_and_json(output_file) + if r['return']==0: + out = r['meta'] + + row_meta = {'dict': s, + 'input': inp, + 'output': out} + + if len(dimensions) == 0: + row['test'] = '{}'.format(url_test, uid) + else: + row['test'] = 'View'.format(url_test) + for k in dimensions: + kk = k[0] + + v = misc.get_with_complex_key_safe(row_meta, kk) + + if len(k)>2 and k[2]=='tick': + if v!=None and v!='': + v = '✅' + + row[kk] = str(v) + + + # Check ACM/IEEE functional badge + url = '' + + x = '' + if s.get('functional', False): + x = '
'.format(url, badges['functional']['url']) + row['functional'] = x + + # Check ACM/IEEE reproduced badge + x = '' + if s.get('reproduced', False): + x = '
'.format(url, badges['reproduced']['url']) + row['reproduced'] = x + + # Check Docker + x = '' + if s.get('support_docker', False): + x = '
'.format(url, badges['support_docker']['url']) + row['support_docker'] = x + + x = '' + bench_uid = s.get('bench_uid','') + if bench_uid != '': + url_bench = url_benchmarks + '&bench_uid='+bench_uid + x = '
'.format(url_bench, badges['support_cm']['url']) + row['support_cm'] = x + + # Check misc notes + row['notes']=''+s.get('notes','')+'' + + # Finish row + all_data.append(row) + + # Visualize table + pd_keys = [v[0] for v in keys] + pd_key_names = [v[1] for v in keys] + + pd_all_data = [] + for row in sorted(all_data, key=lambda row: (row.get('x1',0))): + pd_row=[] + for k in pd_keys: + pd_row.append(row.get(k)) + pd_all_data.append(pd_row) + + df = pd.DataFrame(pd_all_data, columns = pd_key_names) + + df.index+=1 + + html=df.to_html(escape=False, justify='left') + st.write(html, unsafe_allow_html = True) + + + + + + + if bench_name!='': + self_url+='&bench_name='+bench_name + if test_uid!='': + self_url+='&test_uid='+test_uid + elif compute_uid!='': + self_url+='&compute_uid='+compute_uid + + end_html='
Self link
'.format(self_url) + + + return {'return': 0, 'end_html': end_html} diff --git a/script/gui/playground_scripts.py b/script/gui/playground_scripts.py new file mode 100644 index 0000000000..a3bc935038 --- /dev/null +++ b/script/gui/playground_scripts.py @@ -0,0 +1,317 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import datetime +import misc + +def page(st, params): + + url_prefix = st.config.get_option('server.baseUrlPath')+'/' + url_prefix_script = url_prefix + '?action=scripts' + + script_name = '' + x = params.get('name',['']) + if len(x)>0 and x[0]!='': script_name = x[0].strip() + + script_tags = '' + if script_name == '': + x = params.get('tags',['']) + if len(x)>0 and x[0]!='': script_tags = x[0].strip() + + + if script_tags == 'modular,app': + x = ''' + + + This is a new project to automatically compose AI applications that can run across diverse models, data sets, software and hardware + - please check our presentation at the MLPerf-Bench workshop @ HPCA'24 + and get in touch via Discord! + + +
+
+ ''' + + else: + x = ''' + + + Collective Mind is a collection of open-source, portable, extensible and ready-to-use + automation scripts with a human-friendly interface and minimal dependencies to make it easier to compose, benchmark and optimize + complex AI, ML and other applications and systems across diverse and continuously changing models, data sets, software and hardware. + Note that this is a collaborative engineering effort + to make sure that they work across all possible versions and configurations + - please report encountered issues and provide feedback + here + and get in touch via Discord! + + +
+
+ ''' + + st.write(x, unsafe_allow_html = True) + + + script_tags = st.text_input('Search open-source automation recipes by tags:', value=script_tags, key='script_tags').strip() + + # Searching automation recipes + + ii = {'action':'find', + 'automation':'script,5b4e0237da074764'} + + if script_tags!='': + script_tags=script_tags.replace(' ',',') + ii['tags']=script_tags + elif script_name!='': + ii['artifact']=script_name + + # Check variations for later: + variations = [v for v in script_tags.split(',') if v.startswith('_')] + + r = cmind.access(ii) + if r['return']>0: return r + + lst2 = r['list'] + + lst = [v for v in lst2 if not v.meta.get('private', False)] + + end_html = '' + + if len(lst)==0: + st.markdown('CM scripts were not found!') + else: + artifact = None + + if len(lst)==1: + # Individual script + recipe = lst[0] + + meta = recipe.meta + + alias = meta['alias'] + uid = meta['uid'] + + use_gui = False + x = params.get('gui',['']) + if len(x)>0 and (x[0].lower()=='true' or x[0].lower()=='yes'): + import script + + script_path = recipe.path + script_alias = alias + +# script_tags = script_tags + if script_tags=='': + script_tags = meta.get('tags_help','') + if script_tags !='': + script_tags=script_tags.replace(' ',',') + else: + script_tags = ','.join(meta.get('tags',[])) + + ii = {'st': st, + 'params': params, + 'script_path': script_path, + 'script_alias': script_alias, + 'script_tags': script_tags, + 'script_meta': meta, + 'script_repo_meta': recipe.repo_meta, + 'skip_bottom': True} + + return script.page(ii) + + else: + + st.markdown('### CM script "{}" ({})'.format(alias, uid)) + + repo_meta = recipe.repo_meta + + # Basic run + tags = meta['tags_help'] if meta.get('tags_help','')!='' else ' '.join(meta['tags']) + + x1 = misc.make_url(tags.replace(' ',','), key = 'tags', action='scripts', md=False, skip_url_quote=True) + x2 = misc.make_url(meta['alias'], action='scripts', md=False) + x3 = misc.make_url(meta['uid'], action='scripts', md=False) + end_html='
Self links: tags or alias or UID
'.format(x1,x2,x3) + + extra_repo = '' if repo_meta['alias']=='mlcommons@ck' else '\ncm pull repo '+repo_meta['alias'] + + xtags = tags + if len(variations)>0: + if xtags!='': + xtags+=' ' + xtags+=' '.join(variations) + + x = ''' +```bash +pip install cmind -U +cm pull repo mlcommons@ck{} + +cm run script "{}" +``` + +A few other popular commands: +```bash +cmr "{}" --help +cmr "{}" --shell +cm run script "{}" --shell +cm docker script "{}" +cm gui script "{}" +``` + + '''.format(extra_repo,xtags,xtags,xtags,xtags,xtags,xtags) + + + + + # Check original link + + url = repo_meta.get('url','') + if url=='' and repo_meta.get('git', False): + url = 'https://github.com/'+repo_meta['alias'].replace('@','/') + + url_readme = '' + url_readme_extra = '' + url_meta_description = '' + url_customize = '' + + if url!='': + # Recreate GitHub path + if not url.endswith('/'): url=url+'/' + + url += 'tree/master/' + + if repo_meta.get('prefix','')!='': + url += repo_meta['prefix'] + + if not url.endswith('/'): url=url+'/' + + url += 'script/'+alias + + # Check README.md + z = os.path.join(recipe.path, 'README.md') + if os.path.isfile(z): + url_readme = url+'/README.md' + + # Check README.extra.md + z = os.path.join(recipe.path, 'README-extra.md') + if os.path.isfile(z): + url_readme_extra = url+'/README-extra.md' + + # Check customize.py + z = os.path.join(recipe.path, 'customize.py') + if os.path.isfile(z): + url_customize = url+'/customize.py' + + # Check _cm.yaml or _cm.json + for z in ['_cm.yaml', '_cm.json']: + y = os.path.join(recipe.path, z) + if os.path.isfile(y): + url_meta_description = url+'/'+z + + url_gui = url_prefix_script+'&name='+alias+','+uid+'&gui=true' + + z = '* ***Check [open source code (Apache 2.0 license)]({}) at GitHub.***\n'.format(url) + z += '* ***Check [detailed auto-generated README on GitHub]({}).***\n'.format(url_readme) + z += '* ***Check [experimental GUI]({}) to run this script.***\n'.format(url_gui) + z += '---\n' + + st.markdown(z) + + st.markdown('Default run on Linux, Windows, MacOS and any other OS (check [CM installation guide]({}) for more details):\n{}\n'.format(url_prefix + '?action=install', x)) + + st.markdown('*The [Collective Mind concept](https://doi.org/10.5281/zenodo.8105339) is to gradually improve portability and reproducibility of common automation recipes based on user feedback' + ' while keeping the same human-friendly interface. 
If you encounter issues, please report them [here](https://github.com/mlcommons/ck/issues) ' + ' to help this community project!*') + + + + if url_readme_extra!='': + st.markdown('* See [extra README]({}) for this automation recipe at GitHub.'.format(url_readme_extra)) + + if url_meta_description!='': + st.markdown('* See [meta description]({}) for this automation recipe at GitHub.'.format(url_meta_description)) + + if url_customize!='': + st.markdown('* See [customization python code]({}) for this automation recipe at GitHub.'.format(url_customize)) + + # Check dependencies + r = misc.get_all_deps_tags({'meta':meta, 'st':st}) + if r['return']>0: return r + + all_deps_tags = r['all_deps_tags'] + + if len(all_deps_tags)>0: + st.markdown('**Dependencies on other CM scripts:**') + + x='' + for t in sorted(all_deps_tags): + # Test that it's not just extending tags: + if t.startswith('_') or ',' not in t: + continue + + url_deps = url_prefix_script+'&tags='+t + + x+='* [{}]({})\n'.format(t, url_deps) + + st.markdown(x) + + + else: + categories={} + + for l in sorted(lst, key=lambda x: ( + x.meta.get('alias','') + )): + + category = l.meta.get('category','') + if category == '': category = 'Unsorted' + + if category not in categories: + categories[category]=[] + + categories[category].append(l) + + if len(categories)>1: + category_selection = [''] + sorted(list(categories.keys()), key = lambda v: v.upper()) + + # Creating compute selector + category_id = st.selectbox('Prune by category:', + range(len(category_selection)), + format_func=lambda x: category_selection[x], + index = 0, + key = 'category') + + if category_id>0: + category_key = category_selection[category_id] + categories = {category_key:categories[category_key]} + + # Check number of recipes + recipes = 0 + for category in sorted(categories, key = lambda v: v.upper()): + recipes += len(categories[category]) + + x = ''' + + Found {} automation recipes: + + '''.format(str(recipes)) + st.write(x, unsafe_allow_html = True) + + + for category in sorted(categories, key = lambda v: v.upper()): + md = '### {}'.format(category)+'\n' + + for recipe in categories[category]: + meta = recipe.meta + + alias = meta['alias'] + uid = meta['uid'] + + url = url_prefix_script+'&name='+alias+','+uid + + md += '* [{}]({})'.format(alias, url)+'\n' + + st.markdown(md) + + return {'return':0, 'end_html':end_html} diff --git a/script/gui/run.bat b/script/gui/run.bat new file mode 100644 index 0000000000..0e6029ed7a --- /dev/null +++ b/script/gui/run.bat @@ -0,0 +1,2 @@ +streamlit run %CM_TMP_CURRENT_SCRIPT_PATH%\%CM_GUI_APP%.py %CM_GUI_EXTRA_CMD% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/gui/run.sh b/script/gui/run.sh new file mode 100644 index 0000000000..135cfe54ff --- /dev/null +++ b/script/gui/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +streamlit run ${CM_TMP_CURRENT_SCRIPT_PATH}/${CM_GUI_APP}.py ${CM_GUI_EXTRA_CMD} +test $? -eq 0 || exit $? 
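The script.py added next converts URL query parameters and Streamlit widget state into a CM command line: keys starting with `~~` select a variation from a group, keys starting with `~` toggle a boolean variation, and keys starting with `@` become script input flags (the test URLs in script/gui/tests/README.md further below use the same convention). A minimal sketch of that mapping, with illustrative names and simplified quoting rather than the actual implementation:

```python
# Rough sketch (not the real script.py logic) of how GUI parameters
# could be turned into a "cm run script" command line.

def build_cm_command(script_tags, params):
    variations, flags = [], []
    for key, value in params.items():
        if key.startswith('~~'):            # variation group, e.g. ~~device=cuda
            if value != '':
                variations.append('_{}'.format(value))
        elif key.startswith('~'):           # boolean variation, e.g. ~quiet=true
            if str(value).lower() == 'true':
                variations.append('_{}'.format(key[1:]))
        elif key.startswith('@'):           # script input flag, e.g. @submitter=xyz
            flags.append('--{}={}'.format(key[1:], value))
    tags = ','.join([script_tags] + variations)
    return 'cm run script --tags={} {}'.format(tags, ' '.join(flags)).strip()

print(build_cm_command('gui', {'~~device': 'cuda', '~quiet': 'true', '@port': '8501'}))
# -> cm run script --tags=gui,_cuda,_quiet --port=8501
```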
diff --git a/script/gui/script.py b/script/gui/script.py new file mode 100644 index 0000000000..9a8bc0cfeb --- /dev/null +++ b/script/gui/script.py @@ -0,0 +1,484 @@ +# Developer(s): Grigori Fursin + +import streamlit as st +import os +import cmind + +import misc + +def page(i): + + st = i['st'] + params = i['params'] + script_path = i['script_path'] + script_alias = i['script_alias'] + script_tags = i['script_tags'] + skip_bottom = i.get('skip_bottom', False) + + extra = i.get('extra', {}) + + meta = i['script_meta'] + repo_meta = i.get('script_repo_meta', None) + + no_run = os.environ.get('CM_GUI_NO_RUN', '') + + gui_meta = meta.get('gui',{}) + + gui_func = gui_meta.get('use_customize_func', '') + if gui_func!='': + ii = {'streamlit_module':st, + 'meta':meta} + return cmind.utils.call_internal_module(None, os.path.join(script_path, 'dummy') , + 'customize', gui_func, ii) + + st.markdown("""---""") + + if gui_meta.get('title','')!='': + title = gui_meta['title'] + + + # Set title +# st.title('[Collective Mind](https://github.com/mlcommons/ck)') + + url_script = 'https://github.com/mlcommons/ck' + if repo_meta != None and script_alias!='': + url = repo_meta.get('url','') + if url=='' and repo_meta.get('git', False): + url = 'https://github.com/'+repo_meta['alias'].replace('@','/') + + if url!='': + # Recreate GitHub path + if not url.endswith('/'): url=url+'/' + + url += 'tree/master/' + + if repo_meta.get('prefix','')!='': + url += repo_meta['prefix'] + + if not url.endswith('/'): url=url+'/' + + url += 'script/'+script_alias + + url_script = url + + hide = params.get('hide_script_customization', False) + + if script_alias!='': + show_customize = st.toggle('**Customize input for the CM script "[{}]({})"**'.format(script_alias, url_script), value = not hide) + hide = not show_customize + + + + # Check if found path and there is meta + # TBD (Grigori): need to cache it using @st.cache + variation_groups = {} + default_variations = [] + variation_md = {} + variation_alias = {} + + st_inputs = {} + + st_variations = {} + + if len(meta)>0: + variations = meta.get('variations',{}) + + default_variation = meta.get('default_variation','') + + variation_keys = sorted(list(variations.keys())) + + for variation_key in sorted(variation_keys): + variation = variations[variation_key] + + alias = variation.get('alias','').strip() + + if alias!='': + aliases = variation_alias.get(alias, []) + if variation_key not in aliases: + aliases.append(variation_key) + variation_alias[alias]=aliases + + # Do not continue this loop if alias + continue + + if 'default_gui' in variation: + default = variation['default_gui'] + else: + default = variation.get('default', False) + + if not default: + # Check outdated + if default_variation == variation_key: + default = True + + extra1 = '' + extra2 = '' + if default: + extra1 = '**' + extra2 = '** (default)' + + default_variations.append(variation_key) + + group = variation.get('group','') + + if variation_key.endswith('_'): + group = '*internal*' + elif group == '': + group = '*no-group*' + + if group not in variation_groups: + variation_groups[group]=[] + + variation_groups[group].append(variation_key) + + # Prepare variation_groups + if len(variations)>0: + if not hide: + st.markdown('**Select variations to update multiple flags and environment variables:**') + + variation_groups_order = meta.get('variation_groups_order',[]) + for variation in sorted(variation_groups): + if variation not in variation_groups_order: + variation_groups_order.append(variation) + + for 
group_key in variation_groups_order: + group_key_cap = group_key.replace('-',' ').capitalize() + if not group_key.startswith('*'): + y = [''] + + index = 0 + selected_index = 0 + for variation_key in sorted(variation_groups[group_key]): + index += 1 + y.append(variation_key) + if variation_key in default_variations: + selected_index=index + + key2 = '~~'+group_key + + x = params.get(key2, None) + if x!=None and len(x)>0 and x[0]!=None: + x = x[0] + if x in y: + selected_index = y.index(x) if x in y else 0 + + if hide: + st_variations[key2] = sorted(y)[selected_index] + else: + st_variations[key2] = st.selectbox(group_key_cap, sorted(y), index=selected_index, key=key2) + + elif group_key == '*no-group*': + for variation_key in sorted(variation_groups[group_key]): + v = False + if variation_key in default_variations: + v=True + + key2 = '~'+variation_key + + x = params.get(key2, None) + if x!=None and len(x)>0 and x[0]!=None: + if x[0].lower()=='true': + v = True + elif x[0].lower()=='false': + v = False + + if hide: + st_variations[key2] = v + else: + st_variations[key2] = st.checkbox(variation_key.capitalize(), key=key2, value=v) + + + # Prepare inputs + input_desc=meta.get('input_description',{}) + + if len(input_desc)>0: + + sort_desc = {} + sort_keys = [] + for k in input_desc: + sort = input_desc[k].get('sort',0) + if sort>0: + sort_desc[k]=sort + if len(sort_desc)>0: + sort_keys = sorted(sort_desc, key = lambda k: sort_desc[k]) + + other_keys = sorted([k for k in input_desc if input_desc[k].get('sort',0)==0]) + + all_keys = [] if len(sort_keys)==0 else sort_keys + all_keys += other_keys + + if not hide: + if len(sort_keys)>0: + st.markdown('**Select main flags:**') + else: + st.markdown('**Select all flags:**') + + other_flags = False + for key in all_keys: + value = input_desc[key] + + if len(sort_keys)>0 and value.get('sort',0)==0 and not other_flags: + if not hide: + st.markdown('**Select other flags:**') + other_flags = True + + ii={'key':key, + 'desc':value, + 'params':params, + 'st':st, + 'st_inputs':st_inputs, + 'hide':hide} + + r2 = misc.make_selector(ii) + if r2['return']>0: return r2 + + # Check tags + selected_variations=[] + for k in st_variations: + v = st_variations[k] + + if k.startswith('~~'): + k2 = k[2:] + elif k.startswith('~'): + k2 = k[1:] + + if type(v)==bool: + if v: + selected_variations.append('_'+k2) + elif v!='': + selected_variations.append('_'+v) + + x = script_tags + if ' ' in script_tags: + if len(selected_variations)>0: + x+=' '+' '.join(selected_variations) + + tags = '"{}"'.format(x) + else: + if len(selected_variations)>0: + x+=','+','.join(selected_variations) + + tags = '--tags={}'.format(x) + + + + + # Add extras to inputs + add_to_st_inputs = extra.get('add_to_st_inputs',{}) + if len(add_to_st_inputs)>0: + st_inputs.update(add_to_st_inputs) + + + ############################################################################ + st.markdown("""---""") + st.markdown('**Run this CM script (Linux/MacOS/Windows):**') + + + x = '' + + extra_notes_online = extra.get('extra_notes_online', '') + if extra_notes_online != '': x+=' [ '+extra_notes_online+' ] ' + + extra_faq_online = extra.get('extra_faq_online', '') + if extra_faq_online != '': x+=' [ '+extra_faq_online+' ] ' + + if x !='': + st.markdown('*'+x.strip()+'*') + + + + host_os_windows = False if os.name != 'nt' else True + host_os_use_windows = st.toggle('Run on Windows?', value = host_os_windows) + if host_os_use_windows: + var1 = '^' + host_os_flag = 'windows' +# st.markdown('*Check how to install 
[a few dependencies](https://github.com/mlcommons/ck/blob/master/docs/installation.md#windows) on Windows.*') + else: + var1 = '\\' + host_os_flag = 'linux' + + + show_cm_install = st.toggle('Install MLCommons Collective Mind', value=False) + + if show_cm_install: + + import playground_install + extra = {'skip_header': True, + 'run_on_windows': host_os_use_windows} + r = playground_install.page(st, params, extra) + if r['return']>0: return r + + + st.markdown('---') + + + ############################################################################ + shell = st.toggle('Open shell after executing CM script?', value=False) + if shell: + st_inputs['~shell'] = True + + ############################################################################ + flags_dict = {} + flags = '' + + for key in st_inputs: + value = st_inputs[key] + key2 = key[1:] + + if value!='' and (type(value)!=bool or value==True): + flags+=' '+var1+'\n --'+key2 + + z = True + if type(value)!=bool: + x = str(value) + z = x + + if ' ' in x or ':' in x or '/' in x or '\\' in x: + x='"'+x+'"' + flags+='='+x + + flags_dict[key2]=z + + + + + + + ############################################################################ + run_via_docker = False + if not extra.get('skip_script_docker_func', False) and len(meta.get('docker',{}))>0: + run_via_docker = st.toggle('Use Docker', key='run_via_docker', value=False) + + if run_via_docker: + st.markdown("*WARNING: CM automatically generates containers for a give script - it's a beta functionality - feel free to [test and provide feedback](https://discord.gg/JjWNWXKxwT)!*") + + action = 'docker' if run_via_docker else 'run' + cli = 'cm {} script {} {}\n'.format(action, tags, flags) + + + ############################################################################ + use_experiment_from_extra = extra.get('use_experiment', False) + + use_experiment = st.toggle('Use CM experiment for reproducibility', key='use_cm_experiment', value=use_experiment_from_extra) + + extra_cm_prefix = '' + if use_experiment: + cli = 'cm run experiment --tags={} -- {}\n '.format("repro,"+script_tags, var1) + cli + + ############################################################################ + + extra_setup = extra.get('extra_setup','').strip() + if len(extra_setup)>2: + show_extra_setup_notes = st.toggle('Show extra setup notes?', value = True) + + if show_extra_setup_notes: +# st.markdown('---') + st.markdown(extra_setup) +# st.markdown('---') + + + show_python_api = st.toggle('Run via Python API', value=False) + + # Python API + if show_python_api: + + final_script_tags = script_tags + if len(selected_variations)>0: + for sv in selected_variations: + final_script_tags += ' '+sv + final_script_tags = final_script_tags.replace(' ',',') + + if use_experiment: + dd = { + 'action': 'run', + 'automation': 'experiment,a0a2d123ef064bcb', + 'tags': script_tags, + 'out': 'con' + } + + unparsed_cmd = ['cm', + 'run', + 'script,5b4e0237da074764', + '--tags='+final_script_tags] + + for flag in flags_dict: + value = flags_dict[flag] + unparsed_cmd.append('--' + flag + '=' + str(value)) + + dd['unparsed_cmd'] = unparsed_cmd + + else: + dd = { + 'action':action, + 'automation':'script,5b4e0237da074764', + } + + dd['tags']=final_script_tags + + dd['out']='con' + + dd.update(flags_dict) + + import json + dd_json=json.dumps(dd, indent=2) + dd_json=dd_json.replace(': true', ': True').replace(': false', ': False') + + y = 'import cmind\n' + y+= 'r = cmind.access('+dd_json+')\n' + y+= 'if r[\'return\']>0: print (r[\'error\'])\n' + + x=''' + 
```python + {} + '''.format(y) + + # st.write(x.replace('\n','
\n'), unsafe_allow_html=True) + + st.markdown(x) + + + + ############################################################################ + show_cli = st.toggle('Run from the command line', value = True) + + if show_cli: + # Add explicit button "Run" + cli = st.text_area('', cli, height=600) + + if no_run=='' and st.button("Run in the new terminal"): + cli = cli+var1+'--pause\n' + + cli = cli.replace(var1, ' ').replace('\n',' ') + + if os.name == 'nt': + cmd2 = 'start cmd /c {}'.format(cli) + else: + cli2 = cli.replace('"', '\\"') + + prefix = os.environ.get('CM_GUI_SCRIPT_PREFIX_LINUX','') + if prefix!='': prefix+=' ' + + cmd2 = prefix + 'bash -c "{}"'.format(cli2) + + print ('Running command:') + print ('') + print (' {}'.format(cmd2)) + print ('') + + os.system(cmd2) + + # Some info + x = ''' + +
+ We would like to thank all Collective Mind users and contributors + for supporting this collaborative engineering effort -
+ please don't hesitate to report issues or suggest features at CM GitHub! 
+ ''' + st.write(x, unsafe_allow_html = True) + + return {'return':0} + +if __name__ == "__main__": + main() diff --git a/script/gui/tests/README.md b/script/gui/tests/README.md new file mode 100644 index 0000000000..ac40c80cf9 --- /dev/null +++ b/script/gui/tests/README.md @@ -0,0 +1,3 @@ +http://localhost:8501/?action=scripts&name=run-mlperf-inference-app,4a5d5b13fd7e4ac8&gui=true&@implementation=nvidia-original&@device=cuda +http://localhost:8501/?action=scripts&name=run-mlperf-inference-app,4a5d5b13fd7e4ac8&gui=true&@implementation=nvidia-original&@device=cuda&@submitter=xyz&clean=false +http://localhost:8501/?action=scripts&name=run-mlperf-inference-app,4a5d5b13fd7e4ac8&gui=true&@implementation=nvidia-original&@device=cuda&@submitter=xyz&clean=false&~~submission-generation-style=full&~dashboard=false diff --git a/script/gui/tests/generate_password.py b/script/gui/tests/generate_password.py new file mode 100644 index 0000000000..145a46dbd3 --- /dev/null +++ b/script/gui/tests/generate_password.py @@ -0,0 +1,13 @@ +import bcrypt + +#salt = bcrypt.gensalt() +# TBD: temporal hack to demo password protection for experiments +#salt = bcrypt.gensalt() + +pwd = input('Password: ') +pwd = pwd.strip() + +password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e' +password_hash2 = bcrypt.hashpw(pwd.encode('utf-8'), password_salt) + +print ('"password_hash":"{}"'.format(password_hash2.decode('utf-8'))) diff --git a/script/gui/tests/test.cmd b/script/gui/tests/test.cmd new file mode 100644 index 0000000000..e25099a378 --- /dev/null +++ b/script/gui/tests/test.cmd @@ -0,0 +1 @@ +cm run script --tags=gui --script="app generic mlperf inference" --prefix="gnome-terminal --" \ No newline at end of file diff --git a/script/gui/tests/test2.cmd b/script/gui/tests/test2.cmd new file mode 100644 index 0000000000..20f417aa9b --- /dev/null +++ b/script/gui/tests/test2.cmd @@ -0,0 +1 @@ +cm run script --tags=gui --script="app generic mlperf inference" --prefix=" " \ No newline at end of file diff --git a/script/gui/tests/test3.cmd b/script/gui/tests/test3.cmd new file mode 100644 index 0000000000..60a3d6a29d --- /dev/null +++ b/script/gui/tests/test3.cmd @@ -0,0 +1 @@ +cm run script --tags=gui --script="run mlperf inference generate-run-cmds" --prefix="gnome-terminal --" \ No newline at end of file diff --git a/script/gui/tests/test4.cmd b/script/gui/tests/test4.cmd new file mode 100644 index 0000000000..2cd19e9147 --- /dev/null +++ b/script/gui/tests/test4.cmd @@ -0,0 +1 @@ +cm run script --tags=gui,_graph diff --git a/script/gui/tests/test4a.cmd b/script/gui/tests/test4a.cmd new file mode 100644 index 0000000000..86c64b3cc1 --- /dev/null +++ b/script/gui/tests/test4a.cmd @@ -0,0 +1,2 @@ +cm run script --tags=gui,_graph --exp_tags=test + diff --git a/script/gui/tests/test4b.cmd b/script/gui/tests/test4b.cmd new file mode 100644 index 0000000000..9897defc28 --- /dev/null +++ b/script/gui/tests/test4b.cmd @@ -0,0 +1,2 @@ +cm run script --tags=gui,_graph --exp_name=mlperf-inference--all--datacenter--closed--image-classification--server + diff --git a/script/gui/tests/test5.cmd b/script/gui/tests/test5.cmd new file mode 100644 index 0000000000..ea5942d1bf --- /dev/null +++ b/script/gui/tests/test5.cmd @@ -0,0 +1 @@ +cm run script "gui _playground" diff --git a/script/import-experiment-to-sqlite/README.md b/script/import-experiment-to-sqlite/README.md new file mode 100644 index 0000000000..a65fb02a08 --- /dev/null +++ b/script/import-experiment-to-sqlite/README.md @@ -0,0 +1,155 @@ +
+Click here to see the table of contents. + +* [About](#about) +* [Summary](#summary) +* [Reuse this script in your project](#reuse-this-script-in-your-project) + * [ Install CM automation language](#install-cm-automation-language) + * [ Check CM script flags](#check-cm-script-flags) + * [ Run this script from command line](#run-this-script-from-command-line) + * [ Run this script from Python](#run-this-script-from-python) + * [ Run this script via GUI](#run-this-script-via-gui) + * [ Run this script via Docker (beta)](#run-this-script-via-docker-(beta)) +* [Customization](#customization) + * [ Script flags mapped to environment](#script-flags-mapped-to-environment) + * [ Default environment](#default-environment) +* [Script workflow, dependencies and native scripts](#script-workflow-dependencies-and-native-scripts) +* [Script output](#script-output) +* [New environment keys (filter)](#new-environment-keys-(filter)) +* [New environment keys auto-detected from customize](#new-environment-keys-auto-detected-from-customize) +* [Maintainers](#maintainers) + +
+ +*Note that this README is automatically generated - don't edit!* + +### About + +#### Summary + +* Category: *DevOps automation.* +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/master/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/import-experiment-to-sqlite)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* CM "database" tags to find this script: *import,experiment2sqlite* +* Output cached? *False* +___ +### Reuse this script in your project + +#### Install CM automation language + +* [Installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) +* [CM intro](https://doi.org/10.5281/zenodo.8105339) + +#### Pull CM repository with this automation + +```cm pull repo mlcommons@ck``` + + +#### Run this script from command line + +1. `cm run script --tags=import,experiment2sqlite [--input_flags]` + +2. `cmr "import experiment2sqlite" [--input_flags]` + +* `input_flags` can be seen [here](#script-flags-mapped-to-environment) + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'import,experiment2sqlite', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
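As a usage note (not part of the auto-generated README): every `cmind.access()` call returns a dictionary in which `return` > 0 signals an error described under `error`, so a small hypothetical wrapper can keep scripts terse:

```python
import cmind

def cm_run(ii):
    # Hypothetical helper: raise instead of checking r['return'] at every call site.
    r = cmind.access(ii)
    if r['return'] > 0:
        raise RuntimeError(r['error'])
    return r
```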
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="import,experiment2sqlite"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=import,experiment2sqlite) to generate the CM command. + +#### Run this script via Docker (beta) + +`cm docker script "import experiment2sqlite" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--db_name=value` → `CM_SQLITE_DB_NAME=value` +* `--exp_name=value` → `CM_SQLITE_EXP_NAME=value` +* `--exp_tags=value` → `CM_SQLITE_EXP_TAGS=value` + +**The above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "db_name":...}) +``` + +
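For illustration, the flags above drop their leading dashes and become plain input keys in the Python API. The values below are made-up examples; the environment variables in the comments come from the mapping just listed:

```python
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'import,experiment2sqlite',
                  'db_name': 'cm_sqlite.db',    # -> CM_SQLITE_DB_NAME
                  'exp_name': 'my-experiment',  # -> CM_SQLITE_EXP_NAME
                  'exp_tags': 'mlperf',         # -> CM_SQLITE_EXP_TAGS
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```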
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags. + +
+ +___ +### Script workflow, dependencies and native scripts + +
+Click here to expand this section. + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/import-experiment-to-sqlite/_cm.yaml)*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * set,sqlite-dir + - CM script: [set-sqlite-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-sqlite-dir) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/import-experiment-to-sqlite/_cm.yaml) + 1. ***Run native script if it exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/import-experiment-to-sqlite/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/import-experiment-to-sqlite/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/import-experiment-to-sqlite/_cm.yaml) + 1. Run "postprocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/import-experiment-to-sqlite/_cm.yaml) +
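The `--adr.['python', 'python3']...` notation above customizes named dependencies from this list. A hedged sketch of the same idea via the Python API, assuming `adr` is accepted as an input key mirroring the CLI flag:

```python
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'import,experiment2sqlite',
                  # Assumption: pin the 'python' dependency named above,
                  # like --adr.python.version_min=3.8 on the command line.
                  'adr': {'python': {'version_min': '3.8'}},
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```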
+ +___ +### Script output +`cmr "import experiment2sqlite" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize + +___ +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) \ No newline at end of file diff --git a/script/import-mlperf-inference-to-experiment/README-extra.md b/script/import-mlperf-inference-to-experiment/README-extra.md new file mode 100644 index 0000000000..64b604d16c --- /dev/null +++ b/script/import-mlperf-inference-to-experiment/README-extra.md @@ -0,0 +1,82 @@ +# About + +This portable script converts raw results from the [MLPerf™ Inference benchmark]( https://github.com/mlcommons/inference ) +to the [MLCommons CM format](https://github.com/mlcommons/ck) for the [Collective Knowledge Playground](https://x.cKnowledge.org). + +The goal is to make it easier for the community to analyze MLPerf inference results, +add derived metrics such as performance/Watt and constraints, +and link reproducibility reports as shown in these examples: +* [Power efficiency to compare Qualcomm, Nvidia and Sima.ai devices](https://cKnowledge.org/mlcommons-mlperf-inference-gui-derived-metrics-and-conditions) +* [Reproducibility report for Nvidia Orin](https://access.cknowledge.org/playground/?action=experiments&name=mlperf-inference--v3.0--edge--closed--image-classification--offline&result_uid=3751b230c800434a) + +Aggregated results are available in [this MLCommons repository](https://github.com/mlcommons/cm4mlperf-results). + +You can see these results at [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-inference,all). + +## Usage + +We have tested this portable CM script on Ubuntu and Windows. + +Install [MLCommons CM framework](https://github.com/mlcommons/ck/blob/master/docs/installation.md). 
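A quick sanity check after installing CM, assuming the `cmind` package exposes a version string (recent releases do) and that pulled repositories can be listed via the `repo` automation:

```python
import cmind

print(cmind.__version__)

# List the CM repositories pulled so far:
r = cmind.access({'action': 'find', 'automation': 'repo'})
if r['return'] > 0:
    print(r['error'])
else:
    print([repo.meta.get('alias', '') for repo in r['list']])
```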
+
+Pull the MLCommons CK repository with automation recipes for interoperable MLOps:
+```bash
+cm pull repo mlcommons@ck
+```
+
+Pull the already imported results (v2.0, v2.1, v3.0, v3.1) from the [mlcommons@cm4mlperf-results repo](https://github.com/mlcommons/cm4mlperf-results):
+
+```bash
+cm pull repo mlcommons@cm4mlperf-results --checkout=dev
+```
+
+Install the repository with raw MLPerf inference benchmark results for {NEW VERSION}:
+```bash
+cmr "get git repo _repo.https://github.com/mlcommons/inference_results_v{NEW VERSION}" --extra_cache_tags=mlperf-inference-results,version-{NEW VERSION} --time --space
+```
+
+Use the following CM command if you want to analyze private MLPerf results under submission
+(you need to be a submitter or collaborate with cTuning.org and cKnowledge.org to get access to such a repository):
+
+```bash
+cm run script "get git repo _repo.https://github.com/mlcommons/submissions_inference_v4.0" --env.CM_GIT_CHECKOUT=main --extra_cache_tags=mlperf-inference-results,version-4.0-private --time --space
+```
+
+Convert all raw MLPerf results into CM experiment entries. It can take 5-15 minutes to run the submission checker
+on the raw MLPerf results before converting them to the fast CM format (skip `--target_repo` if you want
+to record results to the `local` CM repository):
+
+```bash
+cm run script "import mlperf inference to-experiment" --target_repo=mlcommons@cm4mlperf-results --time --space
+```
+
+or for a specific submitter:
+
+```bash
+cm run script "import mlperf inference to-experiment" --submitter=CTuning
+```
+
+
+If you have already generated `summary.csv` in your current directory, you can skip the submission checker as follows:
+
+```bash
+cm run script "import mlperf inference to-experiment _skip_checker"
+```
+
+Visualize the results on your local machine via the CK playground GUI:
+
+```bash
+cm run script "gui _playground"
+```
+
+These results are also available in the [public CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-inference,all).
+
+## Further analysis of results
+
+Please check this [README](https://github.com/mlcommons/cm4mlperf-results#how-to-update-this-repository-with-new-results).
+
+# Contact us
+
+This project is maintained by the [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce).
+Join our [Discord server](https://discord.gg/JjWNWXKxwT) to ask questions, provide your feedback and participate in further developments.
diff --git a/script/import-mlperf-inference-to-experiment/README.md b/script/import-mlperf-inference-to-experiment/README.md
new file mode 100644
index 0000000000..1a02202258
--- /dev/null
+++ b/script/import-mlperf-inference-to-experiment/README.md
@@ -0,0 +1,154 @@
+Automatically generated README for this automation recipe: **import-mlperf-inference-to-experiment**
+
+Category: **MLPerf benchmark support**
+
+License: **Apache 2.0**
+
+Developers: [Grigori Fursin](https://cKnowledge.org/gfursin)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=import-mlperf-inference-to-experiment,72099fa962ea499c) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-inference-to-experiment)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see the meta description above): *import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "import mlperf inference mlperf-inference experiment 2experiment to-experiment" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment`
+
+`cm run script --tags=import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment[,variations] [--input_flags]`
+
+*or*
+
+`cmr "import mlperf inference mlperf-inference experiment 2experiment to-experiment"`
+
+`cmr "import mlperf inference mlperf-inference experiment 2experiment to-experiment [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "import mlperf inference mlperf-inference experiment 2experiment to-experiment[variations]" [--input_flags]`
+
+___
+### Customization
+
+
+#### Variations
+
+  * *No group (any variation can be selected)*
+ Click here to expand this section. + + * `_skip_checker` + - Environment variables: + - *CM_SKIP_SUBMISSION_CHECKER*: `True` + - Workflow: + +
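+
+A variation is selected by appending its underscore-prefixed name to the script tags (a minimal sketch; it mirrors the `_skip_checker` CLI example from README-extra.md and sets `CM_SKIP_SUBMISSION_CHECKER` as listed above):
+
+```python
+import cmind as cm
+
+# Minimal sketch: the trailing ',_skip_checker' selects the variation,
+# which sets CM_SKIP_SUBMISSION_CHECKER for the script workflow.
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment,_skip_checker',
+               'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```
+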
+ + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--submitter=value` → `CM_MLPERF_SUBMITTER=value`
+* `--target_repo=value` → `CM_IMPORT_MLPERF_INFERENCE_TARGET_REPO=value`
+
+**The above CLI flags can also be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "submitter":...})
+```
+
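+A complete call mirroring the CLI examples from README-extra.md might look as follows (a minimal sketch; the `submitter` and `target_repo` values are taken from those examples, not defaults):
+
+```python
+import cmind as cm
+
+# Minimal sketch: 'submitter' maps to CM_MLPERF_SUBMITTER and
+# 'target_repo' maps to CM_IMPORT_MLPERF_INFERENCE_TARGET_REPO (see above).
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment',
+               'out': 'con',
+               'submitter': 'CTuning',
+               'target_repo': 'mlcommons@cm4mlperf-results'})
+if r['return'] > 0:
+    print(r['error'])
+```
+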
+ +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE` on the command line, via the `env` dictionary in `@input.json`, or using the script flags described above.
+
+
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-inference-to-experiment/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-inference-to-experiment/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-inference-to-experiment/_cm.yaml) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-inference-to-experiment/_cm.yaml) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-inference-to-experiment/_cm.yaml) + +___ +### Script output +`cmr "import mlperf inference mlperf-inference experiment 2experiment to-experiment [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/import-mlperf-inference-to-experiment/_cm.yaml b/script/import-mlperf-inference-to-experiment/_cm.yaml new file mode 100644 index 0000000000..04f9067c0b --- /dev/null +++ b/script/import-mlperf-inference-to-experiment/_cm.yaml @@ -0,0 +1,38 @@ +# Identification of this CM script +alias: import-mlperf-inference-to-experiment +uid: 72099fa962ea499c + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "MLPerf benchmark support" + +developers: "[Grigori Fursin](https://cKnowledge.org/gfursin)" + +# User-friendly tags to find this CM script +tags: + - import + - mlperf + - inference + - mlperf-inference + - experiment + - 2experiment + - to-experiment + +input_mapping: + target_repo: CM_IMPORT_MLPERF_INFERENCE_TARGET_REPO + submitter: CM_MLPERF_SUBMITTER + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + +variations: + skip_checker: + env: + CM_SKIP_SUBMISSION_CHECKER: yes diff --git a/script/import-mlperf-inference-to-experiment/customize.py b/script/import-mlperf-inference-to-experiment/customize.py new file mode 100644 index 0000000000..486bc76d15 --- /dev/null +++ b/script/import-mlperf-inference-to-experiment/customize.py @@ -0,0 +1,332 @@ +import cmind as cm +from cmind import utils + +import os +import subprocess +import csv +import json +import copy + + +file_summary = 'summary.csv' +file_summary_json = 'mlperf-inference-summary-{}.json' +file_result = 'cm-result.json' + +model2task = { + "resnet":"image-classification", + "retinanet":"object-detection", + "ssd-small":"object-detection", + "ssd-large": "object-detection", + "rnnt":"speech-recognition", + "bert-99":"language-processing", + "bert-99.9":"language-processing", + "gptj-99":"language-processing", + "gptj-99.9":"language-processing", + "llama2-70b-99":"language-processing", + "llama2-70b-99.9":"language-processing", + "dlrm-99":"recommendation", + "dlrm-v2-99":"recommendation", + "dlrm-99.9":"recommendation", + "dlrm-v2-99.9":"recommendation", + 
"3d-unet-99":"image-segmentation", + "3d-unet-99.9":"image-segmentation", + "stable-diffusion-xl":"text-to-image" +} + +def preprocess(i): + + env = i['env'] + + cur_dir = os.getcwd() + + # Query cache for results dirs + r = cm.access({'action':'find', + 'automation':'cache,541d6f712a6b464e', + 'tags':'get,repo,mlperf-inference-results'}) + if r['return']>0: return r + + lst = r['list'] + + for c in lst: + path = os.path.join(c.path, 'repo') + + if os.path.isdir(path): + + meta = c.meta + + tags = meta['tags'] + + version = '' + for t in tags: + if t.startswith('version-'): + version = 'v'+t[8:] + break + + skip_submission_checker = env.get('CM_SKIP_SUBMISSION_CHECKER','') in ['yes','True'] + + print ('') + print ('Processing results in path: {}'.format(path)) + print ('Version: {}'.format(version)) + print ('') + + if skip_submission_checker: + if not os.path.isfile(file_summary): + return {'return':1, 'error':'{} not found'.format(file_summary)} + else: + if os.path.isfile(file_summary): + os.remove(file_summary) + + print ('* Running submission checker ...') + + xenv = {} + + submitter = env.get('CM_MLPERF_SUBMITTER', '') + if submitter != '': + xenv['CM_MLPERF_SUBMITTER'] = submitter + + ii = {'action':'run', + 'automation':'script', + 'tags':'run,mlperf,inference,submission,checker', + 'extra_args':' --skip-extra-files-in-root-check', + 'submission_dir':path} + + if len(xenv)>0: + ii['env'] = xenv + + if version!='': + print (' Version detected from cache tags: {}'.format(version)) + ii['version']=version + + r = cm.access(ii) + # Ignore if script fails for now (when some results are wrong) + if r['return']>0 and r['return']!=2: + return r + + if r['return']>0: + print ('') + print ('WARNING: script returned non-zero value - possible issue - please check!') + print ('') + input ('Press Enter to continue') + print ('') + + r = convert_summary_csv_to_experiment(path, version, env) + if r['return']>0: return r + + return {'return':0} + + +def convert_summary_csv_to_experiment(path, version, env): + print ('* Processing MLPerf repo in cache path: {}'.format(path)) + + cur_dir = os.getcwd() + + # Get Git URL + os.chdir(path) + + burl = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']) + url = burl.decode('UTF-8').strip() + + print (' Git URL: {}'.format(url)) + + os.chdir(cur_dir) + + if os.path.isfile(file_summary): + summary = [] + + with open (file_summary, encoding = 'utf-8') as fcsv: + csv_reader = csv.DictReader(fcsv) + + for rows in csv_reader: + result = {} + + keys = rows.keys() + + for k in keys: + v = rows[k] + + if v == 'False': + v=False + elif v == 'True': + v=True + else: + try: + v=float(v) + + if v==int(v): + v=int(v) + except ValueError: + pass + + result[k] = v + + # Add extra tags + if url!='': + result['git_url']=url + + location = result.get('Location','') + if location != '': + result['url']=url+'/tree/master/'+location + + accuracy = result.get('Accuracy', 0.0) +# +# print (accuracy, type(accuracy)) + if accuracy!=None and accuracy!='None' and accuracy>0: + result['Accuracy_div_100'] = float('{:.5f}'.format(result['Accuracy']/100)) + + # Add ratios + + + # Append to summary + summary.append(result) + + r=utils.save_json(file_summary_json.format(version), summary) + if r['return']>0: return r + + # Create virtual experiment entries + experiment = {} + + for result in summary: + + # Create name + mlperfmodel = result['MlperfModel'] + task = model2task[mlperfmodel] + + system_type = result['SystemType'] + + division = result['Division'] + 
has_power = result.get('has_power', False) + + if division == 'network': + xdivision = 'closed-network' + else: + xdivision = division.lower() + if has_power: + xdivision += '-power' + + # If datacenter,edge - remove ,edge to be consistent with https://mlcommons.org/en/inference-datacenter-21/ + j=system_type.find(',') + if j>=0: + system_type=system_type[:j] + + scenario = result['Scenario'].lower() + + name = 'mlperf-inference--{}--'+system_type+'--'+xdivision+'--'+task+'--'+scenario + + name_all = name.format('all') + name_ver = name.format(version) + + for name in [name_all, name_ver]: + if name not in experiment: experiment[name]=[] + experiment[name].append(result) + + # Checking experiment + env_target_repo=env.get('CM_IMPORT_MLPERF_INFERENCE_TARGET_REPO','').strip() + target_repo='' if env_target_repo=='' else env_target_repo+':' + + + print ('') + for name in experiment: + print (' Preparing experiment artifact "{}"'.format(name)) + + tags = name.split('--') + if 'mlperf' not in tags: tags.insert(0, 'mlperf') + + # Checking if experiment already exists + r = cm.access({'action':'find', + 'automation':'experiment,a0a2d123ef064bcb', + 'artifact':target_repo+name}) + if r['return']>0: return r + + lst = r['list'] + + if len(lst)==0: + r = cm.access({'action':'add', + 'automation':'experiment,a0a2d123ef064bcb', + 'artifact':target_repo+name, + 'tags':tags}) + if r['return']>0: return r + + path = r['path'] + else: + path = lst[0].path + + results = experiment[name] + + # Check if already date directory + dirs = os.listdir(path) + + path2 = '' + for d in dirs: + dd = os.path.join(path, d) + if os.path.isdir(dd): + path2 = dd + break + + if path2=='': + + r = utils.get_current_date_time({}) + if r['return']>0: return r + + date_time = r['iso_datetime'].replace(':','-').replace('T','.') + + path2 = os.path.join(path, date_time) + + os.makedirs(path2) + + # Check if cm-result.json + fresult = os.path.join(path2, file_result) + + if os.path.isfile(fresult): + r=utils.load_json(fresult) + if r['return']>0: return r + + existing_results = r['meta'] + + # Need to check which ones to add + for result in existing_results: + found = False + + # New results + for result2 in results: + matched = True + + # Need to iterate over keys in the new results since old results can have more keys (derivates, etc) + for k in result2: + if k!='uid': + if k not in result or result2[k]!=result[k]: + matched = False + break + + if matched: + found = True + break + + if not found: + results.append(result) + + # Check extra keys + final_results=[] + for result in results: + # Generate UID + if 'uid' not in result: + r=utils.gen_uid() + if r['return']>0: return r + + result['uid'] = r['uid'] + + # Get Result and Units together + if 'Result' in result and 'Units' in result: + result['Result_Units']=result['Units'] + + # Temporal hack for Power to separate power from the graph + units = result.get('Units','') + if units == 'Watts' or 'joules' in units: + if 'Result_Power' not in result: + result['Result_Power']=result['Result'] + result['Result']=None + + # Write results + r=utils.save_json(fresult, results) + if r['return']>0: return r + + return {'return':0} diff --git a/script/import-mlperf-tiny-to-experiment/README-extra.md b/script/import-mlperf-tiny-to-experiment/README-extra.md new file mode 100644 index 0000000000..105e7ea4a6 --- /dev/null +++ b/script/import-mlperf-tiny-to-experiment/README-extra.md @@ -0,0 +1,68 @@ +# About + +This portable script converts raw results from the [TinyMLPerf™ benchmark]( 
https://github.com/mlcommons/tiny )
+to the [MLCommons CM format](https://github.com/mlcommons/ck) for the [Collective Knowledge Playground](https://x.cKnowledge.org).
+
+The goal is to make it easier for the community to analyze MLPerf Tiny results,
+add derived metrics such as performance/Watt and constraints,
+and link reproducibility reports as shown in these examples:
+* [Power efficiency to compare Qualcomm, Nvidia and Sima.ai devices](https://cKnowledge.org/mlcommons-mlperf-inference-gui-derived-metrics-and-conditions)
+* [Reproducibility report for Nvidia Orin](https://access.cknowledge.org/playground/?action=experiments&name=mlperf-inference--v3.0--edge--closed--image-classification--offline&result_uid=3751b230c800434a)
+
+Aggregated results are available in [this MLCommons repository](https://github.com/mlcommons/cm4mlperf-results).
+
+You can see these results at the [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny,all).
+
+## Usage
+
+We have tested this portable CM script on Ubuntu and Windows.
+
+Install the [MLCommons CM framework](https://github.com/mlcommons/ck/blob/master/docs/installation.md).
+
+Pull the MLCommons CK repository with automation recipes for interoperable MLOps:
+```bash
+cm pull repo mlcommons@ck
+```
+
+Install the repositories with raw MLPerf Tiny benchmark results:
+```bash
+cm run script "get git repo _repo.https://github.com/mlcommons/tiny_results_v0.7" --extra_cache_tags=mlperf-tiny-results,version-0.7
+cm run script "get git repo _repo.https://github.com/mlcommons/tiny_results_v1.0" --extra_cache_tags=mlperf-tiny-results,version-1.0
+cm run script "get git repo _repo.https://github.com/mlcommons/tiny_results_v1.1" --extra_cache_tags=mlperf-tiny-results,version-1.1
+```
+
+You can also add private results to compare submissions locally before they become public:
+```bash
+cm run script "get git repo _repo.https://github.com/mlcommons/submissions_tiny_v1.1" --extra_cache_tags=mlperf-tiny-results,version-1.1-private
+```
+
+You can use a specific checkout/branch as follows:
+```bash
+cm run script "get git repo _repo.https://github.com/mlcommons/submissions_tiny_v1.1" \
+     --extra_cache_tags=mlperf-tiny-results,version-1.1-private,generate_final_report \
+     --depth="" \
+     --branch=generate_final_report
+```
+
+Convert raw MLPerf results into CM experiment entries:
+```bash
+cm run script "import mlperf tiny to-experiment"
+```
+
+Visualize the results on your local machine via the CK playground GUI:
+```bash
+cm run script "gui _graph" --exp_tags=mlperf-tiny
+```
+
+You can then select the results you want to visualize and compare,
+add derived metrics and set constraints as shown in the following example:
+
+![](assets/cm-visualization-and-customization-of-tinymlperf-results2.png)
+
+
+These results are also available in the [public CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny,all).
+
+# Contact us
+
+This project is maintained by the [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce).
+Join our [Discord server](https://discord.gg/JjWNWXKxwT) to ask questions, provide your feedback and participate in further developments.
diff --git a/script/import-mlperf-tiny-to-experiment/README.md b/script/import-mlperf-tiny-to-experiment/README.md
new file mode 100644
index 0000000000..ab2cf804d1
--- /dev/null
+++ b/script/import-mlperf-tiny-to-experiment/README.md
@@ -0,0 +1,137 @@
+Automatically generated README for this automation recipe: **import-mlperf-tiny-to-experiment**
+
+Category: **MLPerf benchmark support**
+
+License: **Apache 2.0**
+
+Developers: [Grigori Fursin](https://cKnowledge.org/gfursin)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=import-mlperf-tiny-to-experiment,83e3efd7611f469b) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-tiny-to-experiment)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
+* All CM tags to find and reuse this script (see the meta description above): *import,mlperf,tiny,mlperf-tiny,experiment,2experiment,to-experiment*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=import,mlperf,tiny,mlperf-tiny,experiment,2experiment,to-experiment`
+
+`cm run script --tags=import,mlperf,tiny,mlperf-tiny,experiment,2experiment,to-experiment [--input_flags]`
+
+*or*
+
+`cmr "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment"`
+
+`cmr "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment " [--input_flags]`
+
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'import,mlperf,tiny,mlperf-tiny,experiment,2experiment,to-experiment',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="import,mlperf,tiny,mlperf-tiny,experiment,2experiment,to-experiment"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=import,mlperf,tiny,mlperf-tiny,experiment,2experiment,to-experiment) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment" [--input_flags]`
+
+___
+### Customization
+
+
+#### Script flags mapped to environment
+Click here to expand this section.
+
+* `--target_repo=value` → `CM_IMPORT_TINYMLPERF_TARGET_REPO=value`
+
+**The above CLI flags can also be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "target_repo":...})
+```
+
+ +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE` on the command line, via the `env` dictionary in `@input.json`, or using the script flags described above.
+
+
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-tiny-to-experiment/_cm.yaml)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * get,sys-utils-cm
+       - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-tiny-to-experiment/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-tiny-to-experiment/_cm.yaml)
+  1. ***Run native script if it exists***
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-tiny-to-experiment/_cm.yaml)
+  1. Run "postprocess" function from customize.py
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-tiny-to-experiment/_cm.yaml)
+
+___
+### Script output
+`cmr "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment " [--input_flags] -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
diff --git a/script/import-mlperf-tiny-to-experiment/_cm.yaml b/script/import-mlperf-tiny-to-experiment/_cm.yaml
new file mode 100644
index 0000000000..f6c36f795b
--- /dev/null
+++ b/script/import-mlperf-tiny-to-experiment/_cm.yaml
@@ -0,0 +1,33 @@
+# Identification of this CM script
+alias: import-mlperf-tiny-to-experiment
+uid: 83e3efd7611f469b
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "MLPerf benchmark support"
+
+developers: "[Grigori Fursin](https://cKnowledge.org/gfursin)"
+
+# User-friendly tags to find this CM script
+tags:
+- import
+- mlperf
+- tiny
+- mlperf-tiny
+- experiment
+- 2experiment
+- to-experiment
+
+input_mapping:
+  target_repo: CM_IMPORT_TINYMLPERF_TARGET_REPO
+
+
+# Dependencies on other CM scripts
+deps:
+
+  # Detect host OS features
+  - tags: detect,os
+
+  # Install system dependencies on a given host
+  - tags: get,sys-utils-cm
diff --git a/script/import-mlperf-tiny-to-experiment/assets/cm-visualization-and-customization-of-tinymlperf-results2.png b/script/import-mlperf-tiny-to-experiment/assets/cm-visualization-and-customization-of-tinymlperf-results2.png
new file mode 100644
index 0000000000000000000000000000000000000000..8b5be23fb5045f61d914d9ff92f648a3a7c0acb9
Binary files /dev/null and b/script/import-mlperf-tiny-to-experiment/assets/cm-visualization-and-customization-of-tinymlperf-results2.png differ
z2rZpRE1`cuPSt>^q`US6d%d!<(oH`79-6IZRL^!o`EMzJl9WRsphEv&tG9FZ2UQw% zbW#OGoD$%GK%kZk!!n^`5!`Z*8#GYcW)Zg_Li;@XY0_3q8YPI^!G#sCB&s>*wJhRo zZ~ppgV+#1onon|km&XnXH`!kE`C3Et2a2o3VZND9rLkE-_ zKi0{1CYllT))$Ls%!0z3%Zc(>GyY}r=S{43vm?}Wc|NwssV~n4r%<$m`tdaXWQRw* z?aV8_N6ozK>Nl;Pu(L&BrG>pR9Fdo!>-U+S9YA?z%Q!Ky(dtP-lU6*QLaj4F&}f*b zsQ0Zyo8_RaJU=dv_}Z3z%P4J{JA}(9*}p$q=h=yq#n7hw*`o%6oBEwwO?smTGxc?} zIZjDcoUCK84^2DSnQO8hFhzj5wCy3FV@TeUX*`C>ZTnv3LB3M<(aHqru0$ZLg*5=8 zYV}OewCtL>I*Rq`Dmhk1;Jf=mlHJ=YH(_DlHx}5t&!GfvwV7#m(5&Hu9nltL_a9(d z*UxiKDc45Fr5;1VFBN=?N7E~ygH)uwF;pu{(E}GynwB?HqRdgeXHQOd=(%rzfg)CNq?~uy>k6wEV2M=ZA`gX#*3tbZGlP{OzD zXTzXq_Qm-vKX7v}(`sN8gKy;Wgvld~3-hMxwC=;s^1XB)qSb%5#1|>nB!#0F%-R0= zZLxDWd0zN80o4w;%p=AFSk@>*$uAY^&nNv$n8?gFTuw&ts&EkW50te^_<(o!Uy*nW zibQ~B&~Gqe#LrZ`eh-gJ=zfSc#*oz1=xjcFnn9cD(wuQN{^qC= zrA~FtF^voS9NpmR@6aR3cU@%qO??t6Ej*ROE*ZJ>!5;ApcKs**t(BC|{sfTi-Kc`X z$?SKs-=y)_kGz)Y<{r5E^@)l9wLpd-joT$AhKEQ*lc(vn7|`FdbfM*C2I2fmkQ3!&`&hWF66L5pu4DYNZGA5?8^wvf{DoeLm7uIYo$aLu>; z$c9*4q@jo3hi~qwP)K9xu;Jq~2vC|dmvQxDW2?A@gbg&CaU|}!VWn|5JBY5nG5+S? zb;xp)Wjr8Qz0H1lk)8!c0+=WZ@0HJ`odj?eMi0cF<=Un-2;zkY&%?HbjZA%)8J4{Fm3D|G+DTwJ^(Cz7AGfS14K*TpnuU+ znVo;Ct*D5|X0yOS`E5s1UeZ zL=7mUiY8GVFVAu+*(hRSIBiU9Y)ohK+q7Aga--QYMszvD>u;2Ype%Kn5hAp|2-7*s6MHc@IA z$g|Q2KzgL27cB$miH5%eN~{s*ZNyj<4G|#ZdNWD2B@~vBJYgs%0?;R+!#Rn^N6y3v z6L7g(hnh=WgfmQ#;zn`Nv$aoVV2X5;#+9$T036SWpN8M~qAQUhiLMT|-OKm(ixiNj-Vk?*gK zo&fq;tkFhhuMv}3Yj&bYO1Z0WRA&S{vpy8Oy=OJN2jZ_)RxE;lFu4spqLhH9vuc-A zFSWm)&$;e!cJHzrmJ~s+vQ~;7+UdEN0}dTZw0zSi;MbpGkV?+SN6-9Di8ah>Vox34 z+4Vd$c&tAgefE5f-ys%MG^y<%R_D5p0dHiIk?N5NJapROGw6XlW8xPtmhKJ2a4MME zt~1u&y}C*oy;{W6js;Bmoi8Y{mX#YNQt%?JA-R7q$$$?axTG^MaR=r+ghI=DXp(%VoX&JZ1 z;1K+}T37m8^L9{avZVa~$mUTh@bZO)DD~g5W=$QwZ=Vn@Oeo|&dp@R@d+FE^+Xrm2 z!^nnK$PeF?W=}KqJdLaBI3ystcH!7OAVXM)ZM9x}>I#5U#`)IseP{#_q@Qatn75hk z3VpU&0xp~JvKXUB?VTBAgKjOm(}BS0v}V2keTK*kQ|TrfsI1Nl&LDk(X?L18^!3ow zrixj}{3kyJ04n9h?YfQ1+Zk#zKDY?lYHGnZ9lDQoqWlJk;vDsE{vp)Dhk8pWnHD}p zaHGRsf?UcB1KH_6%AAXi@7Mh2Oi?WqsY}@{)ByP%B++}qD=hXc2FeB(C`f;cuYn-! 
zU!r$ImhExuX4yA)g|!u1wL^>wr{)3tgKk2OQ4ytQK?lvJ?yLEF+#WaJ3J9Dc<&;Gy zF*nY~=$us+;0b@sJrdZcH9dWvCQ@-13cLJ z<}hMjxwbX&jkgu_ed~)|TADd%3iVW1oY9 z*0E~Y{N~Zn=4{8c6%ClIrrU?R)a@Thg||ftZ&E^vt40@G*pS~ z-L;ei4!*X~x3OW6JV}V6(YS+VCh~tp-51O7`%&BS@)=*$d!;)7j4mt`Cwp55T1b{3 zIg&P`NBgGt4(X{=_)UE`+e{AgF512!0kigNdiv5+gCwpJa`#_tNj7G+>b^LkSkBi+ zy_jqUv4|)`cZq>vZTeeb)QX|X<48annCTD1?VN)#6@N?svvM@kz7dXk5L`NxH$NS%VtDxH%f&nz0NfSop18_lI7w@|}heBR^3aVzAP7tHnmi5Q; znmU|kTikM4&W!;XR`{pb2DdQu7fp(utWPL9jIFjQo^9^X@mIdG7?uwkkQto1nioGv z(@km804iYg4EZUI^IZZuje;!M9RjH3t$||K!ojkHBQrgR>YzHxXVH3EFC&Y(Y{Il- z!=~GEB8G(<^f#?qahlmxC*TI7Vn)^C<(a!Y1p9sBF|5(lE!v*W;9qc|BdU&YnRBwiiYJopWU-8Hmx-HgS@fAKX|-oesPBJ2(thX~g#=(N9U zTJ3uXqn2x-;E<&rVO`Sz4$yo(obF#W4h1*yu@20vr`}A{4<)DQ$js>8zxz^g+=bID z|2r{_%iW^x$-X*`Vv%7Z zMOKi>sNX}|XLk<+@F7T$N*MO|M($ zX7%;L z#ibX-cXRDHF$OgWxJ!sz0GXH7HG-6{FQmc-V8|R$AEsosJv+KT`=w1?DP?Rk!EmJ^ zO6OnyDGrpe`|MOoxyPU4UM{ozxvpuX&nAu=v<7bo6MF|jv-Po*Dtit{Z1(s?_AHb( z_WM<^;qg_jhy1N!Uz4^bna_$e?0knjA|hi@pf-yMwf0Got|cTj;vO0YR#VfI>M(RJ z(_5dG6VfuxrGH&Zhk+kb1GpibwmX{I^kU0?vS$8tvvNsPgAT{AZ%J-IY5I==QD~-G zdKJ#ZOzL#x*-L^mv3O?aQ0d;vy_Vpa^TB#%?tRP`(7Ro+mNG+__*>?tce`EajdB~E z)XebO4`=*Tk^3s_BPA@Bq^tDoJ0qj^alrr#2 zc%8#Zqo+*z{ZKEX@Op#3BDqJot=5<)H|1@nV2 zAwg}K*%~zx7IY$HzQ|W8rHS#-Y_>kSl0D(B*;1Xuz4Wt3k10s}yKRdy9 zfRPNwwgdAjyyy5rqwuc*B*OGI8>d-n|7wtvEUZ!omcrr-28PU)}I#9TvJoK)j2VzV?vhcifMSvv9qXNi+etvDBtItm0m%_W8_8!&4 zZi+jcehaMiFM^k>_x>~s{oBeA6@2~@fNrXH_f1w5B`ej7BRlkF0bWqCerB4FZ;V}k z=av$GLa|5L$a>w(2Gmy$RLf5QIfF@+3Qd*y@LYz%zvTeefw5xd52tEwymdeYtDkJ7 z_keUO1o(lal#J7l8){AK?8gK6;Ubwh6*>03eVGOQl?y!qdPa>DgbV5?pg+kAWDzy^749?8OVB&qDz0Rjmy?72rM> zu?N@1KNwT^$ZBY4AZr=l;PcZZj($S>-0Fovz|3K@aNYKZym{bc0KW*QeRwG}ZEYDv zm{^x&JnEff{ck)@0Id3FG0tRZT=^VC_nQi$rp^IU)vSkAFqqZh_;XOfdthm7b$~e> zss!P*FAUa_B~9K==J?yBij;=vZ(j>icfpdw+4h^Z(cXpLwykV6NHonfE>JF~;*ulaGiIF)>x9 z6D1icM(!6=b1|mrRu88+A`zgzKuf|cvkZ-!t+L%BW>bIqy3~B83U~u63^~008Mbxe@7_aXh8}A+{ zU(g5>@I13d@zav_oo#APz4J*BWVKUGDhW!_n<&u>2?=?^8I~>MSuq#0v9WP8JjfHn zO^LbtZi!^3Aw3?Xx&9B#F;H_AOKt7dbLeeK<)#LeGUK7#gbf4kJY}h6WvWv-@sqt- zn2|a3*@gpEB#Nnux3J%l_^UNiki2>DP(0#9kdBnpNAOddC7eHe$YYIH8amC@*Aad& zVUWngY54yAdk6&bIN`di<}T6t>DfoQNQJ7#kJvMe=Nk+UUi--Pkt|<(BpZ&ABN%qO zM3-{m&*2@3eRy9VId-*o)Xi@*77SPS^EHO0@A45{*pi$DMb*FWS}d*$$R{j)LHo^4 zWWRG1^noOkB_^1LZFjd%JZ6}Bvz?APt0OJlL@2WzvB3Fu^u#8^Gaev@3xU25l=KTk}}*wiPg~5QB3c3W&urB zK~N!G&qMFGulBcDj9pmmQ`z5&wjPy?D%31-Gw>lNR|d zY@VG^A_W;rJaB*y@0YO<(OzVg*_yEFH(>qTp*g&>xxxgec}JG-O+%W6OeRRyNGYNs z4&R*n8g#;y9_1)cHALtNyYOW@-zy&v$bq^J&P@0kdTm`laivLqjOlyPB#YXv;~EC=;kUFVtr|O z_3G8bhYv4Kb_?r^D=Qs(V%UH&RZ(1)Yw`6Wy9!NAI)g1(j7Eauy=cKGqv~%6ryyx5 zRd)LpBi^bYA?){y6O%nEWpJLE^FnhWxTvBg=jC9ckMZ#pxZ%08J(}TT^Difu%hZqf z&(~Au=e13OQoQl+-F^iYIqgptOcf25di%C`NB!=-d*t2lwkz}r9WSov#>#Wu!0`|y z*i*9Y!bx{%G<_#zm#{3@qtF)F&ML!FP`<4*KS3VqoboOx%L#q5GpgCb2pFc z5o_0|Ztz%?BrH+UmpvR`6tX71G&`5T@_F-`%H32yi^XagAf<88z!yPbp`s)uft+TbxCS|H7M2@?y z=4{Y4=?KLO?}t1NXdzjtg`YzkMiy{TImlivFLk2Ip!j_s2^sP`2Z>N-It6c!Hz?3y zgA|N)9lapT_=d0kie6RwGIIQ<4dwP0CxPlkdpALL& z@XE453dstM!bf|yNw(^1_{PJR%q#Eg$RN>mbY^BoQ99wmY?)&_S1})Ud|Yn*`-MYQ zqaFXYy9FTs4ic?5lhvDz5U)xD)|qR_+Qx>8((Us2FRMh}`C{K=QayfLGq&u5`vf?N z5Qi}5I#y^PEn_c@{jBoGbfiH@< z=D6r#Ppzk$m#+>C0uq4OxB%%A)QrSJd>pIEL)-0rfKzMLSlPheolj7@Jj#t`zqXs` z-aTmO*RDafzD-6)aBTrQ8;TElt?RTsv!kg)hp+B^n-p)Vhs93$0jkWk6~?~Q%yU0q zmn)$@mp40Squl6=9XgiqTWP-bc@h!}P}zL-+bD-ZTJ3wd%@o1a&5M@Tl=F-LUwZdDW_qP&I`8M-aB>gh8 zveP)F6)=~FQVZ^s&E@&pBh-dVI7ZvvF@z)915p|dL}~ZtLw7faU^$qkbEl|CRKa<* zydSs2ySdC5Xy>O&3|^O0vEGb%s4U?A?c58G3uw%@b@)D$e)D;E+oScdV#Dtz%GAZ|L;9w zl#ud1V`#avC`)xq`^lng!0SI&Wn0a7#&8b-A^)-XM8G$(blL2%2Ex#;6@s>6E<+$h 
zT0`P`CUn`go2&@HUOs3=v1kaaKw;n^iWyN7`!U@V0Rh<4Q`C+XYxc;rVPm#fqyQnZ z&HmhI-1K`oB(ht(NQNgkRo_;q7rzr_m~Fhh7v044luYbDtErmaqP!YYBWI z)9%0LQHg=#E1rDUgRQc48bck}jEAC6x~T{WZ6k0c0dK%_2JfF&0-qKe47nW1My_L;MfZhR+ z{?U<6>@EyClTh5@6y)MsGs6PWTC)2C6&+1ikmM-=IVbw@nVm%HnX72netE#7b)gW!ID+QBozG_Db@+1q4l49@&Vn zpw8s6yMffHtMg`AFS0LeS2C44{Aqa!YWRG!I&Cggo7|SL(j-*Q6DJ$3srbVCA z%BE(HJnwjCci|pJm>R#9bvVcBq!_!6vnYFcKd=W?Q2mi*t7$HHeT(NRoO!+0@2VnL zIky`aIt0JjYQajY{gEX5xzaJ6No7kCv)GbSe0PNn@YaHcFh?*9K&2wrttZ(qn$(V0 zi@VSEE%xej0y1{xrW~xKNkf{On~S(Q1_lPu{66_o*%IP*wkvD8<9Y|xh+>+7JF)Tb z@CXTeR+Czqo2Q9bTR-{SOw5%!!6FZV_+aAAV(HTE?`a;_G(>IH_t+ZyX>@63`rKgJ zK1r{8jYMui{6n?aT#i_j7EH5g5Tuj++Isfn#2npNS_(6qeLRaL<|M(ahUxmmCZyMJ zv?F+9xeW@>zfNEcRQI1tB4KE$2cFF6;!vG4XPypF<|*cX3`and4x_CI%o82ns*??1~|TfsgNOFlWkv{e&Gh#XmbjsgALRGmqbRR$d z?(|sS5kAYa_1XG;;31)q`l1!M4Raq#-D`IB(5lY}6>m>7tArCPbv!|G1tP_&;wrys zuf5b4-cJM^Oc0>YS4e@p%Yvn*u22yRY6yvrWBb-%CkcrCtN;d@uf%mm?28^%R$A48 zeL$OqS20>*N}RiVs*hE67@LgDOnp&RsFmujqiXX4btLD&WaRARqTXzMpxs~S+WV8e zyHF4)qfNj#w;-Srqy80e=iP7Gn+}1isH;P}DSIrK{gQ7|>E$v$cmXy|)kWDaECXUJ z^xMb0-jAvk*KvYOZfYSzrTffC*94q0K`$g#q~UF@2b29{i1(g`r1P~MhrOA@+guzz z6hNq!94<0;Wu%iT7<(aNjg)=(9{r2tI+;A(9i~(~Jqv=8mFeUCUmn$S$lvNv+)X9#*O5 zGsqhewwUwX;ntLm@QW8Ri~Uvw!f>j6oveMZR3er>2rv#UEi1hlkJyalhpE33XiOdx zh-VJnyL-3D@dF+Xj>A|I6X=7O&||mOQ&iep4tE#IgHb-)KWA)UkV?^y-#vG#79%61 zU+c$_Nth<9%zBIvjO9X3nL7Am!kK3Be*OOJDWjKSfqM2ks`0pYR2&%XdCAh!mul}e8nm@87upX2i{^4{+>Ga_Z*B6h@1EEkEEHt4S*I~MqT049*Ub&IadGw^Qp*Y%hQAd zAs&EIOPuV8^duy&7$R?)!F^NCpug@BfFyx;pJp`zQ0{M>LJCpuUHulOny(M?4Dr=v zjwZkCoHs2wBUcn63m1$2ttD)4o3hu_0_d%!wH0WD9UUDs;r>9Yk&@B}^?%UO`D2nt!lyUsL=OOEhTRlqy*``*K}(HGqk1Mt=~n9T zAzdt`LPGMPjIL9`OPswH%OBz16jQs+=e5}P>9{yex}s0g)=~wo%@!gKBTfbYkfEdq zzu`FURfDcepu@h>$V38K9+XT79(=`J>81*sH5@FN`LXyR)x69`9;P>h32HPf-GU(n zEuY-Xp5VzI5?##xY zD~ltW!Unx2^c0@3>oefcTiLhD0UWSFckrmn;Su4r81CV$kb4*EzfdBk7lp1d*2Qj0 z6e-4`jxAQ+N0dD$`og`d;nP#$Gr}LuL>bCfUy}Ynj+ghJSW#C~-!jAkNDibOXi{A5 zCj$!G_Xvyy#++w?R4*b^1Jnw74mQ~M$H;!cO5&s*%($Kn^s%K+qj3);+kdv|4>ZVZ(`P`AWZ{I2s3IBj509g&)*_oLZbRHLd zB?QPlGu8GLXX>ou?SORRxQg55m(5mx^&&y$gvZ}m{JY{mFv)4S2b<`v{0<_3bCX?r z!uf(9v`=;7Vl<7fGz(A)vJmq|Qq?j${dMB850YFJ6*gSQFAA94L%$uROURg2nV2Fv zK;A5g?rXgql7FIXy*IlfP=j$Mq$R)$3@>Nd+D&uv9#jWXT_#zs^y`4H_4(SQFCu+D zAbtKZ-p}_j?!P4l*soH;m0Ayp`rp!F&GyQ3KIHUEwH%=hR=dWA_V~Y}eq9}7r0 zN#P5z!0^4lC5D@9R9S|MQdy00^SLRns>PWeRg5m&vX5K@KCd4lnEVmgeig%~H5{Th zm9!ziwFBMpD!PC)b~~a3FrgZBzx^u3wH z;SIid@Z^b}WnGw9dSqnemoG?OYs%d549}i<^q>!Y|NfHwS6Tdb(iIcGnRG1MFdhjj zE91m^m0PcfdB1L0H7!tweCv4ykfeX*pnkg3RYnn}3t^cL#hm>72|cb`V}*&zbowCn zXgE>s>8ZmXMfX1-0eNW0_~UV^a54S*W*R0YnQZwIu2=xNm?Epbetr8Nyn*vO^_!`C zK4`;WiE?Ui`Fc!F4qsx|&o&1z96!qO+^9!(^{D5|hmN!r@QBnL z=CUTg{r&y-EDQjEsGkr5ZgPKuI6{o~gh{u)m!j42-qrGxTr54ic$SOn+SBRETXl{S{13b*JM+FAL5Npp zU(x~-$!hoy3U_NRtbLuG&(rY!;9ye4Co*!?w9Y}3VZ=%jgQl*O{97B>u6XFn!{^nW zmBY1PF&8*uZ`g-P&_fTxI3|35X=0YvYmlmau}QP|b%H{CoEy3vK-w5~n+dODjw&4- zjB1EVWe`t5_SZTo8_9OeJ^P!Yi*+?|{iB`! 
zS&I8NPY1A|r9IGmUyR+x4HS=NQ_>Tap35U8Wgg=`CJ;Dd>KmOy`U%a7Az@~t@GINx zOo1JhGM6oji?N=muX0^w6@I@{Qik25FCwrpKKDsDTuda?ez-2%Y%&*9P;idL`2%UP z7cwCMEOI5}gZq^nq5HMuF{e=$s5&+Z(7B2rIsmx~aK5I{sp{Rjt(BFQRZ}2c`1ZW& z>gjp%__5H#q`SZaD!D{(A$5H?LB#Lw1Sf=6gvA(WemQQH2cBL$kQ!4CO2MgtOuvp> zEd`@8WHoi|yzMIe3Wl;kZCmPOhLuKRRK= z*-$!3U15qDz6WqFRs`53yiMPypU#hR|KQl5H?45A=sY~mf7qg2Z`0y>8He+DWdn7z zEnPnQV75E1+}vCU3TschRl;?+UtiF3Zk;g)S&^o^Z*j)lsw5;&u60ro%!h5^YqYtE z_p23>k{0OnEj-`i#od_uSm?&4-Zib^a+=!4RiUE7Sj*o#26N=dXFhTHVEPoGfm}&w zb$81Z{y^mb)W9xrXm!~cBk$sXA7AX&6VKH`y|fNffzDgT@JM$q=ZAgWIP6x?6pgZd zq@@e0vA(VcTGw5578DUZ*Qb~BZ$mUmnP)f;&(5-|r6~%X8g$wsH)dLeoc!oA+XY5I z^WSfUi;V!7B2WbEu)o;Y*y!u)3veMlu^bhaOXI~lddw4>Be?)=|L)y8fGN0wUu;>> zF)(O?^(FQ7SMLx{tc!TQ)Se|S zZCZtENUARo&4?wU-qvt=3sp07Cj`Y!I+|JU47L*S!tZtR(=RQz zZruVPc;--3I#pqjSXgM` z;84l-`S>T2z>4_Z#N&VdL)tl$*`vK>5_G!YaSIy%Q&AzpR;dUnP2T5b2@giD&8 zl6m)^=#4w68Tn4xy5_`Er)y0^Ytl&k^}NfrMk`;rVqKKZxGbjwYQ5GZO!8G^3 zsHqovqw3td+)ynG$w3E=fH~Ki`HrY(l)3H`1NOt8KX(()PCUf}AHM4z6mW}i=JNM# zKY_Nr_#?Cx`Shfl3ML=c){oVlG##1~jL?+#xXrzH8-iqfzH3E&Lm!(m=pofMo*TQ( zR&BL+96smaxKck8Qm(#O_gu+U07`zK9@XC@ADnI9epJx1ul3YUhZxH^KQSF zC!ft4hwyS(}l8K0-W%+XMdW9nqW@Lfx^)q)mS_IvY;5bdQvgMG| z(@kMwv5^p_zQ@iquVLrnzd2b=Sf)x(69y6#`WGo??!Q!>B zlyCa}D7FGiBux$FoG(4=&s$>=l#QqW&e=n{YbZ7eM7X*>W|Ia=9qo zYj1N8ye=!Mm3;hrnhx|`a9t8AcqJ36`6cF5Od&AqpB5-MVIji($Vg*`j^1Dyo-T)?~RVt9D!Z7V!?F^@#ASf*=dhr+T@INe zZ&|znqtaL7yi7*`ygMeK1>owhXW%9H8N&$1KbR!u=zt!NK-p~FWAQw|3lJ_w9t)0+ zifUg?A}1qz!DC2u3#i9m>aZJu*qhIQlulH^peC+MGhMHprZp4%FO0=%P;%wX6xGl$ zvSD+>vVj)CV6ZJWp%;%LfNk|*pM4|f)ms17yi{DRUByld9)WgF7PL&cvLVsOda~^m zO5Kt!eH3j}|xWh|lK zfIYI41g|!lLJawZY@iIGd&;0**$+kToDg@=Z!PizVejMwYT?X@LfkY(PS3TDU0I4v zuiDlSrTq`lLe38}c02Fa^9@TUK`Ni8o9=-G;RL&IK>*{+$N<&Cxb_S6S|^3VvFhq- z+VIYlloTxULl{gpcjS@x$0bf|Y;1_SrmCtcz)!1A7VEU}^6?R$wu^9}g_zk_ny0EW z1pE7+Z)sf{|4M_4;6DDXq0C^-!U}+~cUhV&GYS3njY2xX^oBnuPD%OLR_EyKEO53Q z%I9=gw0gtyZ@P&NG@>~;h|1L8y}=0rkKD+$dt{PS%4xuGCQWso4KJ@15%8JFU>9Q{ z+*Rpyz1~KdJ{UskI?>fNNZSqkd8z$6_{^PGe{cs7K@O`S)zxaF=XA76KnbA_p7<7K zxF7m8LTKQ{XW zCqPBS_6d6h=38Q*EaBSLry9P0$Xk<|rFA>6=UzQp(JECE+k}p~SSYu}chCLkFF(x~ zm9Wvz-+&iBMdl^qtCarY0J2!3tHl#+O!;Q|H;60eLL!QE zq5%ErCG6{1ddsb*jIrKFpaotLi?|o&MJOPx(%)Iu{wOui)ZHu;l;|FP(5O$n=*6SE z1<6d}D40<_D){=xAWY$&+8T=1F*ci3yRMX14U;(T$3D0ID2C@QN5a_EJIkl#6-Yyh z*eRT9HGyZ`$EU=RW(k1v^N0$=#${5ONDm%pTlOzoJ8Ek`i~S`s#{qsmfaLa)TmY~& zK#t#hZUk&7Lkrck>Sx4h4S(SCL;e0k!v39O{ulN2Mw_?Fb_PUT3&5oH`eErB8p7W@ z`YG-HtO!N{#r${4{X5?O2WtF(c`AP?&ZOVERseFjflw}Y1i%74uR=R?Blc*$z#XeO zbf|CBW&aAFhTX@{cz9xaTBO6pFafO{2!WK8x$-pZ1}t1$(LJxe1P1E;tr-B+hW~_3 zU`2(!tSnmW1-tF0yoH4YI{>gk*y(Fa*nd0In}kuipEsUd3ZkzV}BvvZDSay#Km8 z^LQXR%Rqpf{aujwg(ms|@Bs{{XAxN(HglIw4`@ZbQJxP}P&NPJXn;ng@JDrgV>I|Z z^FJwI;<)z{!~8>uNIY4+smKum&lXtz$P^Tko3{a;n=|M}U$2BmwXy>P)|DPj={c)+ z{mbdf{#2kPO?4mqeprG3-WQiCQPS5ZJ>$yk zID$mPQzx;@PBADhl!5;lI^z%waKw}RkL3UI0{lmOabsYu^GQV4%4ZZ}tViCOnwqlN zhk$?Xh(q7NV0Rh@3f}Ld7M&5>U5UU^N zD)>%heLTdT1r-&SmpdC2uKLK9;#HvtI~w2h<-yUlEPgB1#nG11yLaE`*PR?3Uc_Po z9davycKi12LlYh>3=9kcf^I-@Z)*dx6HBEI)h%ZlfPiohKZ#cYZdCi6u7ue_~T{_>6if<`mqJ z2{?3i=edSH_A#P3YN&E87X${{u9W{GDgVN|I+1giE9*R)O@gZ^yQY;FZIH-RUtgD9 zH$ZMdK(dJ3@6nR{e)C|IGP@jLr~ckpZyt?*;xp~3MgEA9fYN62C9#JPWM%dK7!zzV zR=~i(VA`7j@Oa1c&;Ie)%Ci5iWujtXwXP=Rt3zay-xYDi0uT()8-X(ZFTHU-V1npH zH4MOSOG``9d}lfb&BJYNpC>?hf6#)J@_-WwD0Y6BWjb`3+3&Ce`sD;;v6zGepn?HL zpAR2MW4$82e*OM8K$;-J0006uHgIdWDwbwKPB5?4Xs`cUPv!53@4IXb zQD-o)Yy&WWJdGZx|8Id4LL>fLO^bjBej5uW=?z}0+^e*mP-Qlf#XhwH*f;fd*ki91 
zCbqM|3OHxb(xnK)xR&7uy)hKs+0(Ykr9KAn_zMNu!Gt+HMxFVGu8AFTRQR_!{kSCu=G6dO$x5I9`xLvaoLmOd(9wi7g?c6+q!x=R4~Uwu(0Zr{r;i{EmvH{TuIno&A-i@nx#n)s<&3S9NJ+j)C6lw|lUG$@_M9NEOkmtUL%8}G3?WC70fb$8Q#T#BrnVXgAJQ4Gt z4vh3L#ouw?Id+dEOvmS0@0LD1*FO}9W2zweouI6Cwa%k&r}rhoU3d%SLkw2Xq|P!v zsO1Grt&;l5>9pt}Ud?l_7Oy)(wg9==o~ZN$DMw}&+9{+vC>!|gxO8*v9U0nN35o7_ zKCybYoXWw$@U8AyHz_T%qRqaM^^LW$-N_GB$=_1L&S-;OzXS#8@8J{pQL(bJa&yP@ zzy1K4G4=A z=6lq#>vw$LC!xJxWrZxA+04h%uHd>OF+QnYJK;iAV7Qdfr642}Ug3Xex^tM_Wl#SS za9-KKbfBaS)Mq_BYK-v0kh$)Qgp)}|lp7yFWfd`s233LcK+S<-yY@FVNK$?`xsGE~ zAO-yj0gx^M=cTD;I3V);WfDk}H5yD8VHh>-qBd(HGa?PS%t`I4d?j> z-!S6&`hw6nPGi~U0ALW2^kzmz7oMP6(FCknS7*bF8ooxbMLYSwNPKzX08&~mlbac*k)WAQSTO}?HX+Kp`NdjS0)4Ua2g?fg~1pOMy zcAsAyCo7Eg3qj(Z2@@*$cHtubV7s^^OcwnlgyQ{>r#|7m(#tyQ$gW##y#h@Y@fPv z7U@SdES@wPtH2GehzfPEi)_0xR5{PRhy{`}0IC4-`@%%7B4cfYvr5HoyySt!ppxm8 zxbj?oL4=@5k%hfkO<2C_*(YG#N=umR(2iiPmE2jYQ{QRPNKneYD|n+7@W26jiBs+D zRnTHzZcv2%9@7Tx=9sVn<-mpl3lp~D&Q!7MVi@+F>3d>LwAPr^Bb!7fiF23NH%g&GUN+3j^^f{cU|m6UFa49i-a!iNuiCN z>HBD>`rMdv@Z=|mT23c8kmQ*U)YV6v=RxOtUnfl24a`oROAC{rZ9mLy{wb@Pn&?Yg zc-)|B;LF&adnWk1o%s@r(6~z}oBE($p=GjpV}&t6NsjA_u?G7pX4EH+5k77X)x>HA z_)p+PY}L#&p_m)E{rjIINnPWF3rIafJ-{|AEsXrL0ri}4xy}7Ys3G6OKcQJ~iVWEw z0M_?P490-A^aMXB>a{M1xBwOs7pcE{i5kA0o)gBBEVz!c-oQK(2bBwPNUJ=Bb}8UY zW`ttyQm^BdR`s{BvMiIMml5NchG$hboi# zlIRt69MjYmE;#+}a7>PDV1jSWv<4Zp*Q{D6corArg|>85fZZQ8WqadsK7Op#(Lp7y z_^E-=xw(Y2yZdi@)xWVP0cAM$-(@%etHP_)wu)9mYRZuBfc*esH;FG2f%VG+!S57Z z`L;NR>ocmK11FkqZ1X>Kga2HJ*AB#_Rn&eO1He~}agHzqF$5x8<<7dUgFXSR>$at~ zV1fN+bh8plvr}n-stcc2O+oMbiDrWqsx-Hln&6U&{I!HvL&4Cx_x(jWIHh)+gvW?C zlI-U0Uq+*y$EnXOHl(EEJ;O?gm<&{~O9y&56BkJ&EREI^cSUI>MjO}may*+3<)5#%v$!R&8 zA?>8RD(?u<9t=e-AU&DA)7Dvlfir2>*L-I+Us>PL4HB&C%;-Q8b9?@L*%wZzB9C#@x;kML+~yNG?* z?&@M0he$0iPNbitg}ADBaB-5B$ZK>+>9BX$7bNv{0?b=Ociuf&E)bH#N8|)F8tSp) zbMtqx2Td2TcUBp3cDj1SU+?eY^Pe#RqZ74x?mPMwH$@uYD97};g(d`8Vm|ozyfq?+ zRypU>&mLYqt8;T5fU~v6AMl!}N&s%()|+UDxzR;Y8yCh2^jwN|1wDBHdc$S&N?bvy zH{tl3jrQt<&L9or+AutI$fhN3{&+V1f&R;IMqU1JRr7+=h3qCRQ-QFm1@svAcSfwc8q72= z9r_q~c=sC(7^PJs>Udj~J|*A1IlzdmGP>GE^^mgT6;$*LDWY{5g|G^9MGKT(dKbmT zg{BqwH6!)r={f=lAcjEIxkQkF)`FnwWm2ycQ6u=U%Px$|euD|L(_#&<;p|$oPtl3+ zz{_7J)MJS#)nHp}O8ZxHj7AdaV~i2+yAc3FZvK5l%Js6lyU4@DIsX~X`8_HWpk7<7 zVcoifj4~FHYnWuxd^##us(I+x@m6=b}~$o`AN5$Kt`)U`7mZdOr&5pBAkj zKCK^)*&o0DTQmP#3;kD{``_H>zunybH3!oFfpoUsaX*Ei3GwFjC9}1HRzFa{gQaeL z77^f%c%c5}G6No%A6fHP>ip^V{%_9wpRWnD<-hjv46STAQj$VWQZjHrw~-T|V27=& z?64j$Rg9Hf-Q3(nCTzY;sJ-lqqAgYBP*P&Cq6mfT1l5n4vZIfG zY|w7jzAYCZ5;}1DK!AQ{>7GA4RePr_*GpMK2%rmAR`5hcMTypWE(@9Pji1CJL}me3 z56ZQL=|@0Fn44?jaLI7pySctT?z0BG8yJBO{lPs7mc%~oeT!~nWMmwBU>?4`&0}Dt ze-26C1ueQy-vCr0z;Xj$_ebHwC2xIRSZaxjbghz5nyS%aR!ro#Eg19|xMLVjlRuhf zpq|xIqOC?%AML)7nf)dfAwr2MzvQ{S?>4MwsIZ+(d#pQJ%J@jdpm&B!_F?=?znT<(luzU)5+FC-|nuVIhK0m#%N|W2d?b}N<1;ws?}`hJ2C-~W=kLuUc6(V!}p*P9cfMRXL|{wn9UDUp$qjN>kqdK7y%rgiSoF zmHcN#7p^+G`o#@(R~nw{0m9PQv$5AHJ_0PZuun2x*1FkaY^@PjEqEHMBOkG9jz<%C z?T!`ST6yoI;=jEyd!NBG8nl-y*`es%@tSnzHBB4C-mw*?U3IDh6&AtziMlblCuBP= z+tPg}$M(;q<_9L~4`D{NWN9G8fwN&-D1|ZP}^53 zD}{#MWi31C+a86m!v`B1bc_BBmv%Iwt~6#R1+MiQ_n>^Yc~A$SJz6-`hbQb zG981?x~4Ndwqz0wZvyHP>3EvFssy*wOpS{#x$3X0FZQCM0N)xVC8Zm>)ykquVS>yR z5i+tAL&FSQm|*AaY-_QqVN@d zteLrvBmpbSzH&lm#+Ar3`>oLi8B3eKQGB*V;9zG%?K-zNpVQQroPAD{gLrEQI@wt^ zBR-!*>LR2&b?wj=8${kss8#{mNBQhTm%d3iu0(b zF7>CHM0S4La&~(=0XeahNl$X-a0k8Z?V~I>et1loGWX4$`Rr(8UFhN@og2@>Yz7uO zF;agiV%G`ft0&@2$-z3_gh~sY!@h+r6Y`u8)TpODL3$!_c7?Bz=~f3t7P7JKV-NRo z$Ci$aAZQu+V!(ZR0^NqvSz6`{Qi&15y?Emv6>3h`II3+994?Bze`BweTnw}9WE8k6 zxpoT4kbC(Rq%dHBkMhKSlboz78YJbkDJInSvZv?crC28aOs)HDxr&0Ui@M!wM%X%P 
ze>S90gDt0K*d*iDt430<3U^UTQqoj@cst2P?+j(HIt;eUUX^9&a8b?+Dh z_VLzqpf85^=Jm257H?TA6 z4H-Bu>f;48kX$dr$257$K?ehJksm}3U$=ckgl&=b*1B!^J?)!>k*0H=P6Q!W6zFSK zt#GapyK_u;&N(xKw@xzwt~fAdgRy>dm5TRxtKbUV;P3={O{*95^x=ANOSzg7OL~2W zLCvKS$54Kv+vS;W>Q<<$t1p#?opo#J2BC4d$ruqGe!uwQaLG(Q0~;Psr44je_+Y9> zOeYe#F-DD}{L;a)yL3&^RUKwNjWHZ%F;hjmoJ|c%JgBzL?^k&fSa^R^9Bl3cujiPv zTQ+8q%1&^#%J_^9FQLzV$%)G>lpU z5;!YHP5XtzIB=e-uQKk-r{3dAng@2tp%nv$*k!8FNZLaO89mOD0!M!|F3^!*z*fBeVNO?p3jK zPnN4Fm30-+iBD|I*(YUwNz&=p(WDs3u34pzjtRIAHu#Ku)G9(|ot0Asf4n(M-z?LP zDyo{FWv@)Rc-QkP9z(dow>?voQ9wG=L z!a=?eY=1%);?SN{hfFau(xvo7WTyw#Dq!)>VW-NUy4ZL!k#7HT9*uaSh1kcMFD zYdD_}!#5A9)(?So;h%Y*(OI8owZRzUPR-#cT#wjLZMX2kTuG*Y;zTd5f5x+2cw0V4 zHujTLt+OrmG`yUhrCR?os*TY5qla5a!968fNg*LVrzeNjwujKkveXXO?o&ep(a-(! zNn)sUE+xt#Rc!PcYVHU^Rm=vVkuE(Yh@@c-$*MhF3;s#2pCfGRJd)~ew;NpB} zfwioJ&v~7|$HO_zb&DTZ^3CuI3l2(3l*D4ei;P4r6BU)63Fo4UYVa4OcacF=ojDP1 z+*!+FL28jw&3?{@>tTt7q$%PCy;VNQj}La+{o({adWnUPnZVBjeIKAb5GLWfy?fE2 z=GElU_ME_*;2}Zk{gd7ABkI-PlynMY!#3N~=tsGNfJLo`TgE;=-z2&k&lkt04NRl$ zq&)zJOw#r0JLm*U5r-2B?3wtFmYEcN&U)=QP^M-`sU#}3j&tO2-54t@^}#A%(NN%9dJ&Gz|AY&M|Ngyaf`&^MV@H><-{Y8WaC$0bz@ZJQ?N=M5AQ)zMz>eAk3cWy*SD?3(+6WO)y?g{(!}>R z(JlvHyI-K#A|1cs+jHK1Deb5^;4vU0v??B5A77iiHObaLGLqgM6IW5Zsaw7{ArQFc zYh0S6bxeQ-d3#xHV`pc_jDufal#-i&Y*PzokXqgzOcp6LSWKOab2mH%CoiOXb9_sm zGWnqClqas_z7rz8Nm9@W`j!M0V%1&_n?9Iuwu67Lu_R{ESFQ!!(|{zy`k`8MU3?5z zb;Fj+l>$N7i3)d3-rn|l;_Oa0Dym8Q?nyupSqe=&1>DfMAoQzmdk44yOlN#b{`G1nALPStJmGMaL{O)j40yTZfjj7m62t_MXRT*KCx5wAqjgpJENRDZ zsl`>>OtBo}8s6GGhbPtWr(Rd12{VXK7!q#P@dMDlOavV_hk$T;^ZH(Iy10!+8?8UH zncb*|uOGY7w*sOB!WOpAQrOKkoH4#O-1($RkksY#9ld~l%Uw;^MEjEtZgM#j6VlK8 zxa+4y2?*gWV<3x%0iq|GWZ>`WaaPIN5?MNs3Bg6XH9eusk!nZC$aV4Ic?)5OSXZ#d zN0-fD_l+a*qbsF#xJayTBY^(7$gumq791tGkY?7ozK*E~m|tY83>G(@+`zuVAfNYr z9KObool!GCWI2#kGFm9!Rx+sOB^t1?xbRZmYVmu^+ub?_tBgw)VSj(bYe5y!6Prhb zi<{q3W%kQ*KlY-^B|IWYf#FY7oP1&#!Gp$-6Zh#cEinK}9 z=cpkjYueJAT$Z|@+!0IBizd{BWj<&12N^J|#s;P^e7J>r@6`5SFNk20F>9aWQ2%}5 z-c@bm1+5fed#!aUXm2M7b$*O~={0ok>nj4l$uI8areSuKlFir&c{icgyVb{OEhK-@ zv`WavT3KDVn?1zE*5pOV1-uE0u$C87w9iQNQ?-A)E;G_y0?PTvY$9FR$yGJHd(isq zIyb?!QJwS3Dwg3Xlmdx}$s<4zDMwcZ)I#fY&ctus?mkvyJKFOBX^Sc=sO0aFuAj zB8_b@_hva}b}o(aR#vsLXqnV>)&tI$$Fj$>&Xi=juI_hH^UIOXI~?3mP^7J){3p`~ z2_Pke&631;m#}&L4WnmDu?cExx+!-NNJEt6l$IRrxRf`=>Wnvx{KRr<+8lrz+N_0GG#Z2OI_)hnqw3>o>O z<>Hc<^1#68747)1TVvtjxXjJ&eUmwP2o&ohG@%fgGVH5EAgtp#o=f1ZP#w{ZlPK9|9QcK|f;_NMh>gd{S?FNEt zffR(189 z-95(~*L{y!iP-8etPg?O40bGk=S1EE2~ibRPWyBP3Ic}|7|3jSB-k8Rq6|2{>b|Dd zU@iKqrp8pRY@l8f>-j-Mz|z-+~H?IgWbd% zvxULrs(E&^1B#y~gzOp$xO&(fs#`r(8Q8tTE0%xC|P zna+{1;O6eNmPP^=mX)uJanUfVO%`Rsa=O(-BD{5jK{MXUy7LC9+A8|cYvT+;-!~fPQRkSA7TA4Y;lhlqoB1pXj#&Yu?lT@?;9^q zHY6>B?&vz1H?58>5jL2*=H5pRm#~V^AwvZnMP+w*QbsA|Wor-9h362XWTzS4OY(>y zm^@65A(MUIRbE=Ep`!AtFBgDEaK!hOCl&aTyRnzuk9VgQl16j8qFYf~n)>J0@e%>` z%@tx_PO1BobKpeC_NAv-?!}A4*E4$|2iV)JZin?F;jam4>ga3XZ#idA6dX1_9$}-K z0Pxa-?3jJbx-QhpU3mTdn=D__R4;9nFdxsDM+dFi)4JT}&k6?Y$%DUa${6V#sPszN zy^Y){g*xy*B(CyGj7t}@6gRnq4<|Snyb^o&ZY_P=sqo(8_Qp$8(Xs&zJy9Oa2`9@; zuo;09EtN{Y&H3{m)>edRGwg9|XEH_`EO`BXJg{3QOZ@%yAS72<#l?j^QT9*!f<)>fG-;o&$f}f{Ub-?37Q#EhyObkT zZ1wx~aC=pc_XzI>1Jb67Ujz60;VojNX&x#6j^m{xz7y-OV$D;c_z=dzuF^<`fzZ1} ztk2*h1v~rFn8!b3K%}B`wOI>m0Vt~`1dvm_W%txd)BLJ{If`?y&iL3Ngy}{uK&(WH zF?A?-J8IOP|Ly2i?-Ku*Nd^zns*fnBS zSez`ZcfuvY{JbQmpu_!+;O8{Ej7XrwZ*gT#4x>%}GB|2)D!g76{Z-`y1n z8UIfxBmX@aOpITx7FBX(bQ0gP%?kzF(^Xaidm$N=6Cq?C*&3y^v=x{`=?C?l3Kf+M z!i=4={R8q!3YBtu+1CtpQ`)mj;-{VKSpEaS>l%`!=||f4-^jYF2Kl{qAt9!zeMb(v zLJN`4ca2juRYdJPJGMsVv_Rlh#mtWGle82L@HTWAbOi!`)_mJ&-4y@1 z*ThQf%#mldlpIAhtK%XiQ$L%99)6K37 
zVf&aAPSJ)J`t!9!`k(gOsFa+@cPVe#Ux@VuyzW#rV+)dL75%Y3q#72MbV)aPJnq$I zWF?g+>~O3*nSy2v8cY$VIZP= zPf(LRnsE=K5SHrf^)}_lE%9Zuv`Zm5Z`idyAqHf0XY05%EF0u!_SU@YL45Hy&^w_| z5E&;r^=b8`X@q;&+-q*cOSo=wHoAM=)*TM<`BZn{ z*G6yfzut&4h7KL%m@0YBS}SE!BuA(4dMtaW@uNrawXlm0`cG1D1KJI~s}1eT<^c3_ zTU46`(K^IwF0MBnE?X7_pG)j2>OZ_JEv^B-k{`@Ye-bs{ib+)sE_071<%#aCbz*jSu{B+sQC$?3r53#`(R%;8y{Kbw^I_X%6_mcsy^ zsFp-)u4hm2oJQ#LEI~iN20FU;J1b@Jn)CV({+--onflVOzw&J{-vYFN0`WT8y$4S^ z$&#Xp5CBfZPt58SMaQ%2qh>Ljg{Do%Sz~rOPhzG^n!?n8G_sIq=j>LibtSz&|BZ&L zvT=7hxs-79?~k2kPuea3jjV36K6#DGVZ{Ah-7N0lF*@GnAsngv8UX@;D$#64l+L== zg8+T)qs7IA95h!56%BZ*qtw)V= z+fqYLO8lonttuA1jX|>?{nHavHsi}z;@G>lrdc&;owzs2p%e#)k7yjTale@ zfC8pfIx6$}Bj(`Y?2&#E3W^q%w_0P-MJX>&&_M&g@`jUk5?|Vv^)j=)nQA6{5*UHs z^oGyRTANDr)Z10#?#D@{gso?5wOg_F4J53f=JN9DGL%@d?cGf*cjik!D*PkC40#GZ z9^OT?>uPp&s%ec;EbMeBf@xxPIa|$w{&n<-xJiytXUaPJSYo-c=l_=L@1Ph0_|m)~lJSA9X9C%zxC-FGLh*h3T@0o`)8A=s%o zzMUTn9A4P40I4n7v)}lht%rTq>*I2(&)i>}cBRiqKc2a&+48R?tgNkdh#OBq5~YPY z3<}ogzorjmLC<&WX`|S@@Mj626mA0G9V%}!c{-KX4L30d%l2438CTZWSy9TI;aK-h z!UwJ@0iitU^)>B763M-WD}ML(goLl`G3(syplX)pu#uBuipZF8HI+E{z;AyC>NHW2 z8B-TNMnkD7F3j#`=2^b=AOvd+&pWJL9*>szl6Dck=f2#_)`M2E(M6v%|0Oyi?ECP1 zBnZ_~^H8De(eikFP8=*q%;Q|wF-2`~O&vFI5d>SgP2E^3{sFkq)#EwoYiom+$&0Y- zobTJ#ta=vevTa#s5O!ey;#OF~ZP;_flZfhQJUWzclcLs{)I_i=6F-{N$+m`Bw|HcMJwv9CI34vU5XuB!E@Q0ys zZh#SOT3(s1ouqg0glPLqQC!{$3bx(ea$g}G>n?KHaZWZ4(AbivnEC0EQgrgwC}!+! zu(LB93lKo=NHNl285}iW8*Oeqw53inA(|wLHM5k*zvAI5Y`8k>x>PfJt0ss~v}{|s zotRO!ETCKGzCNik?Q9OH_wx)e?}W|cj0h+pvGe=79yGZ8<~*CrnDTael&Y40%FJ`< zcDPFjZMU-Jwhj662|RW8>LcCWZo)_f@H*ZkFWyIwFT3unWwCzZCdqDp)Mh(qJN*u_ zJomhh?<01(kITmRq{?wt#RPVWkN9y4#YTP_T=-(^VI5ccSJRLX2-TI9qtknJ)1>Qj zm-pIhYwn2aj2*+t3~@$^$Nt)07jVU21Z7<;)Z(j*>V^NKLFs%TCfYZlvm`tz`A0O zYzs}Os!w+k5@2f-Yg)8o4SDWnTwEsJr=n)hl`!PK$vO|fa_qGt>?(#29 zb@zk+E`2_(iAyOXAUGNcaA+GKR3fN&dPy4fksFm8qm0+GmcxLG z+82Z{=Mh64c^fwFid*XNAi&q)zM9GbedO<>EtkW(kGC5_ur!8jsor>v`a6tyT0WshuqA~8~ozc!kRMXn}h7B`13TETXpuUtMGjd~}phh6?v zw@==yXj_w*w(}2|GVlE9lR0fJc`UaZqYwG~{Ex=LNq*T3TgY1W24kd`-?>1%c$zy$ zfAumi0H7o7h^V((pH;iOeT{@S>~Xnm<|=rOz^~E$U~VUJSMRQU;)38W1(b;OR=W|C z_>~PZ8zD_OGG?d2G5R}L8LTM(`xS)lO`^FME zEYi?hqE(K81j=KsQ{ILbd<98{4{TqbKcZr{c*btcP5Z!1+Xlb;WVHBn{J7Q!eA~$B z38rF)A9eC}W<2Pl+-Pv-6}W6T$8Edq8^1W-x0kXg#1=X0$kaK@isN?&i<8STybd5E zL-;Mlw~n!<7Kit%_mXkay_hU$ssz>UV;G!D%9KcKcpb{dia`_ zho7%8Rct87lI0sz{hmGKZ{NlA_l$_I{$`(v|CcNUYueD71gH`@iIR zNqOw^%ZsqRtWdU6p=zt6Mw(5z?INj^LpF7BE>+|2A4)kxgKgJ?PG}+5ugt=&wH`0i zx*E2a^>0F2!BTT0V^pcycqMl^c^9j#kcO}|Qm1B^d|;KlpO7^)_TUv% z;9XGoaSu$$4uCCv4um6R;x9pMxa#EZSdK)#-i!<<6eNl0~F67DdTTDdz>$q4V@ z!Uz85t?VOSmM%sKd{C3flJ?|O+=}lPqs9KofDMG#82+U&Z+LI6j(wor?D{8&0xMkn z3ti$>);6~mwuR>J*#ta?0%V>j*2!w#UN?DEPNcAl0KGT#vYc1+G@GGp$=>8S=b2eZ zH*WwibWu=O*O#c(&MXz7aDCC7*)tc23dl;<`jxEJ1@p5k?}m`P;BUczZK+IjPeIDS z%#Hrw0Ho8sE(2~ZH(AeQ^}!TvsRrR`fLVL&L2)fqoHe8phyv#h3(R_%6qQ0n$q zpPyLozup$!uPP88THS~$L?^`+j95?}7G+Qm7JioWzMt1P6sY5QwQjCUYEKmcR@%YF z57OvJBH|aYwV!%O*4lcKr_*>Ww9UPDzL%_g63TzFriTPU>V9@u8v^0-pUQZ2WnOIydAp8`I*rU=|G zvo-*v9}t2loU^N~PK^lxgDAKWtEdVWZoVSDut1t#YkpF2?CIZ$c5Y} z97Hs}E@Gb0a$Z+`_^fvW0b$;LOOI@P;39O?{C*|MG zLI|8?vg3~Qk50w*DnDK3og3}iwn`(JsHv!`f+CMa7ejGWq1 z0bM`MZRwUoGM8}KL3DyE1Ewo^i$XwNcEbsPmwtDV`^vJeAt(9W@#N$9tj2LsnM#Xi zE^0>7=5`11e#v!`$-BJVC_O`+uTtFumui$G{=&WsZ{=a)fiq3t0$0-+vATNX9}l7m za^-&B#AePKc#lwND!xS3@pnw&q0o{cEH$O_7D7t3NWAS63?QM&(SDbOzJ3OKuYn3- zyH)iwvRm1GMcC3p>!^skkkp^Qx9fMr?L|4c2ACe^W8Jf#2J88BuY~Q;cAx@N8kYm) zL8p{Lubqd z!QOq{U!oJr)Q@NN#<6Ja`0q0I!XQEGSwbp+Cjf6Aam*3y<+{M zDR7)^Vl28zZ;GL0FbwQ)2N%B#VeFS=RQMHot>nf zw*;*{AS=nMPAV6Knrk!oX-HU`$UUZUF2AdxTN*0Jo&#IXUah$cmGdyk_6@0{0wW

%hVFfT-N^ftIj=J&`_U6VBCNQP8mGeZfDDv_hkUDonZb=5+dF|~Yaa`{F*I6}g z>{lLLdm>Cm_&2)4oSx3x>FL3T?S-le=CNlU4QC<{sQO|dehH>HbFF1?KjuZq?DA)B z(a>b(hx7AvWrz$Q1!TlU)lUpMH@{MRc9>{hw}cT{I>qFq-pz{=rzQy(snJqI@Kbf%gapAXxjO1u@ApRTQ;HdEH!G24DUnb&@@S1Qv>ic>6 z<&o2>6%TjK`*!;DvblhlvffI#g80sLV`O1}A=w%frpDVI*^6OswKRFSz`d3@cuTaU zg_cFg5vi>@KjdTDmeWN-#jx-pJ_BN5z>0}7(~~KSmV5pUcbG zWsep!sY^j+rgYZQLgj<>vs_8-<{i%AEnV2sHJyb)a(O^19W1Ce8I;QHaJTkd*va0I z)=vk6DO-U15ggT0<3Rt`cD1a;4q1;uzBb;52_Ab^%I>HnKwGjygqOF^y#Uxwm;}o3 zajeVbxo?VG>bsa)IEgSqQY8D)apC0q`90L4k!NqUS`3DaQ?+=bgdKB2G*|b!)9m;x zlGL~_+iV!*$J8D)SnUE&)P;NdLa1tvPH+2_Srs2aVe|eYHF`%oRtX)cmbt}fcO<($ z8#{-DqJ)Yhf=mDpc2lp_P+EK^3IDDz?FBfIax{<0s3d3iUcUSHq!LAm5k0%?PQ8)CS<@rt4%NeVc!V6Lt?&8&E>rHenYIt(_< zX86s`eJ`j+%NA_8QIPyV*2c<)Sa4Ns+&umUpM|%We$$S_dKt4(c^`6iAIITJyRT_I*T|#PT-Q=& z*CJgnUR{QCS%~dd|gCR;=MFkZwON#{Udzz@p5X!iZwJ3aszcsDCNxfpD zlUL9oU}Y9IqJTp(Fxif2lN z{fyJ$rBm{^Y4y>ViR=c9x7>n%;KvQ zzHneuYB4V_Az;B006~y0(svA8q6?PpnvFb(j9*Lr(^3o!G*!nPyx*C>fIxAd9dJW6 zA~|=5ou?ids6X$b`$JPxJ#?dqM#<|bh_`HvsKP*YC}j#Qi~d**L!G%VYrWD=t3L>E zhDd|&<8@Wj+>;VEyli3Yw1-A_d0(VP@Hw^5HKbLhDrAJ6ZfNQm7<1c#PT5Ktv19Qg zv;?p88p>#;w4!l!M0jcguyrJR#`VsN;d{CFXrKVwfgc$*oncKg^K3~qML+-R zehDqC2xTMo2_KdjSC>=5`226jumIyIy5MV`(z^%N)<#h2jfX8R!(t6>@7zHjh?@Vq zBD6AYgs{@PjJRQ`X0JIA zo!I8d!u=xJG->Cr^qf?sLzE}1bss<>=sk(|Aa>)mwv!T*QI2%%lEs7rX3Y8x(R?OV z;}W!(+;*(m$6eRcLctN-!TSE*Q|=1`J!<2Pky)1ZycSAF5F)SNcKib zhe}l@CMi_*$KIA4UK^SFVyuRgLakl&?H77q!k?z<66w(wn@|(Cl8k&(n!VnjKGdBq z|E?JDd@|zSlML3Eq5V2UPcP)cA+0&;xu$IiSkVpJ&M6l)&flvODjZc5E*mg%d?3qa zda@8K9Y3kCzx-I+6s4xGB9$DkrTGgQ&`>ZyJ`UmwFL;tf8@Bl!@>zHaUdXtgNx??5 z)@3fyo{Y}Fafgu78VNYKJ)d9ri$5_QSXSatpa|NVe8w3uFP(C`b)^97Rb|&hlHvhr zws6UoQ_C-IkvAm(OlF1#LR2DZ8&stb@!iij%-^Tm(^#OWS)_8_{EjUCxk(X89XSIX z9Dw6C>g{#H(BrPs5-Mye+_XPtZ&u?&Vw5J+v+7UzcYvM7(R4;b^%7)(C&HfEuVQjqf}2tea1WJyA}#*xxSltoP=N6a9$s+Pc}@` z%&&`sgH@K^%Jl4wm5KoT2-18(LZYjakGz>l2x)5NNWw<88ZjTXpqK4z7p3jt_?Sbk zwL+Tp_A>NJPy>i0YBh4t4>DE|vsvG9lY;{1@e-%M+Alc036Fj;4prY8=O?WBYcogU3#NS?RQ3|tN37d1+N2OPU=`utF(Civ{*yBEp2SfP}F5y2hlz(vv|Cc?xfI5DkdH7ZN z$4-XuPKU+1=}$7+ZIIo*+nFwg4X*WO-`BnEZ)0H@N8JX?&?LLu_jUS(o{AafmPPCQ z;Rw{%K~bP|$Poc9z)nZUnXi|f@?g?S{sSo)`^4|KO~mcXEL|8usf!~}Ak`Yy?XiB~_d&VU{{&TN zfOJQHp|f|^`XRJ*n3y8@m3kN}g6!LyV~v6OBipB=gRy8+-VLi0J-Uo53g-RP#7xzp z?Mi+{{pg@a_9pQB;&NvH5|Wb}6T0nyUEkj(yd zJUl0FquL@{*RKz&waX8jlw<&~&}a%jYBRDLa-ZMBWIf3v@IEw64*D*`ux8iTSKl*m zeZt%&nJU3$t-dSQ$WA^KIr-V^KHiYnQ}T-UXv~4V<0>}(%4818^JX$ z(=va+an)q5heb7=lbm_5=&S=&hI>4@2lNL1`aL=rIL&~J)r<+k}Tm&`DT3g zw}++ot#ia1p{@S8iL3z(Z-*3(YO*f2*SOcvlp)tKE~l^lU*YUm$_sLD%Qz=hYc}_X z(^|^>Y%JZ9aRXvry(#HmWekhMTNv1*<>u!SK+h>t7({i4EBv&A*!q?L!Q2>KTp?cH_yk3KNTA&F{DUO<%M@wR3jE|sc3LzI_D8V zKJ2NORu_E}o(9LO0T&BQh3m2xSwo3@wvc9>-I|#B1QB_hc0)Wsle=%{bb-UEd15lbvK~kj=Yac75nZn?_eoO3XZh>~vF9=(16&Hz)ijt7 z1j2O9%Po_ld=tXx<3k-)U@&f%Ia?ORVmRXAj@W-gOKBuKOvft(1HEM z4VH91e*E6b%KD!Fyh0)TfN(@KKKS&Dp9Dc*P<#?gOWmkWIrHlkYEa)VGH2Vg=AkRP zDK-w^SDcn6Y2pkg2R-YoI=)yCLQZ)AllbT#?#(|KgfW6dw1uMz9H{o%512`*iNtDW zw@iZPH%d@sjm=z_CZ{iUhP7``r#(1!p~x@DJ|o#Atz_G>Rt*vPv;B=c9P)D@oobhR zO=`xo%T}FpR*Cem{QEt|^t;L{ubVcT)s{s5`!g!6wi4*Ty$m_lSs#zbx3<%M5=$L} z7O*y3hebegc7we$*RRayQRuf(Lif|P%{<*#n#v{*lGPUl4NSISerg57-!(KR4-bpS zV=JB`n9zWkkRlhp@l9U>?RY-ws61Z>Uuz4NI92B{orLI6K18=c(6QO@Pyjwc7`fWNujNs zX6kv|EI_3+gw&x!4Pew(>H8m@SUvuO4QQs~*~kjfv*VA;5jX1J1}$WptCq`zU;Citj{ZgH2p>dQ)+^Eid)uhzo_VQT2BzJKg(j#eop zZyK)+#9N-2sTC_Szu@}pUObbab?_Hq8fM2J3>2U*LHI3U$B%mmKvy~H!{x<~Lri{j z>`pQUf|$q!aVh3U(pefDp9HoX=_quXuh- z)S%&QO*m6hiTAOe1j>l_7UDyX4OG-{Px~^;Fu;A7HQD)Lr1+X^l$@rGOr?mE%3QOa z;+%84QIT*HT@@QcOvs}%)Nac9lvH`W_cWG%%s&--dqo2Z52svAqk#dc8WQ>l98VuQ 
zW^G?c3TLwxJxUUg`S@T?+WT2{t**T1KEtuf(edj_7kIr2Q)Mlt_o^iLHr7IFZe!>C zvly9tGq7`rwVeETy~N~QyQ7Bxm{KV{?ISzO4vo}h+<~>q=+i1(vB4He20{VWBO`}6 z-$US^y?>+-X;FTKM5;f9kJ1xm{QSs+U7+`%Wp?&fua;X`zSy_hZi8}hWLCI)2lr5A zc>KmeZoaJhb}-|M&-?9-?+tRX(;w6{t6u68B7z>TKTRf5E8>wqvQHMIH+%BvSOpx& zUuAWcIP6{%3^T8VW?%K@XnlGoB9R=MsPXQL$<1z081G3yW@0y>R6oaNo)lahvb+Jc zG$cDz6Tav1+qQ!5tvnzsBe-3+SkCqNsgS*+nJ^OyfGj_r zHSzttlsO1cg(p!~_J!Dv6rnjpXpAkDZ zjYnBH0#^V?E7~0+dW{p-V&BCd#<7w4^A{hpTj^x9$__LbjUu;`cHFt7z(KjqNZ;na z>Y{QrYrd56Ed-Y-M0g)Lw?4P5iC|lQJb#<5fpdHElx7l3DZepV8N zw?s&Wt6j85b@viSPo+I+YeiYPzISdcvQZv6OptpVb|(~mpDr42^#b_devk4sqX+7 zxWz0Um&bHg&Ay(Z?$qGBvw5mboR=p~dRO&BA{bil zyBE*>lMG*d$>=xlJSp=k2CClJ=mvW>q%y_NtK3z0K6V%qPCb2(de9yqql95)?q~m1 zBndVEqf@WZetg_V@E&|E;(fG2StN@E+>&TM*e};vnYh>I7WM>TcIuY;bvgA}mqNiV z2GHQ33rGu=}6@=ZDVvWuS@n09(|SbfPMYY#Fb7~CFC^+ zciiDNY*{6eGhC-3v*^V#2mtsV{^_qhu!9)sm_F!hDDwwa+JuXCDIfR)u5iEB3>Dcy z?R2D>R2uo=c6!BsUu9;KpKBq}l7ku1$DXJGCvR5v@3@Y3t~TXotIn@uHhSzvPQRxF zHtde!G=&iRlYI#PR%Ng--kJ@@2ky&3Cl4NNQ;8 zo*yPoigr@bI%IOgd0c>PZEU`<;Jxh!YbNa!n_;;tPJYozt4?tAAO$uqJ)whsDJCE0 z9=}y0AvNaV&KR6yQ}TWUa)x~!q0Z_@r8*Eu6i%UZ(~hpMkoby~v7=b^FvR?H&QV5O z8EuvFu+CP`#wSMlCPv_{p+5ca_A*YIZMd8HFW25E`5!g#FPHm23H5&`4Q@Y>yvdF| zJ;4Qu1oDPLmj7-Cp5G5GJt?XG3zJ^wHO>@WbwH0zK&I4by(g+3GV`i62Js9#^s}9P6}K>Of%Kf@Ad0 zOc0kKJ#UL<-@v4{4x6sHBZl9EnVwzCj_pycR4nb|(hIy-bBY?mFCy$TWH@pn9irMO z1hgT~S$WkLn=={+)61vj16Ck>z2CgAGMe*v>edeK1~Kxu zL53<&NI}l?f}+sWx!!+n`=@TdKvOxZXF)W> z9J-*qoxigf>ge>?%_uaag2VTkI6X(ezAd>8t8{!%$xUVM)+cs~uHrJ^fD#_JxZ&U^ z3c;Evg0-iD_D7}B7pLdx<;%sm@uKenA!t)^zBn9OSr6BW#qTvb)A)}DXOV+vL78fF z{6tybeGN637ffG;!@L|XcNXPzBu?AqMdfbu>poy(oBwk>5^IU7CcSx^>>(Vlsfa(+ zdlfTh@Otd3fCmGcE*>Dwam}cugojK0u&t`#Wu|yF@Q;%i5M}5rt8a9#O+B)k=%EL^ za0sp#s;Qo>1I{yY$Zv4>;v5Y9}sBItx6*+cXcaDJzeL}QaV|W z0?dKBT=d_UgS{X&p7a}-P7{}I3hszNEY|l?nmS44vBbkwR)X053 zZra-BhwieNykF;m0RoqWBWR>MD76CskAOGxtD)1% zmju?oZUOrZ41T$+cIDZaKmjaC+q%u=D#tu_C(#of9}UaqXly=-lya|<7N;R=I6<$(Gw@MoTWP4p<9Vrhxes0KIVs_5lM>z7<5En4r4pM54)t1Hq_Jr`^tVd=HH{h7I) zWET_8ll=jw8wz0EpNK^k{s?A&9pqH9P;~SxGkdPEISd{6ZLJypMFK?M=pZ5d@IAwO z(^}Gi`6SZuu?N2P&LH&pUIq4ca(qY%rS>hfaK~~N?b7k+Vdx`|x_Z`Q2c`?%oE_ip zerMWcLz={lzTr%PO%BuQ-|fCNA)ddJep?k9i=1tW<*OKJlSjw7A*@wW7Z>h6Sj2ts zK;W-XDICo`i0@}$kXo8rxl6XQQ9bGK63;LL4&t1v{?oM4{40FCbuPy zVPLpn2coiau1H*`3G~v?UYoxD%D?Y+gOAV?cPK3(5hCXF^WEq9I~YKw$wJz2KcO1~ zX0EAqyVrY>lQZI*pHzUUZOgiyLEKIP>GLg)QtVVg9@^#nw~>|ES#6D<(y^CUj!3P^ z4?^)Mx5SE6ABZwCzJ@$5;M2t)mRW>{j{;HF=w*eR^i$jCArgaDX%+?bQph1DW8%;N z)Cr@)`FhjS@FqK_fU32>R}}+IG?Q;RQ zx^LjcC}3iaPT9n@<)?Up4SC1>d3gA8T4UvsfaBOtRv5$ca~z5a!{bixS;;q}b4KBd zh*AM<^vjiYpZV>Q-z1g`V|p!(FYls4U3{lC?@Nv1pl(s*ZJ8}ioMjQywTMYn3LG3N zZqUhH^d6@Y?x1NYDVImKA+EbEgS^7wkM)+rTYaABOpwBYz~UVUpVc}i10kG6c>nT^l0AbREX@PikOP81pC$FijS z`?ZS^Wh5J4M^;hpbC@(Kn@&;Ml)~*v%-g0VSr`h5_67p8DSDzbG+=0p9I~47A8^($ zEk@_+)B7dQm7~OU__`sh(W`WBzmihv8UAIrcWKW`dix)j>%L2=91Um-Xs5VWogXv(eYZal z>NVO?cXRvh5;=0<03z!a`)C0qdYK7Gc|CvvkV9{aXFUWHn3!qzS+jaaTz01!#%buY z-`C%*D=xqOVU#~$W`1zGefk%pEJH~@PE%qp95eg>qm-4hw;PQIt6ivT#VThb9{cZz zQrc`@zDXdDWiK1KyZN}jZ#_;|hI`S#=cvIlEg1h@lNZN*>?A5dA42!plW|wZ(_Hxb z9gLBJl$Jyi(!03!b_ocQ@sIhLmnU`=w63kPMo~^7^*F&B`tJzkw-+iBY)ETG0hcv> zqrr7)%ZRbhyupJ=4Tfs%@Z|&3vgNkkPCBuZ&MEt}FQk=%?nkyI!icDeD=gihv1mGz zMLZUupcC)h|Ly|uZrEPNMz+tVAp&lB5f=UxJWc;!EI?+7O=1SodjlconT54GKPq(= zce#JI^g#D6%iecmkR3C_Oay%Ppfsn;=j{&w4Xrh^*6t&$tC|8F(8$Go;XPRNn9sU! 
z%dpoYl~iCUE^guN)A7P2XYaQdygW$;VEW}Rgs8;*trLq)WCF!l;6tV9QQQk)(!OKW zLJ--Qn{k1>E6wm}$*Mr*TNjETMjy8y70I+mq2tol{_69%920sm?9Po?IyQgH|uHv&N`?2~OKSm5J*gUHBhjg^2>-#=i z{VI59iAlA}`*c5cBrSQ{Z$7<$wzO~8dHuY(dRrse5$_za zjf>sAg_9>V2{EaH|HWQ3d#+ftY;O?<o z$ZPaxMDSk4nr4A5NgP^2{`;nneKrvP18;yi4-5E6lCG zOy$~02CaXe!rMmwHzyFfE_0Bm_9VPR! zr-_?eR9V^3`ZRt1ceG+U#p+2KTb#v(C7P*HfOL;bz{haQ$`%c1wjK<2^Ce6V!iH^@ zw4gHkXPs`YX35DLp3{8gg+Jbs%>8`My(YBsK$fF9-P*4eLY48x}* zrjOR}-0*{bdTxb}gQ*hEM^8yFHzntuLShs8_&AxbW!D@7qT9Ric$Ce|((-DkysCOG z8sd@$V4H~L582{FsSEg}%Iro3SQ_uQcp7d?#!{f8|r*=}^VHB**$NNewj(^~VN#ntz-2A)y>w%YfWUF~-KfZUP_S@rna-`5gC0e{=J3HOQH-_Lu+LCPiXE>8ui{A&3=waUgZStLL4 zv{VUU-Ps5yTl#TZHe`a94^qpsacAZB+87VchBvpm%+>6k-QBYulL|Yaq)eSUbLPXu z#?S6uukkqDWZXw>s}#_d>O0h+?%I+jS0Y@3w2T@|27P^e8fDhM(%8R;>RL-==i^#8 zd1#yG>L9z*7;E!Sa03muCNC~9mHC>6)F+(lw$J?W+DUtv_u9JKei^J+Q}dNXzkIqH zziK`-s{Dz9H#$)Y3E^>zwLWlYrsFe&GcmN0R za^8aN!16Q45!2~~G?4GLK}sh5J5ne|x~Z$?mKjB4Usrb}P9+)q9_96<%n#)1T%5CC z01;aSQGa%Y=dHa>>Sve6Gso*Z9Hz&|n?n+8!#TT-@`lGs=>J99TL!n)v{8cBoW#rw zi4!|!X6D$jotT+q2AP=?$4oIZGsn!#95XYc%nYOVgRR}2`7t$9)jw2St=lc>O0DPg z0RV$N;l=r8LS3m*H z=%CJ!j9m9Vqg-)ui+IBd>^0ycpkccoIO7@E`>jqreW9}NmD})EfC}|%-ybsrkWe#Y@_YM)ep{%(k7?bSgGkBu>^n54cW>zRIDEUwTTX~+52U) z(6Y0gHb1o3w_8;+sI)5-HttUL{MToW1u7t4uC?SUU2);z7j`+M4#WJ<6S~G7=@ZS?)tg}2f8;x?R!6556$?(WWBVXL#2dB z+ZgXXAy<{LPx-{}xuiy(6yxM4Ds9=Z1;QA%U%ZM#+pX(uZ>g>hQ(2gqJ@JS%zjs?( zo3L?k@H(j$bDp#b&Mq}x4ltk^YqDmykEJ$W-|^@@t(E@%b8y>^Rmb=$dL_x*yeU(2 zTq=!*Q+#zr7$bTbSPRSl_N(qgYeoJ~92M1tRYi1PX7?_98CD$FBok1!isH;qf!%T0 zH;S9PRe0sHi`_>o=5Sv9yuSr)(UieQ3?(Ko9eLe+ROM@2mO-4r@fwl(CxFVgCS+kF zZ4C~{0vX%0tgZOk-w#v>I;eW{OvCp~)mcBHb}+vvoCG|6McH{vfd~*hj3S~FAplhm zgODatUbnvq3p@;KHaeE{a1^H}y}6Z8$t?vd!0h;xkD7p!{5P~fbgHD^ZQ|8{SAm%8 zV#wTB329JwbSHR^S-5ZI)XHMgOe_2$+KPgsEcMdK)jlc7Z)pr{$-ey=&|h8K-wppV z=YMLx16C?scCCX)T{+Bq9}}kpp1chU97G?zv8ZP ze7B@#g@fE9tPmynF}GPnv?*ok+{X?B^Qdcv4u9`)17T$BbVQKv(!<0gVTCk--;Fh$ z()3}JrKY-~**-6cMGt2Q_nx;KoTbWh2pe3O4%( z*0J*9)lDYsj@Y}>=9#$OjxDEOJ)ISJN}_#O&M?Y{5h{7#oTbEmw=4+J-&+dYd$kpM z(`2XURvNQzx{)GsXcmWs*$D$+{F3-3L$m}_++>Sthi>xZR1r?G_tL(V!y(8Rz7to| zG!ie;_Y{h$z~ZRH0uR*P+m46oN`y-;Za$Y3`-;1u)6*3)FtkNMF^caRsg{#(qJb{h z4=XRDQkj1VfRWhgk+A>|^B>y4?-_BV-^=#SqbpNVuHv{SB-8|!G92+H%eTlU`ufd> zt6XRMupJ}4G1jC)0iB!uJaV-xlLSMf@Z%-L#bIzjw)*(5obtH4!-XAi2|D6_YEmu} zq3vE*%CZSixzaxeBW%($d;(E&hz14nfVrDi-vXK!lorJA`RTC=Jr`_Z=0i#6WhLEI zmh?=^Gj_K^yj(YSw}J#hsR7^AbdCNkKU@dLMEH&LZ+-l2ZaQ6#qNXB>s`7R!$TXZ{ z(~5ei8q<)s@}M4VOG#0a>o1a?d5cDJEY|dBV);`rK|qJmUpHH?GK!6RrI`uNmc;kv zBduT6!qiBXKBK=xK98@w75cM`hYWPM?%xi1FSQCL;f|OHvZuTSj{UK>uG^)kpqU!U=S`STaG;KLl;d+!1~!GWW|PKQg|%9p{|*n|w1g9onwtX4cN%l38t zpIt3eb2h|f59efkw!_lh4Ae8bt-5vg-5jsO&8j5HZufmgq`}2Czd3G0V1%81-DhF$ zqR0%#*9Hda2r~GSH;w*Mvr0{q%kUUsq{ILE1vz70z;NtTY}eaIPP4&VG13$+j9|~v zerCGr4TLa|oTI`vdbjs(#pfeqnUdL9rX>nzeQlPJguo_p)odQDj4SrMqpII zJ$YKT|2u;q>0m%Mjx*CX{$8BAzl5h5#Z9M5%e=q;5T>I^Zc|ATQlV*85(GyvBViiU zT-ut-|jC(#i28tb*#cT$ad&sdNQ~T-kx1HGi^va0w#DI>*rHdP0n~R zNu-bY5#PRQc&@_*DX6*^$ZMBdC@Vk3Y0qnnv5~D@=CG>M6Xg}wq|T^%_ic&L zC5;V_k4uz7dVWnhjUx&|B_Lov&)N8l!PdR$WHCKeT$Rt+PAnxS4G&M=f>nq~Ef5N4`l`}yk z=V&!z!8xI}FI}0+-RPl5YzWRdcc9AT8*S(K*=^OZTF|q1u|yaI*YUC798-`hg!l^& zi%l}habU2p256wcyg8+r+Bo*1*hJ!O)_bN<`szz(amG+ox3|F`b)4Rk06 z=TteeAHLXjskyf27nH0;TlsUt(#~5j9rL2MPZ|}@@93Bq2@R}MF854y=Pc!gUejEYDt^Yj!>5fS+zQU_BMZ_B19TfgO z1`Z&^1@p*0UlzPDiGmv7a~-F5SWsl-i2KZby#s1EXM#)ihu-LaVwZaNd&qjljUXwJ zvMjg!BUAWE@y(4lNy-wd&laM9#KA_B=#R*!F6@iae$C281cvLGc^y=jR!`V2Y^zjL z;^Hqab1AUU_+X4TdIVGxyleRQZD^q=sQ)yh^RI=&UpbW%Xu|)*V|BpkJBFiTVPDEh zTKGu&F#0L<1)nd-{H$SxSvi>HO+}_*6qX&S_`^CoJE!1rzrH$^S^uHCe>}}9V((|b 
zA*+fX3kWpnfX{;r{qCEkLHPdMP?4j{AD9_pd~#Bjh~hzYgNDY3_U`V!OoDI%np+>z zf`ZeoZQX$NgelJ0e*OzD?d85!mm|=7*m%iPtNqQYs=r&iu)gKl)0&1J9>G2?43CyD zhkIFkqI`V3cN;Ojw6tp(OYtF?c7X7KRbeI;$w+{m(`}{+t%$ZyLw-cS^it6GwSxB) zMfIkSAWX93ab9G+vX+*qk+I)MnMHLn!U}W3IwlRlJo&VjFH>(w`~Ax!9@J63&{@hT z47Yft$2w!(TI+uP;#+{8VQXL(wXu<95S1kXZr$ow3)X2T;Z1|;XYRZ!@+1 zhnriEy(KTGsNq;xo1eX(F&3AW5D*Ze(1Jub)Qy;-k>&MSz;y;+s0eX$)0dCH`qqgH zoFtTbNP+PN9)$5Dj1Vf`#du>h@&+Cj`sg>}lm*m|-K0javl0wl_nAh!&f@mX0<^l`fd3qL@cf4Ixql|7cFQY;4)1Gm$$#svJ_bRB9=O<>vDUV^Rt+f!7FBA>f~Pf(^BNx ziD1a&Kuk&gWXc+Z=Rm+?l$R308!%%C@zfaN@@bLQBQB@E@gY-JUj1C2dDFj~6q4Q3 z=Q3D{Y7zfn0axgwASs&lE2uF*9n!P@Sg%6GFo}bxAMM4cv%!`oXq;>2yvmnNCan_o zJsB%zIFHjj7p3Xpm2_TXLS(!v#!l3}d>C)w&AJDtxZth#b=_IF-h9$gUGflSVDY=F z^4^(IdXi6V4E^QCNJATstZ89GHu-{%g3S5g7pEm3sjvX&a%f-?Sw1g zowCdSotKYqK?oJEXk*CVQi7w5o|lJ#CuZcyz7Bd8&AEv*F*|$HdsY7QH)_gRVcv?< zFN%C`ujzpxWwboZtzoCfi!((h<*vtW^*w9os7Vd@)v&KR(Z!1D)8;|7BvyNvSB(DVFY;&qBJ{;g?g zPv8nE1EFl^sT126H7#~_=-Q!x{9GE6@?)aWa2U}vXQuX_F^(s`G-nO5xKqY)7$rIfeT zTqbe#fUv^Is+`IX!EEH^JDm9_ z{zxXi0E$*LceUS{dl&4rT_iZ`cKpVT5#{rNMw9H{!4gS! z{2~v-=f1ep)HRUqGC9}c?2v(htIWdO(1)4r6(1!Ji!B^R09edF;QWDLq^Pi+9^-O) z>cqxgCtZ@CRkTmh^LX|kkZ!}%zW3mf>VtuMMmudirf7`WWm{4?$UOIkDi*}7U zOUO|JSydopcl+cL!L8Rrr)@k6D*2wLi?FAXpJIxZpXgPamU5e|ue8<&OUrijIn(DyNyH+&R zYaiTnB~>n8%G@0_^6RUJvw58RJ5|p5B0cpl%Il-YfA_$$fbVhc6OR@;HPtWLdQ<8d zR+#nclNcuup55Ch^zkzHi*-!l;e%E(Bz|&#L0;n`hCrCiD%V}BRr2SX>#+mP0vkj) z-?iri;tC;Iw@1$#^_4Y{oxe1be0VIz#O_t2+mx&-y~Y}-#;h**Bk-|xl$}iQIHJE- z=ka9^JG#pVE^f?Zu8q%reIR*8YwSUst?7}>>ok8Q>Ls#J@4i?=*)yzdW1wQ@{mw#O zkLqAh;#}t~I?`I3=3miQP~B{HqE^3S+yFZPrbgJGIgRwYfrxI#BEj=HER zUY2S2Bu(+jP@C3x;>L9fqsilg;PGd^B)}@9VvX3r!7qC9X`?$10RD7YBwv-@GI(h* z747b)ZNHrrYblXW!w{KSjqD))d9mN(|`9U$QfWP7|B z)%-IsA&Cye*4B!qKduR7U?u$iWX4Mh^vy>u*IF-a(>OC$Q=y)lD4x$Q&m#h|sVnj) zwp+O@NhqvEe!?Agr!R9+ydZ2iyjQ0l|7P&;5C>LOy0eJ`S<>i6Pfw9^XqY5Yq5%xh z;2+mtY+#Mp?tpjVZA-y8&vfa<=ey3&Wy)KN-0%0QDWTiyZc@9NLX1rDfv-=U>T{8{ z^M0M~41UFw zI_m&5?X!jq9c9y8iVo@9^YcnQ$Gq{uVXyrxXkl-TW=XY(gofS!az@KBGaIWmqpTLq z`-L44lm~f8Je1R!b3yblwu=u2dHUSW2}}Xs;@IDW)Yjr_&(+n;pGNI>$GjqFm|8hW zF1=kHF_T$dWrChSnpF8e`MI+bf96+xSJ7D#;wA{aHn7h(WR;A6J-Qe4OkNzb#{uR? 
zR$M*jx<>HHs%Krl&tD;Hsjhx4bxCYL>`NL=wF z*IG?!CeG^y3;*i?$v9nqXojwIc^-6Ge;V!+HHsBs;W6(CXQ?=LY6UzDU3;f<&2(}j z^&oX=yfkN%5?C9^mP+p~7Ez5=)-0;D*L=`^4}xHJ`4%`y`K@#E_RY#2XKxv*nqIZn z?Mb+(sOgxfK2CPck|C+TXUVEU9HW%4*?n+!dYdK4_t6;Vv*jVQ+S^=>abc*OAvS)T zD*|&x>X>i*VmGn0rj`39aa>~YZm>jp;&V*~t7)BMVwJ;dr`Tof?F+=@8%!^un-gwQ zOeq*eYMddE1j#U^1J-_=ag%kjMjngQx6rz6K!ki8p>Nj&@@`GRJL5)xyCg5K^n0|{ z03yKI+M1`M{@bP_P*E8>f!LtFYc%dM=gin}moSSIOr)2A?!p z6=_rUh)MY_eNpw<`#0ZqHmfJe)A|Nx`iS;ECOS@Mu^A1qi3)+Ram67s?46;28_@Bc;3M<^ z+)xCMnzIB|X^iP`wX)2gt+@MJ>Q)o0>#Sq@3lalJLgn_jF*{dTR3sTmpPT6ttbq^I zaBzmmgq}Zfj(YrNG#)$pB##hv_ECO}cu6>}*& znr0fy-L+ozIh+np6G0*Qp|{p$k>A+TPdLuWnhWGmg=xPr*2V z0pRKmald&~r3#h!J)^k$j7o$Y*)#EavgUR_#tohmx?Rz=y(0nC$8@ntYfs&NUMnm0 zuf!DJ9?wVafUw0!Oo?TFt$FR99DU3AJ!sUlG}ztLr9G|Nogi(!ggsYE6j_b_j2dc{qM!15JYMXki+hR1JodjV zrk5W#Ik%2+&8lOxW5A$hjXChJZiRoW1oTg;SdV%dYoXXCpcP?-)h}Y;yx4oW!5Ofp z4gFC6?F(>YqUZ{kmCzYS;$o6q6w@biy1&+~c*0v##ne8WX%a&5fugb~hTHS)>JPe5 z)==$q3`5x4%bV+h=s_JFocfqR+&k~H*IZ{O#~yx3%8>%Q8N0i%zG~x!pgg({lscS;bl|L<*ugUEMGS$Qa_2=@sVoVFa(Dz+ zy}9(ScA5TF^&w*Zr@s5&FUI~|MgFHo`%e`a^nRpcEpAkwxw*;vxqui^j5GkzZ~Ny5 z3<2gO6ui*q4C{EYoNuTX)dW_RM;Yzn&g*?)o%*)i$4m+O%+m&D!Z`-N6N5w$Qbu9* zp9@lU{_ZRJJ<$i~9uqaN7*=Y-!)~a}wHsng8w3gGDMmp@auYrTQ&sNc!LJhr~ z$CcqAGzE72bT^g{em|;QN|s`dQMdL;5Bt)v@s)tLQQ-V!DX1{_FdiLpX{d-i8KWoF zTOXa+>JXi5DcY?_qsJpJr=eg5^<7st64eweGZmuZ;iS)c_G9!Xd~3{61_)c1_&ie4 zGBRWgzPSFnc?g-S$XQjXcX}PAF#Xu)-yUr9$;;~gYOHVIkFw^sEPstuQOX=_;-$gQ z@ho4!=PVP08ogdnU+pIU%_kf#g@8IV)E$4!ji@C5I04j(xdUoX`-|A!$du>{AKkdc z;&<({UaZ(`9tCXXxuv;xg?A{l*g)c2g{ap*0VmhE6_l$V_u{7(002Qm*V%`J3Pw54 zUv|l!5GbCaNb=*|U1v(2#Ze zo}GdER$Amqu8o3J;bC^i@Rf%JSx+c+n_Gxou$h9+xcaDT|A|V~bf!FS_~jyWSjJVr zEkMiQQp)D0$@_d=e!Q-NgqDN$%-dE*T3X|;-_gy~3YH91PP-+x#=-t6MD!DHFz;jB9m9BwUwK-%$vRCCvTP>4o|!+H4S z5_)1a5acard~rO=C*bkni@R^6-gi;xG|m=J7tpjNZ^&F~_2Llzxbt=7=yj{-ZjGKh zM6N9S`qL-l)C{EC=X3be_ody<4QiKMIwKxx&Raf#5Xm?=VI{E2?EyYMKU&zn*F{*s z6RGK`vrU=xgR}}rNt4rN>E7-3p!Ca1+vJSX_3NUbrq;;?PpAz-ey(Jjy2lj@P;%&X z>mQ2hiK#a~v6dZ^p_^QH{9WF_eEp#2nXJ#Fxz(5N-lQwD6S^8<^_8D#O5t~9neWY>IC?8s zQ<{~`bQCrW+*C;GB4TEY{Q(Nf8z)hN)}-*Xwh#iqQ8gdmN#??7TPk0rFll~1Lixe^ z{QA=ZjLjsjR9F><;$H5VJ+?hoj-i8v6%|ym1GUDft>vUR<52oy*W~vJjjhm6*IRL9 zDYeNihMFey-21~yV59c;@6$cObDa)ZEv2)&8A)#*5-_e~jynN!hqL^4U%w^Sp!A-l0(_ zO4~$p=O?}qT6Ue8vP*;q==Hcmhba4cXE?w+jMWOffG=44YG36h{r&;?tC4<{-a`jh zwpN(?(lAIz!Vc5%)YqBDH~#o|jDKuyHN!(Z{)?o^ULp*sJ6?gu>$y0Y`^SPVXXjX} z(KQaP>Pc2;7BysLHi5!7g$_9oJQN!8o1ltW`y-Iq$}i9woRIbq)Nj>UNjf&24;#pX zzsK|Z>ACYK3OyK=9(vb@yD2k@NW5fuzA{qMf&Cv1Oa(ffJ;3!xoyesLDc@{4p#;=p z;M;rf-YWICQ!{MU8Lf@o2c;4m?DYaGzLFf~dUWeB5lq%oZhN0W{oYWBF)M39wAX@?iPIY ztz0n*_w5L|6(CP5eD`pMcXqBM0R&G!^_7{`MF_Tj}7>(y`ERQ zX<^(x*AmZ?0BKNOnL6o^aE@ip3O69TrdAWv6-p1eN} zM%XtYG9?Jkw|p^`)Mr(<#tiQSGRWaQp?Ok1@OSyt9B9pP-WzjB8B66-U55kgP{lax zU;*2Wx8fhr`U8rO+20Qz6#vDzr>bl6>qGb*qi10Zmg)civ%}9)z4&mx5y7MH63)Gb zXosYcp$Ppu*CfR;<1#APcGM50@z`YOJjg`fY;?^HF$|SJU@A>GKty&$o0aZm#k87^ z^ZH^VbQTReppMlf=R~YLqiGNS=}h9d!7fKge(%sh{ysszZu-H9A*(2PUe(+7bpzXH zWz=)+iS2MoCFPQ~*5Qa;GQ)-3+szNxy1ckbRDi~OFb>%?Y~)K==-1(XeIf7I?DuTH zX?V5&tmzf4bF$A|w>z-q6v6`H;^N$$PluQD-O4E5jGYLkq6n$uvQZ0|xk{bc8U54frOJU$)f{1%&!+ zC2Gzp)s(8=6VgbqcA;m3oSPG2e3PTPe=ax!nH`0ikVjBm8=F7WD)h5FYd!h$lre=u zQ=%^yk$}mQxHWLsXRqjWSfVQ3Ep}OOg~!%s!@-0|wHTq!Yv4TF5^N8Ev54sKM~@Q{ zRq_yO-FgCBd$e~fkO2GX$yHDUX`cV<5H43*+v0ohbv$X_jpfb3*Gt5jdh^31m&k++ zaoCQ53C?`pp_Kydgv*aHYYua^nJ!O_ohuf@2&4<`W%#}3P^{rz)9e2DK=J2 zc+goT4Nh+9zgXPwn+)dTViaz#MMw%9QPu`d)`dWwTSa z!^kWjT%t4MyoZ5!Pf4{$ea(4Eg+}$Hb?)ZcEv%x#e6VlbpFh1H)c@31J8E51clRP9 z0bmgFiuNxU90EwXS}dH|t25i~gJx4SyB)wcwrKmk=~M57d1o(D6v|D#y|bg^z6G66 
zm}7W&NU?qD;JfvYEImK?sTHGM+*hv%O(F161j5;Dk5;$|*jAKNwm(Da$I6D?yuH$o zcEc`iIz4esz}(zlV=6%#lcDGz4%Tq08!Ht=g_pwXv%(M+)i-&iK%ydqX<{ zjT~fC=W>m9gXT--c4`)QU2hB1TlkzP*{-)|o=)yYTYtJnon{u}h)q*Y1zy*0lUU&7 zjapXzT$N64&d+t_^oMLVO7t+FOrOrq%qr3dcib*e9yl!y7)T~&F^>2l!JLxeGp)89 z3EK1&Ki3QL2a11{k>%u=-j7N8oNr{A7rNDzJeq9Ur=vU}{#Y3o?BOc#gFc#c!{>Sy zS^l=@K;N5EC<^xLN~E=B_4Yv4XrthG33n~E+Q*N0) z&xt=j>i(V39#mDq*^r%LX=xFMJ<)54@LNdKkh3a2^A)U;&bMP&X`=49wNeS{Px)!X zF!4oNrl%**xx%SZeX_WCv{3`qhzj^lWbgZl#kV)bFy&h#$xVy92}BvWHeH!0ZHj9C zZRG;xjLHvbY(?_g+q)tMt^CsaDL>vICb*O+WaazIUZI9P;RsgnP&Xs;f z*dZDj%MSEs^OBm-anJC#?kcz61PPal8Vc}n8Wp|N1!~8lsuI-?7a3j5Dzver6OeXu zXiYTQp&B@4zs*^mD2kyfSZCaob@FOiPzjS+Mr#T=-SR2b9n(lwBFSG?d2EY81AM91 z|G0+5M{8?(@`ppMyKYWf2S3k1TnrgyiHoqksYy(}4S$vPJ|<@~Er$cb2&|JaV%`B) zHy2kj#>l9cnx>aaHQDHt(yAcm=#U?azZ3F)jOBe41po)FrHF1)A zmrJtqSRIO!*pi}sXB5Y&$K3{gO`Q%WzSTUU3ur^pJ@&qV;M4VtUh?vkdj)5O;6eOS z(ji8P3led_N+@;S`Km`m++`i!FZ@XxcOEXhUZ#0B%b%@oOOGU8*v*}fGzy&8Bgy_M z_VG=M^Ux*mB!j-ssL3L{oZ>_opbgci(A}mUG3-tv9OcU~hj|j3)q1z*e{QoD2B})S z+chx)KHmm4I_m{vmD4bXFKfy2m-!D;%iK+BxE`Osbz z?Srm%8Ab`in7fiV&yk&C{2O5y07V}Fu1MoYWVElDxs8)Ko$iO8iEgS{vqf36g{8;c zvl}Z*(jnO@kA0~4-h>)ngTuNaLHR!5^Wrpcz~t{m+8S+tbl_+DLmQJ9(s!T7m&zO9udc?lIV~gqws8`-qWkIQNsQzXP64%gSt?T~G$ny+&S@Yh z*!#^*g^k{M+3y?P(@if&8yJPDFDh4Z>!Fy#&c(>D`iAin(=ijv#!-AD61_u@AfkNgPiA09n&y=ox|e}s~G=A+V0kfLIz}36>7?R zQgGzdaOv?>HOhaPoBf_gMa_t8+3b}lz85I&iP5 z&+G0Y5EW3T%D$5LYW-E$!UX7dOBH*$s(Wg0kF_c3e0Olw60P!7bX4fd3@go7xywcV z^xW)3h8x<>$}4?jE(^y=N9E(P{0dFmq11^?%ob+^N}HN!fkdsPv)_jvq2`tpS_*4| zrq{dXyTt+^d-nTB5bIs8T7~WVvokwq_VlX1n(?zJ**>QWf3;-SV>DVl8OgICwlRy! z=~61X+@o*Cd%=Yj%HJ12nMD+3)@S`!cB4)zjwFy+f4Y*_q|a{odEf{`IYv12e@WaATlw_>`>BK0-14vi(>3tJ{q&y zeo)L2bmS%cq`{-&B*DY;Rf?Ly&dM0ud@=f(am&&BR6{?wNiuoaH!dMzww|Qq^O%}O zs@j~S%9QdBBH+zp2$uNiyB+(M>N+m2yOd-vat?B;p`8D#s<^Jo(Z>)!;xo5-esu(V zd0nO0oXKJYhmu=^U>~`l9ig2GQblQU%NtB79L`zQr)6ZL+HfZ$w>2(>0!Cf7&h73l z+hz&{;yR0W?`O4bZ2)#=Ybo$~{k{hwfbi6fz`H3H5Q0p_&krlz|)@ADX(e-m3? zy&JP41;k>Yxo|-zf^Dbmf$(jg2Fv>>VSUMr&$9nT##d6 z9Q3ERoT+8$PLIt5N)*s%_GyIo%R&?L6R{V;3?fsf#|yn)`+q~Etzeq{GHdfc?!`~hP)cv7@D#;JL`*f{>A&TsMu<{A_L`WB!%88J5~Yf$MN^6&LA@@ zLzCw0t(cD3>FPn-gTV|1az(k$i+jd^94h)X_a90bWNp)P^DN`muTbI!hj;lmBQijF zmuf79cV=LDV><2kkHQIMch9+51(U7RUGO>DFf730hA_Gz0}t3+t}KOt01OP=z1%0I zrguF5AcBEGEJy+^@rvKCE2*Q1Tgd~ozNNKVUSb&=@IPjdYRitg#+MmDpt+*8f;2+J zBi|41^?sv?L2IV_$l$^3)a3@w!c@p9+w0vMawHWKX)sbr+5S(pr#j^N99`XEZ_i1t zwmLqx>LU&1rXjA#`t`rH)7t|}1zh4QJZ*dKt{GMGka-$T^YF5C$K{+O2*QYN8(M4A zup1QHIH3nPPmXU!!4_x=6P+c;(@FH1^>{XdH=v^(B)iV~m}gHSLI8NtHI-jR$@A3Q zcfUn08jP~}35lN63rJA#w$UrsJ8HNy(O<-uw3i5$N{`j{A;P#hyW8TsB^1;2=c;%* zlSE#n5aY-b)b?u0cc*y}(~fSqUT~_&I}=oX$W$ zhmipuK$LWU7SJ>pL5}RV6W7(nP+V?z+Iaju%-6lXaG)YUSUf2@yG-Zp?2a3=?0)^! z+l5HW#U-!WZMKC~P&!bc%M@+5ylnJt%4t+n%xSO*zM}hE@lLN>^5NmoQBmL+*&N2+ zz2pI86>%ALI7fl^kVsQ|_Su|E6bncW!HOUqG_xu+Hw?CZama~y^Z3+BnWqekj%LU? z|MqrqhjrZ_1c?RkFThmhlh*pOjLf77S`5jmG-8QiPYbAIk=QL6cL1Qu}cg< zwT=&o<9J5AOvm$!#!W_Z^v^Lzvms;ioB_?uh$CbqI)lRqP$l z>PJc7fJ_ssV&x5$8HCcs?az#t)S=);7JYs@S|AsxU|(KWznuOJ;JjP)@|NQ(gRx1u zeFl%f;NQU`Uu&3&;e$@0Tsi))=xwS22e7hcC<<4JXBc>JaeMDhN?qPX;^lUBEhIjE z*HC(&pW}6TI3xLP)^UWJ7;eVa;moZE4+9fBAAM?D2@fuG8^){FDssZ-n|5C#9`IcB_uA@-%81n zygrCMCX6iH{WNv1teU#9ONMdzNpIDEZtrExI(8f)Uz+GFi4XiNC-(~HIS23J3I7>R0xh@_V;? 
zk?O|E!Pe1p%=;0R*;q+Dar!iuy;Y12747S2fIg@3gnaMXKHNv=YLoP2w>-D!j|x@^ zYccBeT3YQ@2>%BGX?h6ul_4d}N%IYEij zVPewhk{U-Gahq%6VUB2*p~c1iwM=RQJPq+gF0Hv*={eYH6l9OB8WGP=500iF1McGF zerT+=dehMk+)MoWlFN3mCP+orAM(z%#r!(iSccXO-eOQ$oqck(emufyyu1?)E020qU5CN zaXsDLXVwF)PKAD}e)Sef%{$(%7cuF%h>2A6Avf%k-VdL%8DKR3giE~}5;=>}4F?MV zu#ja8lJrq4${th2sr_p0oFDujvzzU*%Tzh=UiGKEKibb| zu^(s=6I#5u#_AgE^eaz9YP%yd%V zWoiGLsJbwy^x_Qj-}J1eY+)xusdi^I0-l%regikr;fGJw3mj`rY(KX8TxxwhKSTx& z4DWw*5en}3SeXR3e^#Sp86TLiF?+&uc6OZ26t+gp_YuMEN2qCUC7nw|h4py2y63zL z^V+Q<^pXqJ9`Qt;M+ptBj24fQ@~}}~K21FYNk3DVcpwEJMG_|_`q1k^yxjU-dSj_; ztd@LkhCx*Wm^Q3ZheJC!b8x=NA@-XoTt;|rEa!8nq!8U3Zj7ux%btaTdD|?o0QMP| zL-XKtf&DfwNTXySchi(W*#n=UxT2hlOl43JnrR4?{q`myUGrO?&#I@FZ%*r=5}EBA z!u;r7bzBkPcPyR?%b&WxQGMrcc$Si4ad*CkhNx^O*k80bW? ztwBAUcE{}HWxsfx_xN0tCaqbkWP_p`Ax#;2=7r!nH5|gOCh>Oa6Ae%a*U;?N(`hpj z<->Z%#hic`OVzVDmQ)wEX8CvMJ) z4TuK}7dm;sw8WNjqMR`c^J~5anxY{mJFx}<@#?%Zq?C!VP~U9$wt%UUrY(VqgE`0$ z+%IE5Ow217oJ&7TE!W*;HQnkUHMv?3n?i`6P^vGuP)OGakz-{3JYJp}MQAcpVNpGA z1w1r&A$ooNs^Q&Nn?g#N_k+ILSCxeTdBXK3@VcIPV${{h~ z-y{_Q=DWP+m1a?aRlGVN#r<|**EZzEBEc?IXZuPDbLtt=8~e8@8G_iCqLqx!#Wv>Q zM181(ol`>dq0+kQ+(q?_KskCX<~#CMR6Epv#o$WW-g~-)dzSp>P*1mA8ikMx?7{{o z8P1B_VprMlL)_xCTxl=iFv`wBExV(;)OOYU_tZEz+6!290MK;sDk=;?SqqdtoT6Kb zcm!In|IX@2``6MVp2Y3-@^@AeDGI3*{){aa)vjMVy` z7b-vxc8nlxo{=}$>21wZPyUQW@t!I_W+=OEX4yTtp5|Ate#$EIKn#MdpU}6TvBi@gSxi=dE5J(Q2&0pLWhWMmDQyxCTWx%V%6do zhuO;54eov|rmLc!pC=|-BDi^2%Z1pxRHS58j&7ddgA-1jT%N7?`zb38pXq&GGbzTm zOrI|ePf6KW>sZ+9{-G?LiT00M$u;d*3)oe*Of@0Q+PfOeOf?r|=+$?4c418sI`l%1 zw~6t2iFv_r1B9kl4jfsJ?Y+>yBwXxuEcIy$VN-&CdhopIm_1u++{v(srAYu`?{#A_ zL*hUVU4wK})Nt^og^&r3D%ZkgVBe62((&$Ta-%Jo^_P+={e>>&3?&mPPM_7VMq4X} zsOpfZg*RPKJ%SqNUC7$Up7Ib5Vu1r`w3o$R(jIc}Z{cdE(+3RcNSwr)vW;mn(C2_7 z>sd6{ZVxP-0K~0)+34bA6Vvub_vw4DP&*)Zu}iw{rmngU>I_9cHy-%e^<#}oqlUmO zH}}wp^GcE0*NAkeSLr!kbK*}dx|wmx;)@bp-;EW|UB}g_uK{>UEmQl%@vmunW3)Pb z^9^B@_b_ECqh#w=WhEt{k&%$Ox~_l#$+vIMecG2dm#3o-Z*<>^)A6yTzq%$^{}+|& zViquTqj~v}@kP!~R9*stqr*l=IJUe&E@n;}Do=Nmmd$G)n>_FHyos(@s7&kExxMvR zAvkwa0y_O&^NZYQ1jXJm!!OhkA_|&XI>tX31%!;tgH1=nE4B{Y&oEL-9X@ zJ^z1Q)&H8x*VUoNJUKZzq|f~O=f6f={k;^7*E^Gu|3~B%bXFnYbG2JR{>J?hAN1$P z>XZN2=53gc84dBj=KVjGnZ2UUUws!^i>5k6=TWy-G_p6IYgalS`Nbtvy!g*D?mn#} zf=A8LM$MD_m$U6DYjFM~dP7!q^)|>cM7rPd$rCbBihFKB;`HV$tAAgGEMU_qf~K^B zW5B39=CS#=^%*VBj&>>?wL0zY4#}yt#fc;6%)8qIRAQutZf)%`Jd9ALfmu}mx3|8r zf4e8uPVT)}@d{jP<#xW_1<9PyuZHG+!jFX5(7|0{zo>Mb98g*0>$qNYJteE%<=N;2 z55XoyPs};zIZo#UpF@zD5aWWO(01VSRmhhShm;<405H~gTY-(-Gjn}He;u5`F+MhQ z)j&k%fep3i(?=9&C^6ys7NUE4`QgEkiY6Rai2(-iC30Pp8gra{gZg$1&;IMl6n>Ub z*!p%6HuS#{K(Sv0c|^Pj6hJU-;0+a1`rgwVj6EtyT+V{5xV6O=0&mQbMy0;a^4T4Q zasTA{g-NU-uclz0=vMw)BlPRVLk z#&xpK|0wLNgQ^JQy}yKnhyoJQNT(ohKsrQ}?vM^?q>(%aRHRF~LAsA{I3RgwknZm0 z(A{v4ckcVnoq6w_cW3|E+1Y2Gt=Zl0^ZR~3n6V>BS8lTvX)EXbsE{+SBbA&~?`u!N zC+haPx>6YP&PZaLmMB7Q(5X<=j5W2E8epO>^4++x*xGbP$xSnsM4E5ISkdR*Kv|F} z%BLOP`AvTTYIEnC>0#y7Gmtacu@*<%j%$=5{)zcRH+xAjySW~Fj_a(JR=gHTE?O2Z z)A>pn_Fmt9qnR{Arr1gOF6I)U-wUc;Gvm4Av>LuLTBBmeEhO^M|j%1c}H>}3xtv1 zJ`-$`+0^;({>CqQooJnW&Vd^rd6}|a7l~CaXl|bhmk0!fQW>I)HzK~r>a03k&X7<`PEcYU+56{++6K&hl-t@Z`#0C@n0i;pNUFC zD;EX5H?^YNM%&Gyh25bU&WpxV((h51@JUfE8b|gUX&uV8xL&2RgHPYA{cQCz&b75C z$DAARfp8B17RBJC$uwBA=%RA;K_FRlv1p zM%&%+zy%82_Ge6L)_L2wKy$kaX?Y&{2GG^6nObM3 zte>V92ri*D+}G&a_84Jv(E~fn@h~z6L^}=#mFO4GU7n_}pvd{K?lHca#Y*$^YDN{r zD{LbldvjUc9262m?NG;L@ePFF$)vquA31o|m!Tx(S8ZeRTG#e{iIrO;xs@P_f#Y9c zes+644>ol(`RftJ82Vv$Rt^K;nH?Ps+t4p@9sz@CqV#=*Cm-mkfoH&*CXl{>lYm74 z%Y>?A^s^_5TmX<{NZ8^Swd;O4Bl>3T${e@eVjr)rV3;8<>EzrX*w;G=yFbz| z6Udxr3ZcCK0C_0`UC(`8ERS(^iK0c7rJq 
z{k#H>+23`PUXzeDV@P@%q@6v!$PGvhPwW0Jp1hyYgys?T!*llpqK^S^A9%*N7c@dB zYrR5OaG{3_*gMl}9;OkCIYEFb$-||Q!A{ukp-Gj2;xL5_&rADobgEif&x1Md0nxJG zUoTPq1)*ztGHwG(#ImQndLi7ici--18V0kh403ShRN$3CYVt+(ZLEIpm6Q=Ql#Y>I z^l||0k=lLu=0)4A)`HBVvUGd;=HB_m-eK8avytjzn>Xh-AIZ6Jk3i2mlbV6+fN@bU~-TGG9I%k2e z`5Fx!mWf8dQtWlsAd>&esw?o!M`dB#3}PN!U^hfTrXi>7y-{LnJ6uEYjx9uY6=Nq2 z=S^wF87qVn@Nek(z=x09EUBni)q71X%|+X4K2>?g6yby8K@&#ypAo>{ZZCT-b4(HKU74XR zCR@%a5PNVd+ZRGrw4e0!??UM;rkgIh&|g+RACP(Fi$F|)`5dhlQoR;^90SR@=%P6} z`A^t|t7Wp=-YYAnQ1s*yBpWbjfJv0X&} z-_l!?78kOECqTz*VEr7@_wTtPl_B?}KmeB+NIA$_E*CGv43M+*wnc$_*YuN*p)jan zG?WDHsidsbl~7}`_%%#<;BGDRn08L7p3oBL9<1bA5V9Yg+hPNzq@O1A$;Ex@_mE+{MvCer6o~%gdQj=v%NqG>nnsMhlj; zVmhPn7Mo95BPU$SMhvOT886#XKbpbqnD%Pog6Gr4+^peZ!Y5GlIF}~lGg{K3B35+& z>b>AS$_b`CqXk}{!@%wXBLR8DF1J9O<$$plKd0GTf$-?ZNm`se79R>Qd**RhEq-wZ zv)C4e%tcyH!or7I2NC)?#UH!-8*ayvtqc)1U+U6JNXnc3=5E7Sl!5}_YVREyQ(kYG zyV9-weU8;dCDzpqH4u=!K#IglprdL1IH~v=r%GqSQG-PkNdyadAO$d6^L0A7*Nh`+ z_eUzox03jqXHsyQ9c|3meO(YZD1?_3-q)GB7e6|)a4voS7F-yOwAz`n^UO%tow_P~ zaH@6Q?mI=5lS1c&I;9F=TGCl;5wk*Z2nD;V_I{gr3Cj; zUg0KtFPGhNNo249C zzo8nLHMDQz9F7j~P;Sk?cUJhEqnoL8_YIMh;VH@?=;&+Wf186yzqxmno0y@7NUPSn zF2eCsbzH@1f-ymlT$|<_oNivZs1%nv%=!_7;3%g64kLRq9kIKE_3`a_1Tuf(6<=!G z{b{SPviNO@#N$3g61fBBA|$exX8vz6DJmD0>EM**>P6s+iJ5}%ucu|NH zj-sL^6lbFMm2ZCojpVwZjbVP41Dx(#q$b9XQcUNn1Lz@`__r7Li*o%>7DVl0Q16yC z(XZm#*WoG$&l^3sPttbhCVuA1^w?PwW7rH0sSU>(yfUtxfU}z--^gC_l#8?e^y42J zX-JNtk46IyOEMQB7WU@tXeO8ur*Cdo?>DLmdslZG2Gb6lDr+Qsocy%H8j|u@E;>_% zrqEPxJC#CkGKpSK9E_IekXnAPkevDW(_P92B03G?{)SWhG+zE&vS);84mPtG=N?WuAJAukneO&2VviB1?n2 zB>^6cB+@RYkN&`j9+F>L-+UEY^CWsaTfm-f>(oc$N3m^%khjcjj|HayYGev)@V>x3 zDDscpQc@{b#o{|L#;?Jh?#49Ym_UAD#3BVOMl$bpk|O0nv?&Ju7^o`h&ihNuYWO)L zQT)Q}fO{)X;_@hip#5AU50WBT7VdsRzGqI$!|u7bK?0oe&k?cVZ@R^oRKx~AQjz+n;vljZI0-O4)S;~$EQPHs08`X5n|3lXtu(Dl0) z@f{_bXp+ZGWj_kmYkn2^Qi`c84)~1!=}EE})}&D5B;tYFGjw_T-d8uiDxXwy5K~Su z&V8=MHIvQdzMIrsXkO5DLH*^}7<;iAr-KOhos`e8q~TKhp4ho#P7_@1GdO-bET>;$ zOc?+01*uV`pUs*yRv6RDNgO`-OCLqp@R6NaDx1J(pW=195@mFt2A7Z4aZ11g!u)8d z9{$tzQ@7u-_2fCR2km?O$3O$vkn==!<8%!@mB;G-dg{+?2r*sv6YM%Wsgt`@ohT|1 z4r?15hm9adgPg8wV;16==NBxC{m`#7dlE7z+oB4t3s_<-L+sK#XF08rqU1XxlGJ+@|Zlo9a7nM(9&6x zOTz?z^9u^Xbd9eT(Mn3}{uqGlbDDw!1F@b4eP2JcvB91&+PnzM06hs?;YKCNw433E zojrIv9~}%!x<52-Re#pui^2vtEeoER@MhfNfSX#LThe46&0j>U3x^UTUO&gzbP}d~ zdHQ3SL5L9fBn3G!uaC9m?B7cFmADt<`QVzJBF>Fkg(YY^y0tddL%?MD1HgYCj`&TvshF zEd!r1=7aR_FEsY${rh7`UdVicQoWrXBOQa9mRn+9rps&bYa`sTL%- z$&zOxstfd5+r{m9qTUtZ1-@8Hc(NV=3q&y(cwBO!j(tpic%2ysRz@YN{Mr_Pzlv^d z6uG?~l|DWIfbu$o7feeE>OkUmpI4Bugg`xDX+^^2a_Rm`XK=s%`a_26`D#P_5jK_k zi;~tk>{sGD=J``c9$_%YgWK&V@hvgv(I3L)TrJNh%*;=Q!QGyqSwVY_6~Ykuipk4E z)5yR*xuVU;+bGUNa64dUDda{=c0N0z z$|FqXzvzMGRyupQ^v@d!gK>F_s{4n$UMiR2619XhI!E6`A#1 zxZ)nR#(++pnq>19EvvRDMC#ixEc-o%Xx-t?;9xDKNB}k|VjmLUE|rc6)MSpm(ZxXn za8%W9(`;6E>{RA9;q%jRh1&C}B=*wC&$cuPb$FJ0($yZ8Mjr|(I3 zwbCSRh=eQ*6+i0^Kch`eI7$|mXjN;xGjB=H?I%-sc3@l}iw$fiPZj>jK`InHy((RJ z`*uFb(X8`a!}h__RI%z-7lmXLGz+CfmClu&Qqdd_9vSWRCiy&D+!CV#Y%>gIeBcLN zvvw^Bd)ONtyZoCYz|#8nHD|(-!OWRLd8|{dIVEwf5qEd^3c6%lZ(pBYKIdw?MyOJ2 zzKPTx--EOMF8BVJy$J36cjiCXXh5*7%j&?|`B32#)w8wA_94@@s$am!xSbi@P{yQc z!6%TDd7s2X1FcwvdBpuvZI~!0D!Vt~HbXMIHj+w2Rsj!)EWO)oF~1P-+xca){QI@F zC}1Geu+yB})ysT_?VngfBxp=}#;3Eb7C$alP?^Ui`YV)(64*k@UbmCwoy91ZL9>gF zLnk=skwlvC%2GWOGb_K8xq8cueN*FqOFRz zGl%o+pbnbkjHZCyL`C0N`iJ$w+Bt!b$^K ziU#CF;^RK1rVEsf!W)DwbDq;cX4WDigup*(7YW4FrB;-n$e*Kvg7BPEi& zqa#G_oFDT~cij)VHQxQh%@q*ZUIWbpHqC98f9*Q%*mBf+Y^&j8bwhusYP|4R3%xzq zVNr>j{Oe$~;Cf1hs?pRk_lfU{Qn)&R#{co~KuBeXRS%8z8OZZFkZ23y731j#_Z-7y z)7ga~TiqR$Dl(N4IWM6~N^Bn0HP+ORVjg&I^78VSXbC3A1zAft(EXEtgow(8lra(< 
[git binary patch payload (base85-encoded data) omitted]

literal 0
HcmV?d00001

diff --git a/script/import-mlperf-tiny-to-experiment/customize.py b/script/import-mlperf-tiny-to-experiment/customize.py
new file mode 100644
index 0000000000..8929cba8d7
--- /dev/null
+++ b/script/import-mlperf-tiny-to-experiment/customize.py
@@ -0,0 +1,458 @@
+import cmind as cm
+from cmind import utils
+
+import os
+import subprocess
+import json
+
+file_summary_json = 'mlperf-inference-summary.json'
+file_result = 'cm-result.json'
+
+fix_benchmark_names = {'anomaly_detection':'ad',
+                       'image_classification':'ic',
+                       'keyword_spotting':'kws',
+                       'visual_wake_words':'vww'}
+
+def preprocess(i):
+
+    env = i['env']
+
+    cur_dir = os.getcwd()
+
+    # Query cache for results dirs
+    r = cm.access({'action':'find',
+                   'automation':'cache,541d6f712a6b464e',
+                   'tags':'get,repo,mlperf-tiny-results'})
+    if r['return']>0: return r
+
+    lst = r['list']
+
+    for c in lst:
+        path = os.path.join(c.path, 'repo')
+
+        if os.path.isdir(path):
+            meta = c.meta
+
+            tags = meta['tags']
+
+            version = ''
+            for t in tags:
+                if t.startswith('version-'):
+                    version = 'v'+t[8:]
+                    break
+
+            r = convert_repo_to_experiment(path, version, env)
+            if r['return']>0: return r
+
+    print ('')
+
+    return {'return':0}
+
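+
+# The helper below walks the MLPerf Tiny submission layout
+# (<division>/<company>/{results,systems,code}); the per-version and
+# per-company branches handle submitters that used non-standard
+# system-description file names in the v0.7 and v1.0 rounds.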
'v'+t[8:] + break + + r = convert_repo_to_experiment(path, version, env) + if r['return']>0: return r + + print ('') + + return {'return':0} + + +def convert_repo_to_experiment(path, version, env): + print ('') + print ('Processing MLPerf repo from CM cache path: {}'.format(path)) + print ('* Version: {}'.format(version)) + + cur_dir = os.getcwd() + + # Get Git URL + os.chdir(path) + + burl = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']) + url = burl.decode('UTF-8').strip() + + print ('* Git URL: {}'.format(url)) + + # Create virtual experiment entries + experiments = {} + + for division in ['closed', 'open']: + p1 = os.path.join(path, division) + if os.path.isdir(p1): + print (' * Processing division: {}'.format(division)) + + companies = os.listdir(p1) + + for company in companies: + p2 = os.path.join (p1, company) + if os.path.isdir(p2): + print (' * Processing company: {}'.format(company)) + + presults = os.path.join(p2, 'results') + psystems = os.path.join(p2, 'systems') + pcode = os.path.join(p2, 'code') + + if os.path.isdir(presults) and os.path.isdir(psystems) and os.path.isdir(pcode): + # Exception for OctoML + presults2 = [presults] + + if company == 'OctoML' and version == 'v1.0': + presults2 = [] + + p3 = os.listdir(presults) + for p3x in p3: + p3y = os.path.join(presults, p3x) + if os.path.isdir(p3y): + presults2.append(p3y) + + for presult in presults2: + systems = os.listdir(presult) + for system in systems: + psystem = os.path.join(presult, system) + if os.path.isdir(psystem): + print (' * Processing result for system: {}'.format(system)) + + # Check system file + psystem_desc = os.path.join(psystems, system+'.json') + psystem_dict = {} + + print (' File: {}'.format(psystem_desc)) + + # Check exceptions + if version == 'v1.0': + if company == 'OctoML': + x = os.path.basename(presult) + psystem_desc = os.path.join(psystems, 'system_description_'+system.replace('-','')+'_'+x+'.json') + elif company == 'STMicroelectronics': + psystem_desc = os.path.join(psystems, system, system+'_system_description.json') + if not os.path.isfile(psystem_desc): + psystem_desc = os.path.join(psystems, system, system.replace('-','_')+'_system_description.json') + elif company == 'syntiant': + psystem_desc = os.path.join(psystems, system, system+'.json') + elif company == 'hls4ml': + psystem_desc = os.path.join(psystems, 'system_description_pynq.json') + elif version == 'v0.7': + if company == 'renesas': + psystem_desc = os.path.join(psystems, system+'_System_Description.json') + elif company == 'STMicroelectronics': + psystem_desc = os.path.join(psystems, system, system+'_system_description.json') + if not os.path.isfile(psystem_desc): + psystem_desc = os.path.join(psystems, system, system.replace('-','_')+'_system_description.json') + elif company == 'syntiant': + psystem_desc = os.path.join(psystems, system, system+'.json') + elif company == 'hls4ml-finn': + psystem_desc = os.path.join(psystems, 'system_description_'+system[:4]+'.json') + + + if os.path.isfile(psystem_desc): + x = '' + if version == 'v1.0': + if company == 'OctoML': + x='}\n\t"' + elif company == 'syntiant': + x='"\n\t"' + elif company == 'hls4ml': + x='dummy' + elif version == 'v0.7': + if company == 'syntiant': + x='"\n\t"' + + if x!='': + r = utils.load_txt(psystem_desc) + if r['return']>0: return r + + s = r['string'] + + j = s.find(x) + if j>=0: + s=s[:j+1]+','+s[j+1:] + + if s.endswith(',\n'): + s=s[:-2]+'}' + + psystem_dict = json.loads(s) + + else: + r = utils.load_json(psystem_desc) + if 
r['return']>0: return r + psystem_dict = r['meta'] + + else: + print (' * Warning: system description not found in {}'.format(psystem_desc)) + input (' Press to continue') + + for benchmark in os.listdir(psystem): + pbenchmark = os.path.join(psystem, benchmark) + if os.path.isdir(pbenchmark): + print (' * Processing benchmark: {}'.format(benchmark)) + + models = [''] + + # May have retrained models + pperf = os.path.join(pbenchmark, 'performance', 'results.txt') + if not os.path.isfile(pperf): + pperf = os.path.join(pbenchmark, 'performance', 'performance_results.txt') + + if not os.path.isfile(pperf): + # likely models + models = [] + + for model in os.listdir(pbenchmark): + pmodel = os.path.join(pbenchmark, model) + if os.path.isdir(pmodel): + models.append(model) + + for model in models: + + results = {} + + if model!='': + print (' * Processing model: {}'.format(model)) + pbenchmark = os.path.join(psystem, benchmark, model) + + perf_file_type=0 + pperf = os.path.join(pbenchmark, 'performance', 'results.txt') + if not os.path.isfile(pperf): + pperf = os.path.join(pbenchmark, 'performance', 'performance_results.txt') + perf_file_type=1 # outdated/weird + + paccuracy = os.path.join(pbenchmark, 'accuracy', 'results.txt') + if not os.path.isfile(paccuracy): + paccuracy = os.path.join(pbenchmark, 'accuracy', 'accuracy_results.txt') + + penergy = os.path.join(pbenchmark, 'energy', 'results.txt') + + if os.path.isfile(pperf) and os.path.isfile(paccuracy): + r = utils.load_txt(pperf) + if r['return']>0: return r + + s = r['string'] + + median_throughput=0 + + x1='Median throughput is ' if perf_file_type==0 else 'Throughput :' + x2=21 if perf_file_type==0 else 18 + + j = s.find(x1) + if j>=0: + j1 = s.find(' inf./sec.', j) + if j1>=0: + median_throughput=float(s[j+x2:j1].strip()) + results['median_throughput']=median_throughput + results['median_throughput_metric']='inf./sec.' + results['Result']=median_throughput + results['_Result']=median_throughput + + if median_throughput==0: + print (' * Warning: median_throughput was not detected in {}'.format(pperf)) + input (' Press to continue') + + r = utils.load_txt(paccuracy, split=True) + if r['return']>0: return r + + lines = r['list'] + + found=False + + for line in lines: + j = line.find('ulp-mlperf: ') + if j>=0: + j1 = line.find(':', j+12) + if j1>=0: + accuracy_key = 'accuracy_'+line[j+12:j1] + value = line[j1+2:] + + if value.endswith('%'): + value = value[:-1] + results[accuracy_key+'_metric']='%' + + value = float(value) + + results[accuracy_key] = value + + if not found: + # first value + results['Accuracy'] = value + results['_Accuracy'] = value + + + found = True + + if not found: + print (' * Warning: accuracy not found in the file {}'.format(paccuracy)) + input (' Press to continue') + + else: + print (' * Warning: performance or accuracy files are not present in this submission') + input (' Press to continue') + + if os.path.isfile(penergy): + r = utils.load_txt(penergy) + if r['return']>0: return r + + s = r['string'] + + median_throughput=0 + + j = s.find('Median throughput is ') + if j>=0: + j1 = s.find(' inf./sec.', j) + if j1>=0: + median_throughput=float(s[j+21:j1]) + + results['median_energy_median_throughput']=median_throughput + results['median_energy_median_throughput_metric']='inf./sec.' 
+ + if median_throughput==0: + print (' * Warning: median_throughput was not detected in {}'.format(penergy)) + input (' Press to continue') + else: + median_energy_cost=0 + + j = s.find('Median energy cost is ') + if j>=0: + j1 = s.find(' uJ/inf.', j) + if j1>=0: + median_energy_cost=float(s[j+22:j1]) + + results['median_energy_cost']=median_energy_cost + results['median_energy_cost_metric']='uj/inf.' + + if median_energy_cost==0: + print (' * Warning: median_energy_cost was not detected in {}'.format(penergy)) + input (' Press to continue') + + print (' * Results dict: {}'.format(results)) + + # Finalizing keys + results.update(psystem_dict) + + xbenchmark = benchmark if benchmark not in fix_benchmark_names else fix_benchmark_names[benchmark] + + results['git_url']=url+'/tree/master/'+division+'/'+company + + results['version']=version + results['__version']=version + results['Organization']=company + results['__Organization']=company + results['Division']=division + results['Benchmark']=xbenchmark + results['__System']=system + + if model!='': + results['Model']=model + results['__Model']=model + + + # Prepare experiment name + cm_name = 'mlperf-tiny--{}--'+division+'--'+xbenchmark + print (' * CM experiment name: {}'.format(cm_name)) + + name_all = cm_name.format('all') + name_ver = cm_name.format(version) + + for name in [name_all, name_ver]: + if name not in experiments: experiments[name]=[] + experiments[name].append(results) + + + else: + print (' * Warning: some directories are not present in this submission') + input (' Press to continue') + + os.chdir(cur_dir) + + r=utils.save_json(file_summary_json, experiments) + if r['return']>0: return r + + env_target_repo=env.get('CM_IMPORT_TINYMLPERF_TARGET_REPO','').strip() + target_repo='' if env_target_repo=='' else env_target_repo+':' + + # Checking experiment + print ('') + for name in experiments: + print (' Preparing experiment artifact "{}"'.format(name)) + + tags = name.split('--') + if 'mlperf' not in tags: tags.insert(0, 'mlperf') + + # Checking if experiment already exists + r = cm.access({'action':'find', + 'automation':'experiment,a0a2d123ef064bcb', + 'artifact':target_repo+name}) + if r['return']>0: return r + + lst = r['list'] + + if len(lst)==0: + r = cm.access({'action':'add', + 'automation':'experiment,a0a2d123ef064bcb', + 'artifact':target_repo+name, + 'tags':tags}) + if r['return']>0: return r + + path = r['path'] + else: + path = lst[0].path + + results = experiments[name] + + # Check if already date directory + dirs = os.listdir(path) + + path2 = '' + for d in dirs: + dd = os.path.join(path, d) + if os.path.isdir(dd): + path2 = dd + break + + if path2=='': + + r = utils.get_current_date_time({}) + if r['return']>0: return r + + date_time = r['iso_datetime'].replace(':','-').replace('T','.') + + path2 = os.path.join(path, date_time) + + os.makedirs(path2) + + # Check if cm-result.json + fresult = os.path.join(path2, file_result) + + if os.path.isfile(fresult): + r=utils.load_json(fresult) + if r['return']>0: return r + + existing_results = r['meta'] + + # Need to check which ones to add + for result in existing_results: + found = False + + # New results + for result2 in results: + matched = True + + # Need to iterate over keys in the new results since old results can have more keys (derivates, etc) + for k in result2: + if k!='uid': + if k not in result or result2[k]!=result[k]: + matched = False + break + + if matched: + found = True + break + + if not found: + results.append(result) + + # Check extra keys + 
final_results=[]
+        for result in results:
+            # Generate UID
+            if 'uid' not in result:
+                r=utils.gen_uid()
+                if r['return']>0: return r
+
+                result['uid'] = r['uid']
+
+        # Write results
+        r=utils.save_json(fresult, results)
+        if r['return']>0: return r
+
+
+    return {'return':0}
diff --git a/script/import-mlperf-training-to-experiment/README-extra.md b/script/import-mlperf-training-to-experiment/README-extra.md
new file mode 100644
index 0000000000..abfc764233
--- /dev/null
+++ b/script/import-mlperf-training-to-experiment/README-extra.md
@@ -0,0 +1,54 @@
+# About
+
+This portable script converts raw results from the [MLPerf™ Training benchmark](https://github.com/mlcommons/training)
+to the [MLCommons CM format](https://github.com/mlcommons/ck) for the [Collective Knowledge Playground](https://x.cKnowledge.org).
+
+The goal is to make it easier for the community to analyze MLPerf results,
+add derived metrics such as performance/Watt and constraints,
+and link reproducibility reports.
+
+Aggregated results are available in [this MLCommons repository](https://github.com/mlcommons/cm4mlperf-results).
+
+You can see these results at the [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-training,all).
+
+## Usage
+
+We have tested this portable CM script on Ubuntu.
+
+Install the [MLCommons CM automation language](https://github.com/mlcommons/ck/blob/master/docs/installation.md).
+
+Pull the MLCommons CK repository with automation recipes for interoperable MLOps:
+```bash
+cm pull repo mlcommons@ck
+```
+
+Install repositories with raw MLPerf training benchmark results:
+```bash
+cmr "get git repo _repo.https://github.com/mlcommons/training_results_v1.0" --extra_cache_tags=mlperf-training-results,version-1.0 --branch=master --depth=""
+cmr "get git repo _repo.https://github.com/mlcommons/training_results_v1.1" --extra_cache_tags=mlperf-training-results,version-1.1 --branch=main --depth=""
+cmr "get git repo _repo.https://github.com/mlcommons/training_results_v2.0" --extra_cache_tags=mlperf-training-results,version-2.0 --branch=main --depth=""
+cmr "get git repo _repo.https://github.com/mlcommons/training_results_v2.1" --extra_cache_tags=mlperf-training-results,version-2.1 --branch=main
+cmr "get git repo _repo.https://github.com/mlcommons/training_results_v3.0" --extra_cache_tags=mlperf-training-results,version-3.0
+```
+
+You can install a private submission repository as follows:
+```bash
+cm run script "get git repo _repo.https://github.com/mlcommons/submissions_training_v3.0" --extra_cache_tags=mlperf-training-results,version-3.0-private --branch=main --depth=4
+```
+
+Convert raw MLPerf training results into CM experiment entries:
+```bash
+cmr "import mlperf training to-experiment"
+```
+
+Visualize results on your local machine via the CK playground GUI:
+```bash
+cmr "gui _playground"
+```
+
+These results are also available in the [public CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-training,all).
+
+# Contact us
+
+This project is maintained by the [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce).
+Join our [Discord server](https://discord.gg/JjWNWXKxwT) to ask questions, provide feedback, and participate in further developments.
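A minimal sketch of how the experiment entries created by this importer could then be listed from Python; this is an illustration only, assuming CM is installed, and it reuses the `experiment` automation UID and the `mlperf-training` tag convention that `customize.py` uses when creating entries:

```python
import cmind

# List experiment entries produced by the importer; the automation UID
# 'experiment,a0a2d123ef064bcb' and the 'mlperf-training' tag follow the
# conventions used by customize.py when it creates the entries.
r = cmind.access({'action': 'find',
                  'automation': 'experiment,a0a2d123ef064bcb',
                  'tags': 'mlperf-training'})
if r['return'] > 0:
    print(r['error'])
else:
    for entry in r['list']:
        print(entry.path)
```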
diff --git a/script/import-mlperf-training-to-experiment/README.md b/script/import-mlperf-training-to-experiment/README.md new file mode 100644 index 0000000000..4de8407719 --- /dev/null +++ b/script/import-mlperf-training-to-experiment/README.md @@ -0,0 +1,143 @@ +Automatically generated README for this automation recipe: **import-mlperf-training-to-experiment** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=import-mlperf-training-to-experiment,b13d9b7337414f17) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-training-to-experiment)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *import,mlperf,training,mlperf-training,experiment,2experiment,to-experiment* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "import mlperf training mlperf-training experiment 2experiment to-experiment" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=import,mlperf,training,mlperf-training,experiment,2experiment,to-experiment` + +`cm run script --tags=import,mlperf,training,mlperf-training,experiment,2experiment,to-experiment [--input_flags]` + +*or* + +`cmr "import mlperf training mlperf-training experiment 2experiment to-experiment"` + +`cmr "import mlperf training mlperf-training experiment 2experiment to-experiment " [--input_flags]` + + +#### Run this script from Python + +

+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'import,mlperf,training,mlperf-training,experiment,2experiment,to-experiment',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="import,mlperf,training,mlperf-training,experiment,2experiment,to-experiment"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=import,mlperf,training,mlperf-training,experiment,2experiment,to-experiment) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "import mlperf training mlperf-training experiment 2experiment to-experiment" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--target_repo=value` → `CM_IMPORT_MLPERF_TRAINING_TARGET_REPO=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "target_repo":...})
+```
+
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
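As a concrete illustration (the repository name below is a placeholder, not from the original docs), the `--target_repo` flag shown above can also be passed through the Python API; `customize.py` then reads it from `CM_IMPORT_MLPERF_TRAINING_TARGET_REPO`:

```python
import cmind

# 'myorg@myrepo' is a hypothetical target repository name; the input_mapping
# in _cm.yaml maps 'target_repo' to CM_IMPORT_MLPERF_TRAINING_TARGET_REPO.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'import,mlperf,training,mlperf-training,experiment,2experiment,to-experiment',
                  'target_repo': 'myorg@myrepo',
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```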
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-training-to-experiment/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,mlperf,logging + - CM script: [get-mlperf-logging](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-logging) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-training-to-experiment/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-training-to-experiment/_cm.yaml) + 1. ***Run native script if exists*** + * [run_mlperf_logger.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-training-to-experiment/run_mlperf_logger.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-training-to-experiment/_cm.yaml) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/import-mlperf-training-to-experiment/_cm.yaml) + +___ +### Script output +`cmr "import mlperf training mlperf-training experiment 2experiment to-experiment " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/import-mlperf-training-to-experiment/_cm.yaml b/script/import-mlperf-training-to-experiment/_cm.yaml new file mode 100644 index 0000000000..7c7013e5e8 --- /dev/null +++ b/script/import-mlperf-training-to-experiment/_cm.yaml @@ -0,0 +1,39 @@ +# Identification of this CM script +alias: import-mlperf-training-to-experiment +uid: b13d9b7337414f17 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "MLPerf benchmark support" + +developers: "[Grigori Fursin](https://cKnowledge.org/gfursin)" + +# User-friendly tags to find this CM script +tags: + - import + - mlperf + - training + - mlperf-training + - experiment + - 2experiment + - to-experiment + +input_mapping: + target_repo: CM_IMPORT_MLPERF_TRAINING_TARGET_REPO + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + - names: + - python + - python3 + tags: get,python3 + + - tags: get,mlperf,logging diff --git a/script/import-mlperf-training-to-experiment/customize.py b/script/import-mlperf-training-to-experiment/customize.py new file mode 100644 index 0000000000..19a69a6af8 --- /dev/null +++ b/script/import-mlperf-training-to-experiment/customize.py @@ -0,0 +1,335 @@ +import cmind as cm +from cmind import utils + +import os +import subprocess +import csv +import json +import copy + + +file_summary = 'summary.csv' +file_summary_json = 'mlperf-training-summary-{}.json' +file_summary2 = 'summary.xlsx' +file_result = 'cm-result.json' + +model2task = { + "resnet":"image-classification", + "maskrcnn":"object-detection-heavy-weight", + 
"ssd":"object-detection-light-weight", + "minigo": "reinforcement-learning", + "rnnt":"speech-recognition", + "bert":"language-processing", + "dlrm":"recommendation", + "3dunet":"image-segmentation" +} + +model2dataset = { + "resnet":"ImageNet", + "maskrcnn":"COCO", + "ssd":"OpenImages", + "minigo": "Go", + "rnnt":"LibriSpeech", + "bert":"Wikipedia", + "dlrm":"1TB Clickthrough", + "3dunet":"KiTS19" +} + + +model2accuracy = { + "resnet":75.9, + "maskrcnn":0.377, + "ssd":34.0, + "minigo": 50, + "rnnt":0.058, + "bert":0.72, + "dlrm":0.8025, + "3dunet":0.908 +} + +model2accuracy_metric = { + "resnet":"% classification", + "maskrcnn":"Box min AP", + "ssd":"% mAP", + "minigo": "% win rate vs. checkpoint", + "rnnt":"Word Error Rate", + "bert":"Mask-LM accuracy", + "dlrm":"AUC", + "3dunet":"Mean DICE score" +} + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + cur_dir = os.getcwd() + + # Clean summary files + for f in [file_summary, file_summary2]: + if os.path.isfile(f): + os.remove(f) + + # Query cache for results dirs + r = cm.access({'action':'find', + 'automation':'cache,541d6f712a6b464e', + 'tags':'get,repo,mlperf-training-results'}) + if r['return']>0: return r + + lst = r['list'] + + for c in lst: + path = os.path.join(c.path, 'repo') + + if os.path.isdir(path): + + meta = c.meta + + tags = meta['tags'] + + version = '' + for t in tags: + if t.startswith('version-'): + version = t[8:] + break + + # Run MLPerf logger + run_script_input = i['run_script_input'] + automation = i['automation'] + + env['CM_MLPERF_TRAINING_REPO_PATH'] = path + env['CM_MLPERF_TRAINING_CURRENT_DIR'] = cur_dir + env['CM_MLPERF_TRAINING_REPO_VERSION'] = version + + print ('') + print ('Repo path: {}'.format(path)) + print ('Repo version: {}'.format(version)) + + r = automation.run_native_script({'run_script_input':run_script_input, + 'env':env, + 'script_name':'run_mlperf_logger'}) + if r['return']>0: + return r + + r = convert_summary_csv_to_experiment(path, version, env) + if r['return']>0: return r + + return {'return':0} + + +def convert_summary_csv_to_experiment(path, version, env): + print ('* Processing MLPerf training results repo in cache path: {}'.format(path)) + + cur_dir = os.getcwd() + + # Get Git URL + os.chdir(path) + + burl = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']) + url = burl.decode('UTF-8').strip() + + print (' Git URL: {}'.format(url)) + + os.chdir(cur_dir) + + if not os.path.isfile(file_summary): + return {'return':1, 'error':'{} was not created'.format(file_summary)} + else: + summary = [] + + with open (file_summary, encoding = 'utf-8') as fcsv: + csv_reader = csv.DictReader(fcsv) + + for rows in csv_reader: + result = {} + + keys = rows.keys() + + for k in keys: + v = rows[k] + + if v == 'False': + v=False + elif v == 'True': + v=True + else: + try: + v=float(v) + + if v==int(v): + v=int(v) + except ValueError: + pass + + result[k] = v + + # Add extra tags + if url!='': + result['git_url']=url + + location = result.get('Location','') + if location != '': + result['url']=url+'/tree/master/'+location + + if result.get('Accuracy',0)>0: + result['Accuracy_div_100'] = float('{:.5f}'.format(result['Accuracy']/100)) + + # Add ratios + + + # Append to summary + summary.append(result) + + r=utils.save_json(file_summary_json.format(version), summary) + if r['return']>0: return r + + # Create virtual experiment entries + experiment = 
{} + + for result in summary: + + for model in model2task: + if result.get(model, '')!='': + result1 = {} + + result1['Result'] = result[model] + result1['Result_Units'] = 'min.' + result1['Accuracy'] = model2accuracy[model] + result1['Accuracy_Metric'] = model2accuracy_metric[model] + result1['Task'] = model2task[model] + result1['Benchmark'] = model2task[model] + result1['Dataset'] = model2dataset[model] + result1['Model_ID'] = model + + result1['_Result'] = result[model] + result1['_Result_Units'] = 'min.' + result1['_Accuracy'] = model2accuracy[model] + result1['_Accuracy_Metric'] = model2accuracy_metric[model] + result1['_Task'] = model2task[model] + result1['_Dataset'] = model2dataset[model] + result1['_Model_ID'] = model + + result1['version']=version + result1['_version']=version + result1['Organization']=result['submitter'] + result1['_Organization']=result['submitter'] + result1['_System']=result['system'] + + for k in result: + if k==model or k not in model2task: + result1[k]=result[k] + + xdivision = result['division'] + + name = 'mlperf-training--{}--'+xdivision+'--'+model2task[model] + + name_all = name.format('all') + name_ver = name.format(version) + + for name in [name_all, name_ver]: + if name not in experiment: experiment[name]=[] + experiment[name].append(result1) + + # Checking experiment + env_target_repo=env.get('CM_IMPORT_MLPERF_TRAINING_TARGET_REPO','').strip() + target_repo='' if env_target_repo=='' else env_target_repo+':' + + print ('') + for name in experiment: + print (' Preparing experiment artifact "{}"'.format(name)) + + tags = name.split('--') + if 'mlperf' not in tags: tags.insert(0, 'mlperf') + + # Checking if experiment already exists + r = cm.access({'action':'find', + 'automation':'experiment,a0a2d123ef064bcb', + 'artifact':target_repo+name}) + if r['return']>0: return r + + lst = r['list'] + + if len(lst)==0: + r = cm.access({'action':'add', + 'automation':'experiment,a0a2d123ef064bcb', + 'artifact':target_repo+name, + 'tags':tags}) + if r['return']>0: return r + + path = r['path'] + else: + path = lst[0].path + + results = experiment[name] + + # Check if already date directory + dirs = os.listdir(path) + + path2 = '' + for d in dirs: + dd = os.path.join(path, d) + if os.path.isdir(dd): + path2 = dd + break + + if path2=='': + + r = utils.get_current_date_time({}) + if r['return']>0: return r + + date_time = r['iso_datetime'].replace(':','-').replace('T','.') + + path2 = os.path.join(path, date_time) + + os.makedirs(path2) + + # Check if cm-result.json + fresult = os.path.join(path2, file_result) + + if os.path.isfile(fresult): + r=utils.load_json(fresult) + if r['return']>0: return r + + existing_results = r['meta'] + + # Need to check which ones to add + for result in existing_results: + found = False + + # New results + for result2 in results: + matched = True + + # Need to iterate over keys in the new results since old results can have more keys (derivates, etc) + for k in result2: + if k!='uid': + if k not in result or result2[k]!=result[k]: + matched = False + break + + if matched: + found = True + break + + if not found: + results.append(result) + + # Check extra keys + final_results=[] + for result in results: + # Generate UID + if 'uid' not in result: + r=utils.gen_uid() + if r['return']>0: return r + + result['uid'] = r['uid'] + + # Write results + r=utils.save_json(fresult, results) + if r['return']>0: return r + + return {'return':0} diff --git a/script/import-mlperf-training-to-experiment/run_mlperf_logger.sh 
b/script/import-mlperf-training-to-experiment/run_mlperf_logger.sh
new file mode 100644
index 0000000000..112395d5fb
--- /dev/null
+++ b/script/import-mlperf-training-to-experiment/run_mlperf_logger.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+echo ""
+${CM_PYTHON_BIN_WITH_PATH} -m mlperf_logging.result_summarizer "${CM_MLPERF_TRAINING_REPO_PATH}/{*}" training ${CM_MLPERF_TRAINING_REPO_VERSION}.0 -csv summary.csv
+# --xls summary.xlsx # Doesn't work with the latest pandas (needs .close() instead of .save())
+
+#${CM_MLPERF_LOGGING_SRC_PATH}/scripts/verify_for_v${CM_MLPERF_TRAINING_REPO_VERSION}_training.sh "${CM_MLPERF_TRAINING_REPO_PATH}/ASUSTeK"
+
+test $? -eq 0 || exit $?
diff --git a/script/install-aws-cli/README.md b/script/install-aws-cli/README.md
new file mode 100644
index 0000000000..9b2f8718b0
--- /dev/null
+++ b/script/install-aws-cli/README.md
@@ -0,0 +1,124 @@
+Automatically generated README for this automation recipe: **install-aws-cli**
+
+Category: **Cloud automation**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-aws-cli,4d3efd333c3f4d36) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-aws-cli)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *install,script,aws-cli,aws,cli*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "install script aws-cli aws cli" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=install,script,aws-cli,aws,cli`
+
+`cm run script --tags=install,script,aws-cli,aws,cli `
+
+*or*
+
+`cmr "install script aws-cli aws cli"`
+
+`cmr "install script aws-cli aws cli " `
+
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,script,aws-cli,aws,cli',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,script,aws-cli,aws,cli"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,script,aws-cli,aws,cli) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install script aws-cli aws cli" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-aws-cli/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-aws-cli/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-aws-cli/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-aws-cli/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-aws-cli/_cm.json) + 1. Run "postrocess" function from customize.py + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-aws-cli/_cm.json)*** + * get,aws-cli + * `if (CM_REQUIRE_INSTALL != yes)` + - CM script: [get-aws-cli](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-aws-cli) + +___ +### Script output +`cmr "install script aws-cli aws cli " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/install-aws-cli/_cm.json b/script/install-aws-cli/_cm.json new file mode 100644 index 0000000000..22bfe216f0 --- /dev/null +++ b/script/install-aws-cli/_cm.json @@ -0,0 +1,33 @@ +{ + "alias": "install-aws-cli", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Cloud automation", + "cache": true, + "deps": [ + { + "tags": "detect,os" + } + ], + "env": { + "CM_CURL_URL": "https://awscli.amazonaws.com/awscli-exe-[OS]-[PLATFORM].zip" + }, + "post_deps": [ + { + "skip_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "tags": "get,aws-cli" + } + ], + "tags": [ + "install", + "script", + "aws-cli", + "aws", + "cli" + ], + "uid": "4d3efd333c3f4d36" +} diff --git a/script/install-aws-cli/customize.py b/script/install-aws-cli/customize.py new file mode 100644 index 0000000000..df2744ac4d --- /dev/null +++ b/script/install-aws-cli/customize.py @@ -0,0 +1,17 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return':0} diff --git a/script/install-aws-cli/run.sh b/script/install-aws-cli/run.sh new file mode 100644 index 0000000000..cc3abf3f91 --- /dev/null +++ b/script/install-aws-cli/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +CUR_DIR=$PWD +echo "******************************************************" +echo $CM_CURL_URL +CM_CURL_URL=${CM_CURL_URL//"[OS]"/${CM_HOST_OS_TYPE}} +CM_CURL_URL=${CM_CURL_URL//"[PLATFORM]"/${CM_HOST_PLATFORM_FLAVOR}} +echo $CM_CURL_URL +echo "CM_CURL_URL=${CM_CURL_URL}" >> tmp-run-env.out +FILE="awscliv2.zip" +rm -rf ${FILE} +curl "${CM_CURL_URL}" -o "${FILE}" +unzip ${FILE} +sudo ./aws/install diff --git a/script/install-bazel/README.md b/script/install-bazel/README.md new file mode 100644 index 0000000000..7eb6119d07 --- /dev/null +++ b/script/install-bazel/README.md @@ -0,0 +1,135 @@ +Automatically generated README for this automation recipe: **install-bazel** + +Category: **Detection or installation of tools and 
artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-bazel,dfd3d2bf5b764175) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-bazel)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,script,bazel* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install script bazel" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,script,bazel` + +`cm run script --tags=install,script,bazel ` + +*or* + +`cmr "install script bazel"` + +`cmr "install script bazel " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,script,bazel',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,script,bazel"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,script,bazel) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install script bazel" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +#### Versions +Default version: `7.0.2` + +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-bazel/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-bazel/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-bazel/_cm.json) + 1. ***Run native script if exists*** + * [run-aarch64.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-bazel/run-aarch64.sh) + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-bazel/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-bazel/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-bazel/_cm.json) + 1. Run "postrocess" function from customize.py + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-bazel/_cm.json)*** + * get,bazel + * `if (CM_REQUIRE_INSTALL != yes)` + - CM script: [get-bazel](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-bazel) + +___ +### Script output +`cmr "install script bazel " -j` +#### New environment keys (filter) + +* `CM_BAZEL_*` +#### New environment keys auto-detected from customize + +* `CM_BAZEL_BIN_WITH_PATH` +* `CM_BAZEL_DOWNLOAD_FILE` +* `CM_BAZEL_DOWNLOAD_URL` +* `CM_BAZEL_INSTALLED_PATH` \ No newline at end of file diff --git a/script/install-bazel/_cm.json b/script/install-bazel/_cm.json new file mode 100644 index 0000000000..72491848d4 --- /dev/null +++ b/script/install-bazel/_cm.json @@ -0,0 +1,35 @@ +{ + "alias": "install-bazel", + "automation_alias": "script", + "category": "Detection or installation of tools and artifacts", + "automation_uid": "5b4e0237da074764", + "cache": true, + "default_version": "7.0.2", + "deps": [ + { + "tags": "detect,os" + } + ], + "new_env_keys": [ + "CM_BAZEL_*" + ], + "env": { + "CM_WGET_URL": "https://github.com/bazelbuild/bazel/releases/download/[VERSION]/bazel-[VERSION]-installer-[OS]-[PLATFORM].sh" + }, + "post_deps": [ + { + "skip_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "tags": "get,bazel" + } + ], + "tags": [ + "install", + "script", + "bazel" + ], + "uid": "dfd3d2bf5b764175" +} diff --git a/script/install-bazel/customize.py b/script/install-bazel/customize.py new file mode 100644 index 0000000000..d656e40bac --- /dev/null +++ b/script/install-bazel/customize.py @@ -0,0 +1,60 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + need_version = env.get('CM_VERSION','') + if need_version == '': + return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} + + print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + +# if 'CM_GIT_CHECKOUT' not in env: +# env['CM_GIT_CHECKOUT'] = 'releases/gcc-' + need_version + + if os_info['platform'] == 'windows': + prefix = '' + xos = 'windows' + platform = 'x86_64' + ext = '.exe' + else: + prefix = 'installer-' + xos = env['CM_HOST_OS_TYPE'] + platform = env['CM_HOST_PLATFORM_FLAVOR'] + ext = '.sh' + + + filename = 
'bazel-{}-{}{}-{}{}'.format(need_version, + prefix, + xos, + platform, + ext) + + url = 'https://github.com/bazelbuild/bazel/releases/download/{}/{}'.format(need_version, filename) + + cur_dir = os.getcwd() + + if os_info['platform'] == 'windows': + bazel_bin = 'bazel.exe' + path = cur_dir + else: + bazel_bin = 'bazel' + path = os.path.join(cur_dir, 'install', 'bin') + + env['CM_BAZEL_DOWNLOAD_URL'] = url + env['CM_BAZEL_DOWNLOAD_FILE'] = filename + + env['CM_BAZEL_INSTALLED_PATH'] = path + env['CM_BAZEL_BIN_WITH_PATH'] = os.path.join(path, bazel_bin) + + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() + + return {'return':0} diff --git a/script/install-bazel/run-aarch64.sh b/script/install-bazel/run-aarch64.sh new file mode 100644 index 0000000000..0b87532434 --- /dev/null +++ b/script/install-bazel/run-aarch64.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +CUR_DIR=$PWD +echo "******************************************************" + +CM_WGET_URL=${CM_WGET_URL//"[OS]"/${CM_HOST_OS_TYPE}} +CM_WGET_URL=${CM_WGET_URL//"[PLATFORM]"/arm64} +CM_WGET_URL=${CM_WGET_URL//"[VERSION]"/${CM_VERSION}} +CM_WGET_URL=${CM_WGET_URL//"-installer-"/-} +CM_WGET_URL=${CM_WGET_URL//".sh"/} +echo "CM_WGET_URL=${CM_WGET_URL}" > tmp-run-env.out +BAZEL_SCRIPT="bazel-${CM_VERSION}-${CM_HOST_OS_TYPE}-arm64" + +INSTALL_DIR=${CUR_DIR} +rm -rf ${INSTALL_DIR}/bin +wget -c ${CM_WGET_URL} +if [ "${?}" != "0" ]; then exit 1; fi +chmod +x ${BAZEL_SCRIPT} +ln -s ${BAZEL_SCRIPT} bazel +if [ "${?}" != "0" ]; then exit 1; fi + +echo "CM_BAZEL_INSTALLED_PATH=${INSTALL_DIR}" >>tmp-run-env.out +echo "CM_BAZEL_BIN_WITH_PATH=${INSTALL_DIR}/${BAZEL_SCRIPT}" >>tmp-run-env.out + +echo "Bazel is installed to ${INSTALL_DIR} ..." diff --git a/script/install-bazel/run.bat b/script/install-bazel/run.bat new file mode 100644 index 0000000000..7108a47582 --- /dev/null +++ b/script/install-bazel/run.bat @@ -0,0 +1,9 @@ +@echo off + +del /Q /S %CM_BAZEL_DOWNLOAD_FILE% +del /Q /S bazel.exe + +wget -c %CM_BAZEL_DOWNLOAD_URL% -O %CM_BAZEL_DOWNLOAD_FILE% --no-check-certificate +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +ren %CM_BAZEL_DOWNLOAD_FILE% bazel.exe diff --git a/script/install-bazel/run.sh b/script/install-bazel/run.sh new file mode 100644 index 0000000000..e5fe4651d8 --- /dev/null +++ b/script/install-bazel/run.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +CUR_DIR=$PWD + +echo "******************************************************" + +CM_WGET_URL=${CM_WGET_URL//"[OS]"/${CM_HOST_OS_TYPE}} +CM_WGET_URL=${CM_WGET_URL//"[PLATFORM]"/${CM_HOST_PLATFORM_FLAVOR}} +CM_WGET_URL=${CM_WGET_URL//"[VERSION]"/${CM_VERSION}} + +echo "CM_WGET_URL=${CM_WGET_URL}" >> tmp-run-env.out + +BAZEL_SCRIPT="bazel-${CM_VERSION}-installer-${CM_HOST_OS_TYPE}-${CM_HOST_PLATFORM_FLAVOR}.sh" + +INSTALL_DIR=${CUR_DIR} + +rm -rf ${INSTALL_DIR}/bin + +wget -c ${CM_WGET_URL} --no-check-certificate + +if [ "${?}" != "0" ]; then exit 1; fi + +chmod +x ${BAZEL_SCRIPT} + +./${BAZEL_SCRIPT} --bin=${INSTALL_DIR}"/bin" --base=${INSTALL_DIR}"/install" +if [ "${?}" != "0" ]; then exit 1; fi + +echo "Bazel is installed to ${INSTALL_DIR} ..." 
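As a quick sanity check of the package-name logic in `install-bazel/customize.py` above, here is a standalone sketch for one illustrative case (Linux x86_64, version 7.0.2, matching the default version):

```python
# Standalone sketch mirroring the filename/URL construction in
# install-bazel/customize.py for one illustrative configuration.
need_version = '7.0.2'
prefix, xos, platform, ext = 'installer-', 'linux', 'x86_64', '.sh'

filename = 'bazel-{}-{}{}-{}{}'.format(need_version, prefix, xos, platform, ext)
url = 'https://github.com/bazelbuild/bazel/releases/download/{}/{}'.format(need_version, filename)

assert filename == 'bazel-7.0.2-installer-linux-x86_64.sh'
print(url)  # .../releases/download/7.0.2/bazel-7.0.2-installer-linux-x86_64.sh
```

This matches the `BAZEL_SCRIPT` name assembled by `run.sh` above, so the downloaded installer and the script invoked afterwards agree.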
diff --git a/script/install-cmake-prebuilt/README.md b/script/install-cmake-prebuilt/README.md new file mode 100644 index 0000000000..89bc1c9144 --- /dev/null +++ b/script/install-cmake-prebuilt/README.md @@ -0,0 +1,137 @@ +Automatically generated README for this automation recipe: **install-cmake-prebuilt** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-cmake-prebuilt,5a39ef05992b4103) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cmake-prebuilt)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,prebuilt,cmake,prebuilt-cmake,install-prebuilt-cmake* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,prebuilt,cmake,prebuilt-cmake,install-prebuilt-cmake` + +`cm run script --tags=install,prebuilt,cmake,prebuilt-cmake,install-prebuilt-cmake ` + +*or* + +`cmr "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake"` + +`cmr "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,prebuilt,cmake,prebuilt-cmake,install-prebuilt-cmake',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,prebuilt,cmake,prebuilt-cmake,install-prebuilt-cmake"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,prebuilt,cmake,prebuilt-cmake,install-prebuilt-cmake) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +#### Versions +Default version: `3.28.3` + +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cmake-prebuilt/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cmake-prebuilt/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cmake-prebuilt/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cmake-prebuilt/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cmake-prebuilt/_cm.json) + 1. Run "postrocess" function from customize.py + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cmake-prebuilt/_cm.json)*** + * get,cmake + * `if (CM_REQUIRE_INSTALL != yes)` + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + +___ +### Script output +`cmr "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake " -j` +#### New environment keys (filter) + +* `+C_INCLUDE_PATH` +* `+LD_LIBRARY_PATH` +* `+PATH` +* `CM_CMAKE_*` +* `CM_GET_DEPENDENT_CACHED_PATH` +#### New environment keys auto-detected from customize + +* `CM_CMAKE_BIN_WITH_PATH` +* `CM_CMAKE_INSTALLED_PATH` +* `CM_CMAKE_PACKAGE` +* `CM_GET_DEPENDENT_CACHED_PATH` \ No newline at end of file diff --git a/script/install-cmake-prebuilt/_cm.json b/script/install-cmake-prebuilt/_cm.json new file mode 100644 index 0000000000..5eacad9f0b --- /dev/null +++ b/script/install-cmake-prebuilt/_cm.json @@ -0,0 +1,38 @@ +{ + "alias": "install-cmake-prebuilt", + "category": "Detection or installation of tools and artifacts", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "default_version": "3.28.3", + "new_env_keys": [ + "CM_CMAKE_*", + "CM_GET_DEPENDENT_CACHED_PATH", + "+PATH", + "+LD_LIBRARY_PATH", + "+C_INCLUDE_PATH" + ], + "deps": [ + { + "tags": "detect,os" + } + ], + "post_deps": [ + { + "skip_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "tags": "get,cmake" + } + ], + "tags": [ + "install", + "prebuilt", + "cmake", + "prebuilt-cmake", + "install-prebuilt-cmake" + ], + "uid": "5a39ef05992b4103" +} diff --git a/script/install-cmake-prebuilt/customize.py b/script/install-cmake-prebuilt/customize.py new file mode 100644 index 0000000000..263e667c47 --- /dev/null +++ b/script/install-cmake-prebuilt/customize.py @@ -0,0 +1,116 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + need_version = env.get('CM_VERSION','') + if need_version == '': + return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} + + print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + + version_split = need_version.split(".") + while len(version_split) < 3: + version_split.append("0") + + need_version = ".".join(version_split) + + host_os_bits = env['CM_HOST_OS_BITS'] + + if os_info['platform'] != 'windows': + host_os_machine = env['CM_HOST_OS_MACHINE'] # ABI + + # Prepare package name + if 
os_info['platform'] == 'darwin': + if host_os_bits != '64': + return {'return':1, 'error':'this package doesn\'t support non 64-bit MacOS'} + + package_name = 'cmake-' + need_version + '-macos-universal.tar.gz' + + elif os_info['platform'] == 'windows': + package_name = 'cmake-' + need_version + '-windows-' + + if host_os_bits == '64': + package_name += 'x86_64' + else: + package_name += 'i386' + + package_name += '.zip' + + else: + package_name='cmake-' + need_version + '-linux-' + + if host_os_machine.startswith('arm') or host_os_machine.startswith('aarch'): + if host_os_bits=='64': + package_name += 'aarch64' + else: + return {'return':1, 'error':'this script doesn\'t support armv7'} + else: + package_name += 'x86_64' + + package_name += '.tar.gz' + + + package_url = 'https://github.com/Kitware/CMake/releases/download/v' + need_version + '/' + package_name + + print (recursion_spaces + ' # Prepared package URL: {}'.format(package_url)) + + print ('') + print ('Downloading from {} ...'.format(package_url)) + + cm = automation.cmind + + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', + 'url':package_url}) + if r['return']>0: return r + + filename = r['filename'] + + # Check what to do with this file depending on OS + if os_info['platform'] == 'windows': + print ('Unzipping file {}'.format(filename)) + + r = cm.access({'action':'unzip_file', + 'automation':'utils,dc2743f8450541e3', + 'strip_folders':1, + 'filename':filename}) + if r['return']>0: return r + + if os.path.isfile(filename): + print ('Removing file {}'.format(filename)) + os.remove(filename) + + path_bin = os.path.join(os.getcwd(), 'bin') + path_include = os.path.join(os.getcwd(), 'include') + elif os_info['platform'] == 'darwin': + path_bin = os.path.join(os.getcwd(), 'CMake.app', 'Contents', 'bin') + path_include = os.path.join(os.getcwd(), 'CMake.app', 'Contents', 'include') + else: + path_bin = os.path.join(os.getcwd(), 'bin') + path_include = os.path.join(os.getcwd(), 'include') + + env['CM_CMAKE_PACKAGE'] = filename + + env['CM_CMAKE_INSTALLED_PATH'] = path_bin + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() + + bin_name = 'cmake.exe' if os_info['platform'] == 'windows' else 'cmake' + + env['CM_CMAKE_BIN_WITH_PATH'] = os.path.join(path_bin, bin_name) + + # We don't need to check default paths here because we force install to cache + env['+PATH'] = [env['CM_CMAKE_INSTALLED_PATH']] + + if os.path.isdir(path_include): + env['+C_INCLUDE_PATH'] = [ path_include ] + + return {'return':0} diff --git a/script/install-cmake-prebuilt/run.sh b/script/install-cmake-prebuilt/run.sh new file mode 100644 index 0000000000..a7b91ddd2a --- /dev/null +++ b/script/install-cmake-prebuilt/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +echo "" +echo "Unarchiving ${CM_CMAKE_PACKAGE} ..." + +tar --strip 1 -xf ${CM_CMAKE_PACKAGE} +test $? -eq 0 || exit 1 + +rm -f ${CM_CMAKE_PACKAGE} +test $? 
-eq 0 || exit 1 diff --git a/script/install-cuda-package-manager/README.md b/script/install-cuda-package-manager/README.md new file mode 100644 index 0000000000..c8af286eeb --- /dev/null +++ b/script/install-cuda-package-manager/README.md @@ -0,0 +1,125 @@ +Automatically generated README for this automation recipe: **install-cuda-package-manager** + +Category: **CUDA automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-cuda-package-manager,c1afdff8542f45be) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-package-manager)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,package-manager,cuda,package-manager-cuda,install-pm-cuda* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install package-manager cuda package-manager-cuda install-pm-cuda" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,package-manager,cuda,package-manager-cuda,install-pm-cuda` + +`cm run script --tags=install,package-manager,cuda,package-manager-cuda,install-pm-cuda ` + +*or* + +`cmr "install package-manager cuda package-manager-cuda install-pm-cuda"` + +`cmr "install package-manager cuda package-manager-cuda install-pm-cuda " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,package-manager,cuda,package-manager-cuda,install-pm-cuda',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,package-manager,cuda,package-manager-cuda,install-pm-cuda"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,package-manager,cuda,package-manager-cuda,install-pm-cuda) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install package-manager cuda package-manager-cuda install-pm-cuda" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-package-manager/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-package-manager/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-package-manager/_cm.json) + 1. ***Run native script if exists*** + * [run-ubuntu.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-package-manager/run-ubuntu.sh) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-package-manager/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-package-manager/_cm.json) + 1. Run "postrocess" function from customize.py + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-package-manager/_cm.json)*** + * get,cuda + * `if (CM_REQUIRE_INSTALL != yes)` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + +___ +### Script output +`cmr "install package-manager cuda package-manager-cuda install-pm-cuda " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/install-cuda-package-manager/_cm.json b/script/install-cuda-package-manager/_cm.json new file mode 100644 index 0000000000..dc05f3390f --- /dev/null +++ b/script/install-cuda-package-manager/_cm.json @@ -0,0 +1,30 @@ +{ + "alias": "install-cuda-package-manager", + "automation_alias": "script", + "category": "CUDA automation", + "automation_uid": "5b4e0237da074764", + "cache": true, + "deps": [ + { + "tags": "detect,os" + } + ], + "post_deps": [ + { + "skip_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "tags": "get,cuda" + } + ], + "tags": [ + "install", + "package-manager", + "cuda", + "package-manager-cuda", + "install-pm-cuda" + ], + "uid": "c1afdff8542f45be" +} diff --git a/script/install-cuda-package-manager/customize.py b/script/install-cuda-package-manager/customize.py new file mode 100644 index 0000000000..002e85e2ef --- /dev/null +++ b/script/install-cuda-package-manager/customize.py @@ -0,0 +1,16 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() + + + return {'return':0} diff --git a/script/install-cuda-package-manager/run-ubuntu.sh b/script/install-cuda-package-manager/run-ubuntu.sh new file mode 100644 index 0000000000..ff5bb8e194 --- /dev/null +++ b/script/install-cuda-package-manager/run-ubuntu.sh @@ -0,0 +1 @@ +sudo apt-get install nvidia-cuda-toolkit diff --git a/script/install-cuda-package-manager/run.sh b/script/install-cuda-package-manager/run.sh new file mode 100644 index 0000000000..d52681cbfd --- /dev/null +++ b/script/install-cuda-package-manager/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash +CUR=${PWD} +echo "Package installation script not available yet for ${CM_HOST_OS_FLAVOR}" +exit 1 + diff --git a/script/install-cuda-prebuilt/README-extra.md b/script/install-cuda-prebuilt/README-extra.md new file 
mode 100644 index 0000000000..ca9a792ad5 --- /dev/null +++ b/script/install-cuda-prebuilt/README-extra.md @@ -0,0 +1,4 @@ +# Notes + +This script is in a prototyping alpha stage. Needs to be considerably updated and unified! + diff --git a/script/install-cuda-prebuilt/README.md b/script/install-cuda-prebuilt/README.md new file mode 100644 index 0000000000..6a93b32171 --- /dev/null +++ b/script/install-cuda-prebuilt/README.md @@ -0,0 +1,181 @@ +Automatically generated README for this automation recipe: **install-cuda-prebuilt** + +Category: **CUDA automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-cuda-prebuilt,14eadcd42ba340c3) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-prebuilt)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda` + +`cm run script --tags=install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda[,variations] [--input_flags]` + +*or* + +`cmr "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda"` + +`cmr "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
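+For example, a hypothetical call that picks the `_driver` variation, pins a version and passes the `--local_run_file_path` flag (see "Script flags mapped to environment" below) through the same dictionary; the local path is illustrative only:
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  # variations are appended to the tags with a '_' prefix
+                  'tags':'install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda,_driver',
+                  'version':'12.2.0',
+                  # maps to CUDA_RUN_FILE_LOCAL_PATH (hypothetical path)
+                  'local_run_file_path':'/tmp/cuda_12.2.0_535.54.03_linux.run',
+                  'out':'con'})
+if r['return']>0:
+    raise SystemExit(r['error'])
+```
+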
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * Group "**install-driver**" +
+ Click here to expand this section. + + * `_driver` + - Environment variables: + - *CM_CUDA_INSTALL_DRIVER*: `yes` + - Workflow: + * **`_no-driver`** (default) + - Environment variables: + - *CM_CUDA_INSTALL_DRIVER*: `no` + - Workflow: + +
+ + +#### Default variations + +`_no-driver` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--local_run_file_path=value` → `CUDA_RUN_FILE_LOCAL_PATH=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "local_run_file_path":...})
+```
+
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_SUDO: `sudo` + +
+ +#### Versions +Default version: `11.8.0` + +* `11.7.0` +* `11.8.0` +* `12.0.0` +* `12.1.1` +* `12.2.0` +* `12.3.2` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-prebuilt/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-prebuilt/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-prebuilt/_cm.json)*** + * download,file + * CM names: `--adr.['download-script']...` + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-prebuilt/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-prebuilt/_cm.json) + 1. Run "postrocess" function from customize.py + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-cuda-prebuilt/_cm.json)*** + * get,cuda + * `if (CM_REQUIRE_INSTALL != yes)` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + +___ +### Script output +`cmr "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_CUDA_*` +* `CM_NVCC_*` +#### New environment keys auto-detected from customize + +* `CM_CUDA_INSTALLED_PATH` +* `CM_NVCC_BIN_WITH_PATH` \ No newline at end of file diff --git a/script/install-cuda-prebuilt/_cm.json b/script/install-cuda-prebuilt/_cm.json new file mode 100644 index 0000000000..f8e729ad66 --- /dev/null +++ b/script/install-cuda-prebuilt/_cm.json @@ -0,0 +1,106 @@ +{ + "alias": "install-cuda-prebuilt", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "CUDA automation", + "cache": true, + "default_version": "11.8.0", + "deps": [ + { + "tags": "detect,os" + } + ], + "input_mapping": { + "local_run_file_path": "CUDA_RUN_FILE_LOCAL_PATH" + }, + "default_env": { + "CM_SUDO": "sudo" + }, + "prehook_deps": [ + { + "tags": "download,file", + "names": [ "download-script" ], + "update_tags_from_env_with_prefix": { + "_url.": [ "WGET_URL" ] + }, + "env": { + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_CUDA_RUN_FILE_PATH", + "CM_DOWNLOAD_LOCAL_FILE_PATH": "<<>>" + }, + "force_cache": true, + "extra_cache_tags": "cuda,run,file" + } + ], + "post_deps": [ + { + "skip_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "tags": "get,cuda" + } + ], + "new_env_keys": [ + "CM_CUDA_*", + "CM_NVCC_*" + ], + "tags": [ + "install", + "prebuilt", + "cuda", + "prebuilt-cuda", + "install-prebuilt-cuda" + ], + "uid": "14eadcd42ba340c3", + "versions": { + "11.7.0": { + "env": { + "CM_CUDA_LINUX_FILENAME": "cuda_11.7.0_515.43.04_linux.run" + } + }, + "11.8.0": { + "env": { + "CM_CUDA_LINUX_FILENAME": "cuda_11.8.0_520.61.05_linux.run" + } + }, + "12.0.0": { + "env": { + "CM_CUDA_LINUX_FILENAME": "cuda_12.0.0_525.60.13_linux.run" + } + }, + "12.1.1": { + "env": { + "CM_CUDA_LINUX_FILENAME": "cuda_12.1.1_530.30.02_linux.run" + } + }, + "12.2.0": { + "env": { + "CM_CUDA_LINUX_FILENAME": 
"cuda_12.2.0_535.54.03_linux.run" + } + }, + "12.3.2": { + "env": { + "CM_CUDA_LINUX_FILENAME": "cuda_12.3.2_545.23.08_linux.run" + } + } + }, + "variations": { + "no-driver": { + "group": "install-driver", + "default": true, + "env": { + "CM_CUDA_INSTALL_DRIVER": "no" + } + }, + "driver": { + "group": "install-driver", + "env": { + "CM_CUDA_INSTALL_DRIVER": "yes" + } + } + }, + "docker": { + "run": true + } +} diff --git a/script/install-cuda-prebuilt/customize.py b/script/install-cuda-prebuilt/customize.py new file mode 100644 index 0000000000..a9dafa2e9a --- /dev/null +++ b/script/install-cuda-prebuilt/customize.py @@ -0,0 +1,32 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + version = env.get('CM_VERSION') + if version not in env.get('CM_CUDA_LINUX_FILENAME', ''): + return {'return': 1, 'error': "Only CUDA versions 11.7.0, 11.8.0, 12.0.0, 12.1.1 and 12.2.3 are supported now!"} + + recursion_spaces = i['recursion_spaces'] + nvcc_bin = "nvcc" + + env['WGET_URL']="https://developer.download.nvidia.com/compute/cuda/"+env['CM_VERSION']+"/local_installers/"+env['CM_CUDA_LINUX_FILENAME'] + + extra_options = env.get('CUDA_ADDITIONAL_INSTALL_OPTIONS', '') + if env.get('CM_CUDA_INSTALL_DRIVER','') == "yes": + extra_options += " --driver" + env['CUDA_ADDITIONAL_INSTALL_OPTIONS'] = extra_options + + env['CM_CUDA_INSTALLED_PATH'] = os.path.join(os.getcwd(), 'install') + env['CM_NVCC_BIN_WITH_PATH'] = os.path.join(os.getcwd(), 'install', 'bin', nvcc_bin) + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_NVCC_BIN_WITH_PATH'] + + # Set CUDA_RUN_FILE_LOCAL_PATH to empty if not set for backwards compatibility in download file + env['CUDA_RUN_FILE_LOCAL_PATH'] = env.get('CUDA_RUN_FILE_LOCAL_PATH','') + + return {'return':0} diff --git a/script/install-cuda-prebuilt/run.sh b/script/install-cuda-prebuilt/run.sh new file mode 100644 index 0000000000..87170aa400 --- /dev/null +++ b/script/install-cuda-prebuilt/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +CUR=${PWD} + +INSTALL_DIR=${CUR}/install + +cmd="${CM_SUDO} bash ${CM_CUDA_RUN_FILE_PATH} --toolkitpath=${INSTALL_DIR} --defaultroot=${INSTALL_DIR} --toolkit ${CUDA_ADDITIONAL_INSTALL_OPTIONS} --silent --override" +echo "${cmd}" +eval "${cmd}" + diff --git a/script/install-gcc-src/README.md b/script/install-gcc-src/README.md new file mode 100644 index 0000000000..bfa3bd5e7e --- /dev/null +++ b/script/install-gcc-src/README.md @@ -0,0 +1,128 @@ +Automatically generated README for this automation recipe: **install-gcc-src** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-gcc-src,faae0ebd6e1242db) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gcc-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,src,gcc,src-gcc* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install src gcc src-gcc" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,src,gcc,src-gcc` + +`cm run script --tags=install,src,gcc,src-gcc ` + +*or* + +`cmr "install src gcc src-gcc"` + +`cmr "install src gcc src-gcc " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,src,gcc,src-gcc',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
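+As a sketch, selecting one of the versions listed below (here the `master` branch) is assumed to work through the standard `version` input:
+
+```python
+import cmind
+
+# Build GCC from the master branch instead of the default release
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,src,gcc,src-gcc',
+                  'version':'master',
+                  'out':'con'})
+if r['return']>0:
+    raise SystemExit(r['error'])
+```
+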
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,src,gcc,src-gcc"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,src,gcc,src-gcc) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install src gcc src-gcc" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +#### Versions +Default version: `12` + +* `master` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gcc-src/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gcc-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gcc-src/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gcc-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gcc-src/_cm.json) + 1. Run "postrocess" function from customize.py + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gcc-src/_cm.json)*** + * get,gcc + * `if (CM_REQUIRE_INSTALL != yes)` + - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc) + +___ +### Script output +`cmr "install src gcc src-gcc " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/install-gcc-src/_cm.json b/script/install-gcc-src/_cm.json new file mode 100644 index 0000000000..f0a917fa97 --- /dev/null +++ b/script/install-gcc-src/_cm.json @@ -0,0 +1,40 @@ +{ + "alias": "install-gcc-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Compiler automation", + "cache": true, + "default_version": "12", + "deps": [ + { + "tags": "detect,os" + } + ], + "env": { + "CM_GIT_URL": "git://gcc.gnu.org/git/gcc.git" + }, + "post_deps": [ + { + "skip_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "tags": "get,gcc" + } + ], + "tags": [ + "install", + "src", + "gcc", + "src-gcc" + ], + "uid": "faae0ebd6e1242db", + "versions": { + "master": { + "env": { + "CM_GIT_CHECKOUT": "master" + } + } + } +} diff --git a/script/install-gcc-src/customize.py b/script/install-gcc-src/customize.py new file mode 100644 index 0000000000..caff463edc --- /dev/null +++ b/script/install-gcc-src/customize.py @@ -0,0 +1,28 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + need_version = env.get('CM_VERSION','') + if need_version == '': + return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} + + print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + + if 'CM_GIT_CHECKOUT' not in env: + env['CM_GIT_CHECKOUT'] = 'releases/gcc-' + need_version + + env['CM_GCC_INSTALLED_PATH'] = os.path.join(os.getcwd(), 'install', 'bin') + + return {'return':0} diff --git a/script/install-gcc-src/run.sh b/script/install-gcc-src/run.sh new file mode 100644 index 0000000000..472f4e9c1a --- /dev/null +++ b/script/install-gcc-src/run.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +CUR_DIR=$PWD + +echo "******************************************************" + +if [ ! -d "src" ]; then + echo "Cloning GCC from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT}..." 
+ git clone -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} src + if [ "${?}" != "0" ]; then exit 1; fi +fi + +mkdir -p install +mkdir -p build + +INSTALL_DIR="${CUR_DIR}/install" + +echo "******************************************************" +cd src +./contrib/download_prerequisites +cd ../build + +../src/configure --prefix="${INSTALL_DIR}" --with-gcc-major-version-only --disable-multilib + +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" +CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} +CM_MAKE_CORES=${CM_MAKE_CORES:-2} + +make -j${CM_MAKE_CORES} +if [ "${?}" != "0" ]; then exit 1; fi +make install +if [ "${?}" != "0" ]; then exit 1; fi + +# Clean build directory (too large) +cd ${CUR_DIR} +rm -rf build + +echo "******************************************************" +echo "GCC was built and installed to ${INSTALL_DIR} ..." diff --git a/script/install-generic-conda-package/README.md b/script/install-generic-conda-package/README.md new file mode 100644 index 0000000000..01e962f232 --- /dev/null +++ b/script/install-generic-conda-package/README.md @@ -0,0 +1,160 @@ +Automatically generated README for this automation recipe: **install-generic-conda-package** + +Category: **Python automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-generic-conda-package,d9275487f5314195) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-generic-conda-package)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,install,generic,generic-conda-lib,conda-lib,conda-package,generic-conda-package* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get install generic generic-conda-lib conda-lib conda-package generic-conda-package" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,install,generic,generic-conda-lib,conda-lib,conda-package,generic-conda-package` + +`cm run script --tags=get,install,generic,generic-conda-lib,conda-lib,conda-package,generic-conda-package[,variations] ` + +*or* + +`cmr "get install generic generic-conda-lib conda-lib conda-package generic-conda-package"` + +`cmr "get install generic generic-conda-lib conda-lib conda-package generic-conda-package [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,install,generic,generic-conda-lib,conda-lib,conda-package,generic-conda-package',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
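+For instance, the wildcard variations listed under "Variations" below take a concrete value in place of `#`; a hypothetical call installing `numpy` from the `conda-forge` channel would be:
+
+```python
+import cmind
+
+# '_package.numpy' fills the '_package.#' wildcard and
+# '_source.conda-forge' fills '_source.#'
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,install,generic,generic-conda-lib,conda-lib,conda-package,'
+                         'generic-conda-package,_package.numpy,_source.conda-forge',
+                  'out':'con'})
+if r['return']>0:
+    raise SystemExit(r['error'])
+```
+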
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,install,generic,generic-conda-lib,conda-lib,conda-package,generic-conda-package"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,install,generic,generic-conda-lib,conda-lib,conda-package,generic-conda-package) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get install generic generic-conda-lib conda-lib conda-package generic-conda-package[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_name.#` + - Workflow: + * `_package.#` + - Environment variables: + - *CM_CONDA_PKG_NAME*: `#` + - Workflow: + +
+ + + * Group "**package-source**" +
+ Click here to expand this section. + + * `_source.#` + - Environment variables: + - *CM_CONDA_PKG_SRC*: `#` + - Workflow: + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-generic-conda-package/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,conda + * CM names: `--adr.['conda']...` + - CM script: [get-conda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-conda) + * get,conda + * CM names: `--adr.['conda']...` + - CM script: [get-conda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-conda) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-generic-conda-package/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-generic-conda-package/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-generic-conda-package/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-generic-conda-package/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-generic-conda-package/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-generic-conda-package/_cm.json) + +___ +### Script output +`cmr "get install generic generic-conda-lib conda-lib conda-package generic-conda-package [,variations]" -j` +#### New environment keys (filter) + +* `CM_PYTHONLIB_*` +#### New environment keys auto-detected from customize diff --git a/script/install-generic-conda-package/_cm.json b/script/install-generic-conda-package/_cm.json new file mode 100644 index 0000000000..7cc13f5e28 --- /dev/null +++ b/script/install-generic-conda-package/_cm.json @@ -0,0 +1,70 @@ +{ + "alias": "install-generic-conda-package", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Python automation", + "clean_files": [], + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "names": [ + "conda" + ], + "tags": "get,conda" + }, + { + "names": [ + "conda" + ], + "tags": "get,conda" + } + ], + "extra_cache_tags_from_env": [ + { + "env": "CM_PYTHON_CACHE_TAGS", + "prefix": "python-" + } + ], + "local_env_keys": [ + "CM_GENERIC_PYTHON_PACKAGE_VARIANT" + ], + "new_env_keys": [ + "CM_PYTHONLIB_*" + ], + "tags": [ + "get", + "install", + "generic", + "generic-conda-lib", + "conda-lib", + "conda-package", + "generic-conda-package" + ], + "uid": "d9275487f5314195", + "variations": { + "package.#": { + "env": { + "CM_CONDA_PKG_NAME": "#" + } + }, + "source.#": { + "group": "package-source", + "env": { + "CM_CONDA_PKG_SRC": "#" + } + }, + "name.#": { + "ad": { + "conda": { + "tags": "_name.#" + } + } + } + } +} diff --git a/script/install-generic-conda-package/customize.py b/script/install-generic-conda-package/customize.py new file mode 100644 index 0000000000..5f7905d592 --- /dev/null +++ b/script/install-generic-conda-package/customize.py @@ -0,0 +1,41 @@ +from cmind import utils +import os +import cmind as cm + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + meta = i['meta'] + automation = 
i['automation'] + run_script_input = i['run_script_input'] + + version_string = env.get('CM_TMP_PIP_VERSION_STRING', '').strip() + package_name = env['CM_CONDA_PKG_NAME'].strip() + + install_cmd = env['CM_CONDA_BIN_WITH_PATH'] + " install -y " + if env.get('CM_CONDA_PKG_SRC', '') != '': + install_cmd += " -c "+env['CM_CONDA_PKG_SRC'] + " " + + install_cmd += package_name + install_cmd += version_string + + env['CM_CONDA_PKG_INSTALL_CMD'] = install_cmd + + + return {'return':0} + +def detect_version(i): + + # TBD + print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return':0, 'version':version} + + +def postprocess(i): + + env = i['env'] + version = env.get('CM_VERSION', '') + + return {'return':0, 'version': version} diff --git a/script/install-generic-conda-package/run.sh b/script/install-generic-conda-package/run.sh new file mode 100644 index 0000000000..68a48d9ee5 --- /dev/null +++ b/script/install-generic-conda-package/run.sh @@ -0,0 +1,7 @@ +#!/bin/bash + + +cmd="${CM_CONDA_PKG_INSTALL_CMD}" +echo $cmd +eval $cmd +test $? -eq 0 || exit $? diff --git a/script/install-gflags/README.md b/script/install-gflags/README.md new file mode 100644 index 0000000000..695bf28180 --- /dev/null +++ b/script/install-gflags/README.md @@ -0,0 +1,129 @@ +Automatically generated README for this automation recipe: **install-gflags** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-gflags,10bb562c29ea459e) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gflags)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,src,get,gflags* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install src get gflags" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,src,get,gflags` + +`cm run script --tags=install,src,get,gflags ` + +*or* + +`cmr "install src get gflags"` + +`cmr "install src get gflags " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,src,get,gflags',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
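+As a sketch, build-time environment keys such as `CM_MAKE_CORES` (used by the native `run.sh` of this script) are assumed to be overridable through the `env` dictionary:
+
+```python
+import cmind
+
+# Limit the parallel build to 8 cores (hypothetical override)
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,src,get,gflags',
+                  'env':{'CM_MAKE_CORES':'8'},
+                  'out':'con'})
+if r['return']>0:
+    raise SystemExit(r['error'])
+```
+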
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,src,get,gflags"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,src,get,gflags) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install src get gflags" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +#### Versions +Default version: `2.2.2` + +* `2.2.2` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gflags/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,cmake + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gflags/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gflags/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gflags/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gflags/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gflags/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-gflags/_cm.json) + +___ +### Script output +`cmr "install src get gflags " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/install-gflags/_cm.json b/script/install-gflags/_cm.json new file mode 100644 index 0000000000..f34de616e3 --- /dev/null +++ b/script/install-gflags/_cm.json @@ -0,0 +1,37 @@ +{ + "alias": "install-gflags", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "default_version": "2.2.2", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "tags": "get,cmake", + "version_min": "3.1" + } + ], + "env": {}, + "new_env_keys": [ + ], + "tags": [ + "install", + "src", + "get", + "gflags" + ], + "uid": "10bb562c29ea459e", + "versions": { + "2.2.2": { + "env": { + "CM_VERSION": "2.2.2" + } + } + } +} diff --git a/script/install-gflags/customize.py b/script/install-gflags/customize.py new file mode 100644 index 0000000000..65872c79a0 --- /dev/null +++ b/script/install-gflags/customize.py @@ -0,0 +1,28 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + need_version = env.get('CM_VERSION','') + if need_version == '': + return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} + + print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + + return {'return':0} + +def postprocess(i): + inp = i['input'] + env = i['env'] + return {'return':0} diff --git a/script/install-gflags/run.sh b/script/install-gflags/run.sh new file mode 100644 index 0000000000..881eb6b75d --- /dev/null +++ b/script/install-gflags/run.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +CUR_DIR=$PWD + +echo "***********************************************************" +CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} +CM_MAKE_CORES=${CM_MAKE_CORES:-2} 
+CM_WGET_URL=https://github.com/gflags/gflags/archive/refs/tags/v${CM_VERSION}.tar.gz +wget -nc ${CM_WGET_URL} +test $? -eq 0 || exit 1 +tar -xzf "v${CM_VERSION}.tar.gz" && cd gflags-${CM_VERSION} +test $? -eq 0 || exit 1 +rm -rf build +mkdir build && cd build +cmake .. +make -j${CM_MAKE_CORES} +test $? -eq 0 || exit 1 +sudo make install diff --git a/script/install-github-cli/README.md b/script/install-github-cli/README.md new file mode 100644 index 0000000000..df2074423d --- /dev/null +++ b/script/install-github-cli/README.md @@ -0,0 +1,123 @@ +Automatically generated README for this automation recipe: **install-github-cli** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-github-cli,cd948ec309344bf8) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-github-cli)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,gh,github,cli,github-cli* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install gh github cli github-cli" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,gh,github,cli,github-cli` + +`cm run script --tags=install,gh,github,cli,github-cli ` + +*or* + +`cmr "install gh github cli github-cli"` + +`cmr "install gh github cli github-cli " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,gh,github,cli,github-cli',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
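+Since this script has no variations or versions, the tags alone are enough; `quiet` is assumed here to suppress interactive prompts:
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,gh,github,cli,github-cli',
+                  'quiet':True,
+                  'out':'con'})
+if r['return']>0:
+    raise SystemExit(r['error'])
+```
+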
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,gh,github,cli,github-cli"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,gh,github,cli,github-cli) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install gh github cli github-cli" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-github-cli/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-github-cli/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-github-cli/_cm.json) + 1. ***Run native script if exists*** + * [run-macos.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-github-cli/run-macos.sh) + * [run-rhel.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-github-cli/run-rhel.sh) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-github-cli/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-github-cli/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-github-cli/_cm.json) + +___ +### Script output +`cmr "install gh github cli github-cli " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/install-github-cli/_cm.json b/script/install-github-cli/_cm.json new file mode 100644 index 0000000000..0cd47b5db0 --- /dev/null +++ b/script/install-github-cli/_cm.json @@ -0,0 +1,21 @@ +{ + "alias": "install-github-cli", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "clean_files": [], + "deps": [ + { + "tags": "detect,os" + } + ], + "tags": [ + "install", + "gh", + "github", + "cli", + "github-cli" + ], + "uid": "cd948ec309344bf8" +} diff --git a/script/install-github-cli/customize.py b/script/install-github-cli/customize.py new file mode 100644 index 0000000000..cd7d65a35b --- /dev/null +++ b/script/install-github-cli/customize.py @@ -0,0 +1,13 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + env['CM_TMP_PATH'] = os.path.join(os.getcwd(), 'install', 'bin') + env['CM_TMP_FAIL_IF_NOT_FOUND'] = 'yes' + + return {'return':0} diff --git a/script/install-github-cli/run-macos.sh b/script/install-github-cli/run-macos.sh new file mode 100644 index 0000000000..6a329e98cb --- /dev/null +++ b/script/install-github-cli/run-macos.sh @@ -0,0 +1 @@ +brew install gh diff --git a/script/install-github-cli/run-rhel.sh b/script/install-github-cli/run-rhel.sh new file mode 100644 index 0000000000..e3ef08f5c0 --- /dev/null +++ b/script/install-github-cli/run-rhel.sh @@ -0,0 +1,3 @@ +sudo dnf install -y 'dnf-command(config-manager)' +sudo dnf config-manager --add-repo https://cli.github.com/packages/rpm/gh-cli.repo +sudo dnf install -y gh diff --git a/script/install-github-cli/run.sh b/script/install-github-cli/run.sh new file mode 100644 index 0000000000..74aa873d13 --- /dev/null +++ b/script/install-github-cli/run.sh @@ -0,0 +1,7 @@ +#!/bin/bash +curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \ +&& sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \ +&& echo "deb [arch=$(dpkg --print-architecture) 
signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null \ +&& sudo apt update \ +&& sudo apt install gh -y +test $? -eq 0 || exit 1 diff --git a/script/install-ipex-from-src/README.md b/script/install-ipex-from-src/README.md new file mode 100644 index 0000000000..cd53770cd9 --- /dev/null +++ b/script/install-ipex-from-src/README.md @@ -0,0 +1,198 @@ +Automatically generated README for this automation recipe: **install-ipex-from-src** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-ipex-from-src,09364fff2bf04516) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-ipex-from-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,get,src,from.src,ipex,src-ipex* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install get src from.src ipex src-ipex" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,get,src,from.src,ipex,src-ipex` + +`cm run script --tags=install,get,src,from.src,ipex,src-ipex[,variations] ` + +*or* + +`cmr "install get src from.src ipex src-ipex"` + +`cmr "install get src from.src ipex src-ipex [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,get,src,from.src,ipex,src-ipex',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
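+For example, a hypothetical call that builds IPEX for the Intel MLPerf inference v3.1 GPT-J pipeline via the variation listed under "Variations" below:
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,get,src,from.src,ipex,src-ipex,'
+                         '_for-intel-mlperf-inference-v3.1-gptj',
+                  'out':'con'})
+if r['return']>0:
+    raise SystemExit(r['error'])
+```
+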
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,get,src,from.src,ipex,src-ipex"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,get,src,from.src,ipex,src-ipex) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install get src from.src ipex src-ipex[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_branch.#` + - Environment variables: + - *CM_GIT_CHECKOUT*: `#` + - Workflow: + * `_for-intel-mlperf-inference-v3.1-gptj` + - Environment variables: + - *CM_CONDA_ENV*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,conda,_name.gptj-pt + * CM names: `--adr.['conda']...` + - CM script: [get-conda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-conda) + * get,generic,conda-package,_package.python + * CM names: `--adr.['conda-package', 'python3']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.wheel,_source.conda-forge + * CM names: `--adr.['conda-package', 'wheel']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.setuptools,_source.conda-forge + * CM names: `--adr.['conda-package', 'setuptools']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * install,llvm,src,_for-intel-mlperf-inference-v3.1-gptj + - CM script: [install-llvm-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-llvm-src) + * `_sha.#` + - Environment variables: + - *CM_GIT_CHECKOUT_SHA*: `#` + - Workflow: + * `_tag.#` + - Environment variables: + - *CM_GIT_CHECKOUT_TAG*: `#` + - Workflow: + +
+ + + * Group "**repo**" +
+ Click here to expand this section. + + * `_repo.#` + - Environment variables: + - *CM_GIT_URL*: `#` + - Workflow: + * **`_repo.https://github.com/intel/intel-extension-for-pytorch`** (default) + - Environment variables: + - *CM_GIT_URL*: `https://github.com/intel/intel-extension-for-pytorch` + - Workflow: + +
+ + +#### Default variations + +`_repo.https://github.com/intel/intel-extension-for-pytorch` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-ipex-from-src/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,python3 + * `if (CM_CONDA_ENV != yes)` + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,pytorch,from.src + * `if (CM_CONDA_ENV != yes)` + * CM names: `--adr.['pytorch']...` + - CM script: [install-pytorch-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pytorch-from-src) + * get,git,repo + * CM names: `--adr.['ipex-src-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-ipex-from-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-ipex-from-src/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-ipex-from-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-ipex-from-src/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-ipex-from-src/_cm.json) + +___ +### Script output +`cmr "install get src from.src ipex src-ipex [,variations]" -j` +#### New environment keys (filter) + +* `CM_IPEX_*` +#### New environment keys auto-detected from customize diff --git a/script/install-ipex-from-src/_cm.json b/script/install-ipex-from-src/_cm.json new file mode 100644 index 0000000000..f9774e143f --- /dev/null +++ b/script/install-ipex-from-src/_cm.json @@ -0,0 +1,164 @@ +{ + "alias": "install-ipex-from-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Compiler automation", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "names": [ + "python", + "python3" + ], + "skip_if_env": { + "CM_CONDA_ENV": [ + "yes" + ] + }, + "tags": "get,python3" + }, + { + "names": [ + "pytorch" + ], + "skip_if_env": { + "CM_CONDA_ENV": [ + "yes" + ] + }, + "tags": "get,pytorch,from.src" + }, + { + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_IPEX_SRC_REPO_PATH" + }, + "extra_cache_tags": "ipex,src,ipex-src,ipex-src-repo", + "names": [ + "ipex-src-repo" + ], + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_branch.": [ + "CM_GIT_CHECKOUT" + ], + "_repo.": [ + "CM_GIT_URL" + ], + "_sha.": [ + "CM_GIT_CHECKOUT_SHA" + ], + "_tag.": [ + "CM_GIT_CHECKOUT_TAG" + ] + } + } + ], + "env": { + }, + "name": "Build IPEX from sources", + "new_env_keys": [ + "CM_IPEX_*" + ], + "prehook_deps": [], + "sort": 1000, + "tags": [ + "install", + "get", + "src", + "from.src", + "ipex", + "src-ipex" + ], + "uid": "09364fff2bf04516", + "variations": { + "branch.#": { + "env": { + "CM_GIT_CHECKOUT": "#" + } + }, + "for-intel-mlperf-inference-v3.1-gptj": { + "adr": { + "conda-package": { + "tags": "_name.gptj-pt" + }, + "pytorch": { + "tags": 
"_for-intel-mlperf-inference-v3.1-gptj" + } + }, + "base": [ + "branch.v2.1.0.dev+cpu.llm.mlperf" + ], + "deps": [ + { + "names": [ + "conda" + ], + "tags": "get,conda,_name.gptj-pt" + }, + { + "names": [ + "conda-package", + "python3" + ], + "tags": "get,generic,conda-package,_package.python", + "version": "3.9" + }, + { + "names": [ + "conda-package", + "wheel" + ], + "tags": "get,generic,conda-package,_package.wheel,_source.conda-forge" + }, + { + "names": [ + "conda-package", + "setuptools" + ], + "tags": "get,generic,conda-package,_package.setuptools,_source.conda-forge" + }, + { + "tags": "install,llvm,src,_for-intel-mlperf-inference-v3.1-gptj" + } + ], + "env": { + "CM_CONDA_ENV": "yes" + } + }, + "repo.#": { + "env": { + "CM_GIT_URL": "#" + }, + "group": "repo" + }, + "repo.https://github.com/intel/intel-extension-for-pytorch": { + "default": true, + "env": { + "CM_GIT_URL": "https://github.com/intel/intel-extension-for-pytorch" + }, + "group": "repo" + }, + "sha.#": { + "env": { + "CM_GIT_CHECKOUT_SHA": "#" + } + }, + "tag.#": { + "ad": { + "ipex-src-repo": { + "tags": "_no-recurse-submodules,_full-history" + } + }, + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + } + } + }, + "versions": {} +} diff --git a/script/install-ipex-from-src/customize.py b/script/install-ipex-from-src/customize.py new file mode 100644 index 0000000000..a595ff7efd --- /dev/null +++ b/script/install-ipex-from-src/customize.py @@ -0,0 +1,23 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + env['IPEX_DIR'] = env['CM_IPEX_SRC_REPO_PATH'] + env['DNNL_GRAPH_BUILD_COMPILER_BACKEND'] = 1 + env['USE_LLVM'] = env['CM_LLVM_INSTALLED_PATH'] + env['LLVM_DIR'] = os.path.join(env['CM_LLVM_INSTALLED_PATH'], "lib", "cmake", "llvm") + + run_cmd="python setup.py clean && python setup.py install" + + env['CM_RUN_DIR'] = env['IPEX_DIR'] + env['CM_RUN_CMD'] = run_cmd + + return {'return':0} diff --git a/script/install-ipex-from-src/run.sh b/script/install-ipex-from-src/run.sh new file mode 100644 index 0000000000..d426d4004e --- /dev/null +++ b/script/install-ipex-from-src/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:${PATH} + +cd ${CM_RUN_DIR} +echo ${CM_RUN_CMD} +eval ${CM_RUN_CMD} + +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" diff --git a/script/install-llvm-prebuilt/README-extra.md b/script/install-llvm-prebuilt/README-extra.md new file mode 100644 index 0000000000..1ad1e122be --- /dev/null +++ b/script/install-llvm-prebuilt/README-extra.md @@ -0,0 +1,99 @@ +# Get LLVM +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed llvm on the system and if not found calls the [install script for llvm](../script/install-llvm-prebuilt). + +## Exported Variables +* `CM_LLVM_CLANG_BIN` +* `CM_LLVM_CLANG_BIN_WITH_PATH` +* `CM_C_COMPILER_BIN` +* `CM_C_COMPILER_WITH_PATH` +* `CM_CXX_COMPILER_BIN` +* `CM_CXX_COMPILER_WITH_PATH` +* `CM_COMPILER_*` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 +3. 
Windows 10, 11 + +# CLI + +## Default +```bash +cm run script "install llvm prebuilt" +``` +or +```bash +cm run script --tags=get,llvm +``` + +## Version + +```bash +cm run script "install llvm prebuilt" --version=14.0.0 +``` + +## Version min +```bash +cm run script "install llvm prebuilt" --version_min=12.0.0 +``` + +## Version max +```bash +cm run script "install llvm prebuilt" --version_max=13.999.999 --version_max_usable=13.0.0 +``` + +## Force new detection even if llvm is already found and cached +```bash +cm run script "install llvm prebuilt" --new +``` + +## Test + +```bash +cm run script "app image corner-detection" +``` + +## Reproducibility matrix + +*Test detection and installation on different platforms* + +* Windows, Linux, MacOS + +### Ubuntu 22.04 + +* 17.0.6 +* 17.0.5 +* 17.0.4 +* 17.0.2 +* 16.0.4 +* 16.0.0 + `sudo apt install libncurses5` +* 15.0.6 +* 14.0.0 + + +### RHEL 9 + +#### v14.0.0: ✓ + +```bash +cm rm cache -f +cm run script "install llvm prebuilt" --version=14.0.0 +cm run script "app image corner-detection" +``` + +#### v13.0.0: Need special command + +```bash +cm rm cache -f +cm run script "install llvm prebuilt" --version=13.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-13.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz +cm run script "app image corner-detection" +``` + +#### v12.0.0: Need special command + +```bash +cm rm cache -f +cm run script "install llvm prebuilt" --version=12.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz +cm run script "app image corner-detection" +``` diff --git a/script/install-llvm-prebuilt/README.md b/script/install-llvm-prebuilt/README.md new file mode 100644 index 0000000000..24c9573349 --- /dev/null +++ b/script/install-llvm-prebuilt/README.md @@ -0,0 +1,138 @@ +Automatically generated README for this automation recipe: **install-llvm-prebuilt** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-llvm-prebuilt,cda9094971724a0a) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-prebuilt)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,prebuilt,llvm,prebuilt-llvm,install-prebuilt-llvm* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,prebuilt,llvm,prebuilt-llvm,install-prebuilt-llvm` + +`cm run script --tags=install,prebuilt,llvm,prebuilt-llvm,install-prebuilt-llvm ` + +*or* + +`cmr "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm"` + +`cmr "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,prebuilt,llvm,prebuilt-llvm,install-prebuilt-llvm',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,prebuilt,llvm,prebuilt-llvm,install-prebuilt-llvm"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,prebuilt,llvm,prebuilt-llvm,install-prebuilt-llvm) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags.
+
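+For example, the reproducibility notes in [README-extra.md](README-extra.md) use this mechanism to force a specific prebuilt package file name:
+
+```bash
+cm run script "install llvm prebuilt" --version=13.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-13.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz
+```
+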
+ +#### Versions +Default version: `15.0.6` + +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-prebuilt/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-prebuilt/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-prebuilt/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-prebuilt/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-prebuilt/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-prebuilt/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-prebuilt/customize.py)*** + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-prebuilt/_cm.json)*** + * get,llvm + * `if (CM_REQUIRE_INSTALL != yes)` + - CM script: [get-llvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-llvm) + +___ +### Script output +`cmr "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm " -j` +#### New environment keys (filter) + +* `+CPLUS_INCLUDE_PATH` +* `+C_INCLUDE_PATH` +* `+LD_LIBRARY_PATH` +* `+PATH` +* `CM_COMPILER_NAME` +* `CM_LLVM_*` +#### New environment keys auto-detected from customize + +* `CM_LLVM_CLANG_BIN_WITH_PATH` +* `CM_LLVM_INSTALLED_PATH` +* `CM_LLVM_PACKAGE` \ No newline at end of file diff --git a/script/install-llvm-prebuilt/_cm.json b/script/install-llvm-prebuilt/_cm.json new file mode 100644 index 0000000000..369fc72b17 --- /dev/null +++ b/script/install-llvm-prebuilt/_cm.json @@ -0,0 +1,40 @@ +{ + "alias": "install-llvm-prebuilt", + "automation_alias": "script", + "category": "Compiler automation", + "automation_uid": "5b4e0237da074764", + "cache": true, + "default_version": "15.0.6", + "deps": [ + { + "tags": "detect,os" + } + ], + "name": "Install prebuilt LLVM compiler", + "new_env_keys": [ + "CM_LLVM_*", + "CM_COMPILER_NAME", + "+PATH", + "+LD_LIBRARY_PATH", + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH" + ], + "post_deps": [ + { + "skip_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "tags": "get,llvm" + } + ], + "tags": [ + "install", + "prebuilt", + "llvm", + "prebuilt-llvm", + "install-prebuilt-llvm" + ], + "uid": "cda9094971724a0a" +} diff --git a/script/install-llvm-prebuilt/customize.py b/script/install-llvm-prebuilt/customize.py new file mode 100644 index 0000000000..1550c0ed9e --- /dev/null +++ b/script/install-llvm-prebuilt/customize.py @@ -0,0 +1,208 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + need_version = env.get('CM_VERSION','') + clang_file_name = "clang" + if need_version == '': + return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} + + print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + + host_os_bits = env['CM_HOST_OS_BITS'] + + if os_info['platform'] != 'windows': + 
host_os_machine = env['CM_HOST_OS_MACHINE'] # ABI + + # Prepare package name + # First check if it is forced by external environment + package_name = env.get('CM_LLVM_PACKAGE','').strip() + if package_name == '': + need_version_split = need_version.split('.') + + # If package_name if not forced, attempt to synthesize it based on OS and arch + if os_info['platform'] == 'darwin': + force_arch = env.get('CM_LLVM_PACKAGE_FORCE_ARCH','') # To allow x86_64 if needed + if force_arch == '': force_arch = 'arm64' + force_darwin_version = env.get('CM_LLVM_PACKAGE_FORCE_DARWIN_VERSION','') + if force_darwin_version == '': + if len(need_version_split)>0: + hver = 0 + try: + hver = int(need_version_split[0]) + except: + pass + + if hver>0 and hver<16: + force_darwin_version = '21.0' + else: + force_darwin_version = '22.0' + package_name = 'clang+llvm-' + need_version + '-'+force_arch+'-apple-darwin'+force_darwin_version+'.tar.xz' + + elif os_info['platform'] == 'windows': + package_name = 'LLVM-' + need_version + '-win' + host_os_bits + '.exe' + clang_file_name = "clang.exe" + + print('') + print('WARNING: Please copy the following path and then paste it') + print(' when LLVM installer asks you about the "Destination Folder":') + print('') + print(os.getcwd()) + print('') + input('Press Enter to continue!') + + else: + if host_os_machine.startswith('arm') or host_os_machine.startswith('aarch'): + if host_os_bits=='64': + package_name = 'clang+llvm-' + need_version + '-aarch64-linux-gnu.tar.xz' + else: + package_name = 'clang+llvm-' + need_version + '-armv7a-linux-gnueabihf.tar.xz' + else: + host_os_flavor = env['CM_HOST_OS_FLAVOR'] + + host_os_version = env['CM_HOST_OS_VERSION'] + +# if 'debian' in host_os_flavor: +# return {'return':1, 'error':'debian is not supported yet'} +# +# else: + # Treat all Linux flavours as Ubuntu for now ... 
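+            # The if/elif chain below pins each known LLVM release to the Ubuntu
+            # version embedded in the official prebuilt tarball name
+            # (clang+llvm-<version>-x86_64-linux-gnu-ubuntu-<os>.tar.xz):
+            # broadly 18.04 for releases before 16.x and 22.04 afterwards,
+            # with per-version exceptions.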
+ + if True: + default_os = '22.04' + + if len(need_version_split)>0: + hver = 0 + try: + hver = int(need_version_split[0]) + except: + pass + + if hver>0: + if hver<16: + default_os='18.04' + else: + default_os='22.04' + + if need_version == '10.0.1': + default_os = '16.04' + + elif need_version == '11.0.0': + default_os = '20.04' + + elif need_version == '11.0.1': + default_os = '16.04' + if host_os_version == '20.10': + default_os = '20.10' + + elif need_version == '12.0.0': + default_os = '16.04' + if host_os_version == '20.04' or host_os_version == '20.10': + default_os = '20.04' + + elif need_version == '12.0.1': + default_os = '16.04' + #if host_os_version.startswith('18') or host_os_version.startswith('20'): + # default_os = '18.04' + + elif need_version == '13.0.0': + default_os = '16.04' + if host_os_version.startswith('20'): + default_os = '20.04' + + elif need_version == '13.0.1': + default_os = '18.04' + + elif need_version == '14.0.0': + default_os = '18.04' + + elif need_version == '15.0.6': + default_os = '18.04' + + elif need_version == '16.0.0': + default_os = '18.04' + + elif need_version == '16.0.4': + default_os = '22.04' + + elif need_version == '17.0.2': + default_os = '22.04' + + elif need_version == '17.0.2': + default_os = '22.04' + + elif need_version == '17.0.4': + default_os = '22.04' + + elif need_version == '17.0.5': + default_os = '22.04' + + elif need_version == '17.0.6': + default_os = '22.04' + + package_name = 'clang+llvm-' + need_version + '-x86_64-linux-gnu-ubuntu-' + default_os + '.tar.xz' + + + package_url = 'https://github.com/llvm/llvm-project/releases/download/llvmorg-' + need_version + '/' + package_name + + print (recursion_spaces + ' # Prepared package URL: {}'.format(package_url)) + + print ('') + print ('Downloading from {} ...'.format(package_url)) + + cm = automation.cmind + + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', + 'url':package_url}) + if r['return']>0: return r + + filename = r['filename'] # 'clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz' # f['filename'] + + env['CM_LLVM_PACKAGE'] = filename + env['CM_LLVM_INSTALLED_PATH'] = os.path.join(os.getcwd(), 'bin') + env['CM_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join(os.getcwd(), 'bin', clang_file_name) + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_LLVM_CLANG_BIN_WITH_PATH'] + + # We don't need to check default paths here because we force install to cache + env['+PATH'] = [env['CM_LLVM_INSTALLED_PATH']] + + path_include = os.path.join(os.getcwd(), 'include') + if os.path.isdir(path_include): + env['+C_INCLUDE_PATH'] = [ path_include ] + + + return {'return':0} + +def postprocess(i): + + env = i['env'] + version = env['CM_VERSION'] + os_info = i['os_info'] + +# cur_dir = os.getcwd() +# cur_dir_include = os.path.join(cur_dir, 'include') + +# if os.path.isdir(cur_dir_include): +# if os_info['platform'] == 'darwin': +# if '+C_INCLUDE_PATH' not in env: +# env['+C_INCLUDE_PATH'] = [] +# if cur_dir_include not in env['+C_INCLUDE_PATH']: +# env['+C_INCLUDE_PATH'].append(cur_dir_include) +# +# if '+CPLUS_INCLUDE_PATH' not in env: +# env['+CPLUS_INCLUDE_PATH'] = [] +# if cur_dir_include not in env['+CPLUS_INCLUDE_PATH']: +# env['+CPLUS_INCLUDE_PATH'].append(cur_dir_include) + + + return {'return':0, 'version': version} diff --git a/script/install-llvm-prebuilt/run.bat b/script/install-llvm-prebuilt/run.bat new file mode 100644 index 0000000000..922a0d8edc --- /dev/null +++ b/script/install-llvm-prebuilt/run.bat @@ -0,0 +1,3 @@ +echo Running 
%CM_LLVM_PACKAGE% ... + +%CM_LLVM_PACKAGE% --help diff --git a/script/install-llvm-prebuilt/run.sh b/script/install-llvm-prebuilt/run.sh new file mode 100644 index 0000000000..1ace2bb27c --- /dev/null +++ b/script/install-llvm-prebuilt/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +echo "" +echo "Unarchiving ${CM_LLVM_PACKAGE} ..." + +tar --strip 1 -xf ${CM_LLVM_PACKAGE} +test $? -eq 0 || exit 1 + +rm -f ${CM_LLVM_PACKAGE} +test $? -eq 0 || exit 1 diff --git a/script/install-llvm-src/README.md b/script/install-llvm-src/README.md new file mode 100644 index 0000000000..7991e0ab8d --- /dev/null +++ b/script/install-llvm-src/README.md @@ -0,0 +1,291 @@ +Automatically generated README for this automation recipe: **install-llvm-src** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-llvm-src,2af16e9a6c5f4702) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,src,llvm,from.src,src-llvm* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install src llvm from.src src-llvm" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,src,llvm,from.src,src-llvm` + +`cm run script --tags=install,src,llvm,from.src,src-llvm[,variations] ` + +*or* + +`cmr "install src llvm from.src src-llvm"` + +`cmr "install src llvm from.src src-llvm [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,src,llvm,from.src,src-llvm',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
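+The `[variations]` shown in the CLI forms map onto the same `tags` string when calling from Python; a minimal sketch (the `_debug` variation here is only an illustration):
+
+```python
+import cmind
+
+# Variations are passed as extra "_"-prefixed tags in the same string.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'install,src,llvm,from.src,src-llvm,_debug',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```
+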
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,src,llvm,from.src,src-llvm"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,src,llvm,from.src,src-llvm) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install src llvm from.src src-llvm[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_branch.#` + - Environment variables: + - *CM_GIT_CHECKOUT*: `#` + - Workflow: + * `_for-intel-mlperf-inference-v3.1-bert` + - Environment variables: + - *CM_LLVM_CONDA_ENV*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,gcc + - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc) + * get,conda,_name.bert-pt + * CM names: `--adr.['conda']...` + - CM script: [get-conda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-conda) + * get,conda-package,_package.ncurses,_source.conda-forge + * CM names: `--adr.['conda-package', 'ncurses']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.ninja + * CM names: `--adr.['conda-package', 'ninja']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.cmake + * CM names: `--adr.['conda-package', 'cmake']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,conda-package,_package.llvm-openmp,_source.conda-forge + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,conda-package,_package.chardet + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge + * CM names: `--adr.['conda-package', 'libstdcxx-ng']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * `_for-intel-mlperf-inference-v3.1-gptj` + - Environment variables: + - *CM_LLVM_CONDA_ENV*: `yes` + - *CM_LLVM_16_INTEL_MLPERF_INFERENCE*: `yes` + - *USE_CUDA*: `0` + - *CUDA_VISIBLE_DEVICES*: `` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,generic-sys-util,_g++-12 + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,gcc + - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc) + * get,conda,_name.gptj-pt + * CM names: `--adr.['conda']...` + - CM script: [get-conda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-conda) + * get,generic,conda-package,_package.python + * CM names: `--adr.['conda-package', 'python']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,conda-package,_package.ncurses,_source.conda-forge + * CM names: `--adr.['conda-package', 'ncurses']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,conda-package,_package.chardet + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge + * CM names: `--adr.['conda-package', 'libstdcxx-ng']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.mkl,_source.intel + * CM names: `--adr.['conda-package', 'mkl']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.mkl-include,_source.intel + * CM names: `--adr.['conda-package', 'mkl-include']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.intel-openmp,_source.intel + * CM names: `--adr.['conda-package', 'intel-openmp']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.gperftools,_source.conda-forge + * CM names: `--adr.['conda-package', 'gperftools']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.pybind11,_source.conda-forge + * CM names: `--adr.['conda-package', 'pybind11']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic-python-lib,_custom-python,_package.torch,_url.git+https://github.com/pytorch/pytorch.git@927dc662386af052018212c7d01309a506fc94cd + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_custom-python,_package.setuptools + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_custom-python,_package.neural-compressor,_url.git+https://github.com/intel/neural-compressor.git@a2931eaa4052eec195be3c79a13f7bfa23e54473 + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_full-history` + - Workflow: + * `_runtimes.#` + - Environment variables: + - *CM_LLVM_ENABLE_RUNTIMES*: `#` + - 
Workflow: + * `_sha.#` + - Environment variables: + - *CM_GIT_CHECKOUT_SHA*: `#` + - Workflow: + * `_tag.#` + - Environment variables: + - *CM_GIT_CHECKOUT_TAG*: `#` + - Workflow: + +
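+
+  For example, combining a release-tag checkout with a debug build (both values appear in this script's metadata):
+
+  ```bash
+  cm run script --tags=install,src,llvm,from.src,src-llvm,_tag.llvmorg-15.0.7,_debug
+  ```
+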
+ + + * Group "**build-type**" +
+ Click here to expand this section. + + * `_debug` + - Environment variables: + - *CM_LLVM_BUILD_TYPE*: `debug` + - Workflow: + * **`_release`** (default) + - Environment variables: + - *CM_LLVM_BUILD_TYPE*: `release` + - Workflow: + +
+ + + * Group "**clang**" +
+ Click here to expand this section. + + * **`_clang`** (default) + - Environment variables: + - *CM_LLVM_ENABLE_PROJECTS*: `clang` + - Workflow: + +
+ + + * Group "**repo**" +
+ Click here to expand this section. + + * `_repo.#` + - Environment variables: + - *CM_GIT_URL*: `#` + - Workflow: + +
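+
+  For example, building from a fork of LLVM (the URL here is a placeholder for your own repository):
+
+  ```bash
+  cm run script --tags=install,src,llvm,from.src,src-llvm,_repo.https://github.com/<your-fork>/llvm-project
+  ```
+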
+ + +#### Default variations + +`_clang,_release` +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags.
+
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-src/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,cmake + * `if (CM_LLVM_CONDA_ENV != yes)` + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * get,generic-sys-util,_ninja-build + * `if (CM_LLVM_CONDA_ENV != yes)` + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,git,repo + * CM names: `--adr.['llvm-src-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-src/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-src/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-src/customize.py)*** + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-llvm-src/_cm.json)*** + * get,llvm + * `if (CM_REQUIRE_INSTALL != yes)` + - CM script: [get-llvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-llvm) + +___ +### Script output +`cmr "install src llvm from.src src-llvm [,variations]" -j` +#### New environment keys (filter) + +* `+C_INCLUDE_PATH` +* `+PATH` +* `CM_GET_DEPENDENT_CACHED_PATH` +* `CM_LLVM_*` +#### New environment keys auto-detected from customize + +* `CM_GET_DEPENDENT_CACHED_PATH` +* `CM_LLVM_CLANG_BIN_WITH_PATH` +* `CM_LLVM_CMAKE_CMD` +* `CM_LLVM_INSTALLED_PATH` \ No newline at end of file diff --git a/script/install-llvm-src/_cm.json b/script/install-llvm-src/_cm.json new file mode 100644 index 0000000000..3ae795695e --- /dev/null +++ b/script/install-llvm-src/_cm.json @@ -0,0 +1,307 @@ +{ + "alias": "install-llvm-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Compiler automation", + "cache": true, + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "tags": "get,cmake", + "skip_if_env": { + "CM_LLVM_CONDA_ENV": [ "yes" ] + } + }, + { + "tags": "get,generic-sys-util,_ninja-build", + "skip_if_env": { + "CM_LLVM_CONDA_ENV": [ "yes" ] + } + }, + { + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_repo.": [ "CM_GIT_URL" ], + "_branch.": [ "CM_GIT_CHECKOUT" ], + "_tag.": [ "CM_GIT_CHECKOUT_TAG" ], + "_tag.llvmorg-": [ "CM_VERSION" ], + "_sha.": [ "CM_GIT_CHECKOUT_SHA" ] + }, + "force_env_keys": [ + "CM_GIT_*" + ], + "names": [ + "llvm-src-repo" + ], + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_LLVM_SRC_REPO_PATH" + }, + "extra_cache_tags": "llvm,src,llvm-src,llvm-src-repo" + } + ], + "env": { + "CM_GIT_URL": "https://github.com/llvm/llvm-project" + }, + "name": "Build LLVM compiler from sources (can 
take >30 min)", + "new_env_keys": [ + "CM_LLVM_*", + "CM_GET_DEPENDENT_CACHED_PATH", + "+PATH", + "+C_INCLUDE_PATH" + ], + "prehook_deps": [ + ], + "post_deps": [ + { + "skip_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "tags": "get,llvm" + } + ], + "sort": 1000, + "tags": [ + "install", + "src", + "llvm", + "from.src", + "src-llvm" + ], + "uid": "2af16e9a6c5f4702", + "variations": { + "release": { + "group": "build-type", + "default": true, + "env": { + "CM_LLVM_BUILD_TYPE": "release" + } + }, + "debug": { + "group": "build-type", + "env": { + "CM_LLVM_BUILD_TYPE": "debug" + } + }, + "full-history": { + "ad": { + "llvm-src-repo": { + "tags": "_full-history" + } + } + }, + "repo.#": { + "group": "repo", + "env": { + "CM_GIT_URL": "#" + } + }, + "clang": { + "group": "clang", + "default": true, + "env": { + "CM_LLVM_ENABLE_PROJECTS": "clang" + } + }, + "runtimes.#": { + "env": { + "CM_LLVM_ENABLE_RUNTIMES": "#" + } + }, + "branch.#": { + "env": { + "CM_GIT_CHECKOUT": "#" + } + }, + "sha.#": { + "env": { + "CM_GIT_CHECKOUT_SHA": "#" + } + }, + "tag.#": { + "base": [ + "full-history" + ], + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + } + }, + "for-intel-mlperf-inference-v3.1-bert": { + "base": [ + "tag.llvmorg-15.0.7", + "clang", + "release" + ], + "adr": { + "conda-package": { + "tags": "_name.bert-pt" + } + }, + "deps": [ + { + "tags": "get,gcc" + }, + { + "tags": "get,conda,_name.bert-pt", + "names": [ "conda" ] + }, + { + "names": [ + "conda-package", + "ncurses" + ], + "tags": "get,conda-package,_package.ncurses,_source.conda-forge" + }, + { + "names": [ + "conda-package", + "ninja" + ], + "tags": "get,generic,conda-package,_package.ninja" + }, + { + "names": [ + "conda-package", + "cmake" + ], + "tags": "get,generic,conda-package,_package.cmake" + }, + { + "tags": "get,conda-package,_package.llvm-openmp,_source.conda-forge" + }, + { + "tags": "get,conda-package,_package.chardet" + }, + { + "names": [ + "conda-package", + "libstdcxx-ng" + ], + "tags": "get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge" + } + ], + "env": { + "CM_LLVM_CONDA_ENV": "yes" + } + }, + "for-intel-mlperf-inference-v3.1-gptj": { + "base": [ + "tag.llvmorg-16.0.6", + "clang", + "release" + ], + "adr": { + "conda-package": { + "tags": "_name.gptj-pt" + } + }, + "deps": [ + { + "tags": "get,generic-sys-util,_g++-12" + }, + { + "tags": "get,gcc", + "version_min": "12.1" + }, + { + "tags": "get,conda,_name.gptj-pt", + "names": [ "conda" ] + }, + { + "tags": "get,generic,conda-package,_package.python", + "names": [ "conda-package", "python" ], + "version": "3.9" + }, + { + "names": [ + "conda-package", + "ncurses" + ], + "tags": "get,conda-package,_package.ncurses,_source.conda-forge" + }, + { + "tags": "get,conda-package,_package.chardet" + }, + { + "names": [ + "conda-package", + "libstdcxx-ng" + ], + "tags": "get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge" + }, + { + "names": [ + "conda-package", + "mkl" + ], + "tags": "get,generic,conda-package,_package.mkl,_source.intel", + "version": "2023.1.0" + }, + { + "names": [ + "conda-package", + "mkl-include" + ], + "tags": "get,generic,conda-package,_package.mkl-include,_source.intel", + "version": "2023.1.0" + }, + { + "names": [ + "conda-package", + "intel-openmp" + ], + "tags": "get,generic,conda-package,_package.intel-openmp,_source.intel", + "version": "2023.1.0" + }, + { + "names": [ + "conda-package", + "gperftools" + ], + "tags": "get,generic,conda-package,_package.gperftools,_source.conda-forge" + }, + { + "names": [ 
+ "conda-package", + "pybind11" + ], + "tags": "get,generic,conda-package,_package.pybind11,_source.conda-forge", + "version": "2.10.4" + }, + { + "tags": "get,generic-python-lib,_custom-python,_package.torch,_url.git+https://github.com/pytorch/pytorch.git@927dc662386af052018212c7d01309a506fc94cd", + "env": { + "CM_PYTHON_BIN_WITH_PATH": "<<>>/python3", + "+ CXXFLAGS": [ "-Wno-nonnull", "-Wno-maybe-uninitialized", "-Wno-uninitialized", "-Wno-free-nonheap-object" ] + } + }, + { + "tags": "get,generic-python-lib,_custom-python,_package.setuptools", + "env": { + "CM_PYTHON_BIN_WITH_PATH": "<<>>/python3" + } + }, + { + "tags": "get,generic-python-lib,_custom-python,_package.neural-compressor,_url.git+https://github.com/intel/neural-compressor.git@a2931eaa4052eec195be3c79a13f7bfa23e54473", + "env": { + "CM_PYTHON_BIN_WITH_PATH": "<<>>/python3" + } + } + ], + "env": { + "CM_LLVM_CONDA_ENV": "yes", + "CM_LLVM_16_INTEL_MLPERF_INFERENCE": "yes", + "USE_CUDA": "0", + "CUDA_VISIBLE_DEVICES": "" + } + } + }, + "versions": { + } +} diff --git a/script/install-llvm-src/customize.py b/script/install-llvm-src/customize.py new file mode 100644 index 0000000000..b1bb23250f --- /dev/null +++ b/script/install-llvm-src/customize.py @@ -0,0 +1,69 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + clang_file_name = "clang" + extra_cmake_options = '' + + install_prefix = os.path.join(os.getcwd(), "install") + + if env.get('CM_LLVM_CONDA_ENV', '') == "yes": + install_prefix = env['CM_CONDA_PREFIX'] + extra_cmake_options = f"-DCMAKE_SHARED_LINKER_FLAGS=-L{install_prefix} -Wl,-rpath,{install_prefix}" + + if env.get('CM_LLVM_16_INTEL_MLPERF_INFERENCE', '') == "yes": + env['CM_REQUIRE_INSTALL'] = 'yes' + i['run_script_input']['script_name'] = "install-llvm-16-intel-mlperf-inference" + clang_file_name = "llvm-link" + #env['USE_LLVM'] = install_prefix + #env['LLVM_DIR'] = os.path.join(env['USE_LLVM'], "lib", "cmake", "llvm") + else: + if env.get('CM_LLVM_ENABLE_RUNTIMES', '') != '': + enable_runtimes = env['CM_LLVM_ENABLE_RUNTIMES'].replace(":", ";") + else: + enable_runtimes = '' + + if env.get('CM_LLVM_ENABLE_PROJECTS', '') != '': + enable_projects = env['CM_LLVM_ENABLE_PROJECTS'].replace(":", ";") + else: + enable_projects = '' + + llvm_build_type = env['CM_LLVM_BUILD_TYPE'] + + cmake_cmd = "cmake " + os.path.join(env["CM_LLVM_SRC_REPO_PATH"], "llvm") + " -GNinja -DCMAKE_BUILD_TYPE="+llvm_build_type + " -DLLVM_ENABLE_PROJECTS="+ enable_projects+ " -DLLVM_ENABLE_RUNTIMES='"+enable_runtimes + "' -DCMAKE_INSTALL_PREFIX=" + install_prefix + " -DLLVM_ENABLE_RTTI=ON -DLLVM_INSTALL_UTILS=ON -DLLVM_TARGETS_TO_BUILD=X86 " + extra_cmake_options + + env['CM_LLVM_CMAKE_CMD'] = cmake_cmd + + need_version = env.get('CM_VERSION','') + + #print(cmake_cmd) + + env['CM_LLVM_INSTALLED_PATH'] = install_prefix + env['CM_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join(env['CM_LLVM_INSTALLED_PATH'], "bin", clang_file_name) + + #env['+PATH'] = [] + return {'return':0} + +def postprocess(i): + + env = i['env'] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_LLVM_CLANG_BIN_WITH_PATH'] + + if env.get('CM_LLVM_CONDA_ENV', '') != "yes": + # We don't need to check default paths here because we force install to cache + env['+PATH'] = [ os.path.join(env['CM_LLVM_INSTALLED_PATH'], "bin") ] + + path_include = os.path.join(env['CM_LLVM_INSTALLED_PATH'], 'include') + if 
os.path.isdir(path_include): + env['+C_INCLUDE_PATH'] = [ path_include ] + + return {'return':0} diff --git a/script/install-llvm-src/install-llvm-16-intel-mlperf-inference.sh b/script/install-llvm-src/install-llvm-16-intel-mlperf-inference.sh new file mode 100644 index 0000000000..df23aa3e33 --- /dev/null +++ b/script/install-llvm-src/install-llvm-16-intel-mlperf-inference.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:${PATH} +export ABI=$(python -c "import torch; print(int(torch._C._GLIBCXX_USE_CXX11_ABI))") +mkdir -p llvm-project && cd llvm-project +wget -nc https://github.com/llvm/llvm-project/releases/download/llvmorg-16.0.6/cmake-16.0.6.src.tar.xz +wget -nc https://github.com/llvm/llvm-project/releases/download/llvmorg-16.0.6/llvm-16.0.6.src.tar.xz +tar -xf cmake-16.0.6.src.tar.xz +mv cmake-16.0.6.src cmake +tar -xf llvm-16.0.6.src.tar.xz +mv llvm-16.0.6.src llvm +rm -rf build +mkdir -p build +cd build +export DEB_BUILD_MAINT_OPTIONS=hardening=-format +cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-D_GLIBCXX_USE_CXX11_ABI=${ABI}" -DLLVM_TARGETS_TO_BUILD=X86 -DLLVM_ENABLE_TERMINFO=OFF -DLLVM_INCLUDE_TESTS=OFF -DLLVM_INCLUDE_EXAMPLES=OFF -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_INCLUDE_BENCHMARKS=OFF ../llvm/ + +cmake --build . -j $(nproc) +export LLVM_ROOT=$CONDA_PREFIX +cmake -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -DCMAKE_SHARED_LINKER_FLAGS="-L$CONDA_PREFIX -Wl,-rpath,$CONDA_PREFIX" -P cmake_install.cmake +ln -sf ${LLVM_ROOT}/bin/llvm-config ${LLVM_ROOT}/bin/llvm-config-13 diff --git a/script/install-llvm-src/run.sh b/script/install-llvm-src/run.sh new file mode 100644 index 0000000000..60c0efea61 --- /dev/null +++ b/script/install-llvm-src/run.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +CUR_DIR=$PWD + +INSTALL_DIR="${CM_LLVM_INSTALLED_PATH}" +echo "INSTALL_DIR=${INSTALL_DIR}" + +if [[ ${CM_LLVM_CONDA_ENV} != "yes" ]]; then + cmd="rm -rf ${INSTALL_DIR}" + echo "$cmd" + eval "$cmd" +else + export PATH=${CM_CONDA_BIN_PATH}:$PATH +fi + +if [[ ${CM_CLEAN_BUILD} == "yes" ]]; then + rm -rf build +fi + +mkdir -p build + +# If install exist, then configure was done +if [ ! -d "${INSTALL_DIR}" ] || [ ${CM_LLVM_CONDA_ENV} == "yes" ]; then + echo "******************************************************" + + cd build + if [ "${?}" != "0" ]; then exit 1; fi + + echo "${CM_LLVM_CMAKE_CMD}" + eval "${CM_LLVM_CMAKE_CMD}" + ninja + if [ "${?}" != "0" ]; then exit 1; fi + ninja install + if [ "${?}" != "0" ]; then exit 1; fi + + mkdir -p ${INSTALL_DIR} +fi + +# Clean build directory (too large) +cd ${CUR_DIR} +rm -rf build + +echo "******************************************************" +echo "LLVM is built and installed to ${INSTALL_DIR} ..." 
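+
+# For reference, a sketch of the configure command that customize.py assembles
+# into CM_LLVM_CMAKE_CMD and that this script evaluates above (paths are
+# illustrative; real values come from the CM cache and selected variations):
+#
+#   cmake <CM_LLVM_SRC_REPO_PATH>/llvm -GNinja -DCMAKE_BUILD_TYPE=release \
+#         -DLLVM_ENABLE_PROJECTS=clang -DLLVM_ENABLE_RUNTIMES='' \
+#         -DCMAKE_INSTALL_PREFIX=<install prefix> -DLLVM_ENABLE_RTTI=ON \
+#         -DLLVM_INSTALL_UTILS=ON -DLLVM_TARGETS_TO_BUILD=X86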
diff --git a/script/install-mlperf-logging-from-src/README.md b/script/install-mlperf-logging-from-src/README.md new file mode 100644 index 0000000000..2b955f2899 --- /dev/null +++ b/script/install-mlperf-logging-from-src/README.md @@ -0,0 +1,128 @@ +Automatically generated README for this automation recipe: **install-mlperf-logging-from-src** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-mlperf-logging-from-src,f67cb84a5dc942c3) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-mlperf-logging-from-src)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *install,mlperf,logging,from.src* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install mlperf logging from.src" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,mlperf,logging,from.src` + +`cm run script --tags=install,mlperf,logging,from.src ` + +*or* + +`cmr "install mlperf logging from.src"` + +`cmr "install mlperf logging from.src " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,mlperf,logging,from.src',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,mlperf,logging,from.src"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,mlperf,logging,from.src) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install mlperf logging from.src" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags.
+
+ +#### Versions +* `master` +* `v3.1` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-mlperf-logging-from-src/_cm.yaml)*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,git,repo,_repo.https://github.com/mlcommons/logging + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-mlperf-logging-from-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-mlperf-logging-from-src/_cm.yaml) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-mlperf-logging-from-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-mlperf-logging-from-src/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-mlperf-logging-from-src/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-mlperf-logging-from-src/_cm.yaml) + +___ +### Script output +`cmr "install mlperf logging from.src " -j` +#### New environment keys (filter) + +* `CM_MLPERF_LOGGING_REPO_PATH` +#### New environment keys auto-detected from customize diff --git a/script/install-mlperf-logging-from-src/_cm.yaml b/script/install-mlperf-logging-from-src/_cm.yaml new file mode 100644 index 0000000000..c4d8f86bf9 --- /dev/null +++ b/script/install-mlperf-logging-from-src/_cm.yaml @@ -0,0 +1,36 @@ +alias: install-mlperf-logging-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +deps: + - tags: get,python3 + names: + - python + - python3 + - tags: get,git,repo,_repo.https://github.com/mlcommons/logging + extra_cache_tags: mlperf_logging + env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_LOGGING_REPO_PATH +docker_input_mapping: +input_description: +new_env_keys: + - CM_MLPERF_LOGGING_REPO_PATH +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- install +- mlperf +- logging +- from.src +uid: f67cb84a5dc942c3 +variations: {} +versions: + master: + env: + CM_MLPERF_LOGGING_VERSION: master + v3.1: + env: + CM_MLPERF_LOGGING_VERSION: v3.1 diff --git a/script/install-mlperf-logging-from-src/customize.py b/script/install-mlperf-logging-from-src/customize.py new file mode 100644 index 0000000000..d12f9b3e1d --- /dev/null +++ b/script/install-mlperf-logging-from-src/customize.py @@ -0,0 +1,22 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/install-mlperf-logging-from-src/run.sh b/script/install-mlperf-logging-from-src/run.sh new file mode 100644 index 0000000000..de622c9f35 --- /dev/null +++ b/script/install-mlperf-logging-from-src/run.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} 
+ +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" + +run "${CM_PYTHON_BIN_WITH_PATH} -m pip install -e ${CM_MLPERF_LOGGING_REPO_PATH}" diff --git a/script/install-nccl-libs/README.md b/script/install-nccl-libs/README.md new file mode 100644 index 0000000000..7b9e65bb64 --- /dev/null +++ b/script/install-nccl-libs/README.md @@ -0,0 +1,135 @@ +Automatically generated README for this automation recipe: **install-nccl-libs** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-nccl-libs,d1c76da2adb44201) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-nccl-libs)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *install,nccl,libs* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install nccl libs" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,nccl,libs` + +`cm run script --tags=install,nccl,libs[,variations] ` + +*or* + +`cmr "install nccl libs"` + +`cmr "install nccl libs [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,nccl,libs',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,nccl,libs"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,nccl,libs) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install nccl libs[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_cuda` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + +
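+
+  For example, selecting the `_cuda` variation so that CUDA is detected first:
+
+  ```bash
+  cm run script --tags=install,nccl,libs,_cuda
+  ```
+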
+ +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags.
+
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-nccl-libs/_cm.yaml) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-nccl-libs/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-nccl-libs/_cm.yaml) + 1. ***Run native script if exists*** + * [run-ubuntu.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-nccl-libs/run-ubuntu.sh) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-nccl-libs/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-nccl-libs/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-nccl-libs/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-nccl-libs/_cm.yaml) + +___ +### Script output +`cmr "install nccl libs [,variations]" -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/install-nccl-libs/_cm.yaml b/script/install-nccl-libs/_cm.yaml new file mode 100644 index 0000000000..8011ab3ad7 --- /dev/null +++ b/script/install-nccl-libs/_cm.yaml @@ -0,0 +1,13 @@ +alias: install-nccl-libs +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +tags: +- install +- nccl +- libs +uid: d1c76da2adb44201 +variations: + cuda: + deps: + - tags: get,cuda diff --git a/script/install-nccl-libs/customize.py b/script/install-nccl-libs/customize.py new file mode 100644 index 0000000000..d12f9b3e1d --- /dev/null +++ b/script/install-nccl-libs/customize.py @@ -0,0 +1,22 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/install-nccl-libs/run-ubuntu.sh b/script/install-nccl-libs/run-ubuntu.sh new file mode 100644 index 0000000000..e56074a517 --- /dev/null +++ b/script/install-nccl-libs/run-ubuntu.sh @@ -0,0 +1,2 @@ +CM_SUDO=${CM_SUDO:-sudo} +${CM_SUDO} apt install -y --allow-downgrades libnccl2=2.18.3-1+cuda${CM_CUDA_VERSION} libnccl-dev=2.18.3-1+cuda${CM_CUDA_VERSION} diff --git a/script/install-nccl-libs/run.sh b/script/install-nccl-libs/run.sh new file mode 100644 index 0000000000..3a584c10cf --- /dev/null +++ b/script/install-nccl-libs/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... 
+# run "$CM_RUN_CMD" diff --git a/script/install-numactl-from-src/README.md b/script/install-numactl-from-src/README.md new file mode 100644 index 0000000000..fc1328c04e --- /dev/null +++ b/script/install-numactl-from-src/README.md @@ -0,0 +1,172 @@ +Automatically generated README for this automation recipe: **install-numactl-from-src** + +Category: **Detection or installation of tools and artifacts** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-numactl-from-src,4f355ae8ca1948b2) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-numactl-from-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,src,from.src,numactl,src-numactl* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install src from.src numactl src-numactl" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,src,from.src,numactl,src-numactl` + +`cm run script --tags=install,src,from.src,numactl,src-numactl[,variations] ` + +*or* + +`cmr "install src from.src numactl src-numactl"` + +`cmr "install src from.src numactl src-numactl [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,src,from.src,numactl,src-numactl',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,src,from.src,numactl,src-numactl"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,src,from.src,numactl,src-numactl) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install src from.src numactl src-numactl[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_branch.#` + - Environment variables: + - *CM_GIT_CHECKOUT*: `#` + - Workflow: + * `_sha.#` + - Environment variables: + - *CM_GIT_CHECKOUT_SHA*: `#` + - Workflow: + * `_tag.#` + - Environment variables: + - *CM_GIT_CHECKOUT_TAG*: `#` + - Workflow: + +
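+
+  For example, pinning the numactl checkout to a release tag (the tag value is illustrative):
+
+  ```bash
+  cm run script --tags=install,src,from.src,numactl,src-numactl,_tag.v2.0.16
+  ```
+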
+ + + * Group "**repo**" +
+ Click here to expand this section. + + * `_repo.#` + - Environment variables: + - *CM_GIT_URL*: `#` + - Workflow: + * **`_repo.https://github.com/numactl/numactl`** (default) + - Environment variables: + - *CM_GIT_URL*: `https://github.com/numactl/numactl` + - Workflow: + +
+ + +#### Default variations + +`_repo.https://github.com/numactl/numactl` +#### Default environment + +
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags.
+
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-numactl-from-src/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,git,repo + * CM names: `--adr.['numactl-src-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-numactl-from-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-numactl-from-src/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-numactl-from-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-numactl-from-src/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-numactl-from-src/_cm.json) + +___ +### Script output +`cmr "install src from.src numactl src-numactl [,variations]" -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_NUMACTL_*` +#### New environment keys auto-detected from customize diff --git a/script/install-numactl-from-src/_cm.json b/script/install-numactl-from-src/_cm.json new file mode 100644 index 0000000000..75256a663e --- /dev/null +++ b/script/install-numactl-from-src/_cm.json @@ -0,0 +1,94 @@ +{ + "alias": "install-numactl-from-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Detection or installation of tools and artifacts", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_NUMACTL_SRC_REPO_PATH" + }, + "extra_cache_tags": "numactl,src,numactl-src,numactl-src-repo", + "names": [ + "numactl-src-repo" + ], + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_branch.": [ + "CM_GIT_CHECKOUT" + ], + "_repo.": [ + "CM_GIT_URL" + ], + "_sha.": [ + "CM_GIT_CHECKOUT_SHA" + ], + "_tag.": [ + "CM_GIT_CHECKOUT_TAG" + ] + } + } + ], + "env": { + "CM_GIT_URL": "https://github.com/numactl/numactl" + }, + "name": "Build numactl from sources", + "new_env_keys": [ + "CM_NUMACTL_*", + "+PATH" + ], + "sort": 1000, + "tags": [ + "install", + "src", + "from.src", + "numactl", + "src-numactl" + ], + "uid": "4f355ae8ca1948b2", + "variations": { + "branch.#": { + "env": { + "CM_GIT_CHECKOUT": "#" + } + }, + "repo.#": { + "env": { + "CM_GIT_URL": "#" + }, + "group": "repo" + }, + "repo.https://github.com/numactl/numactl": { + "default": true, + "env": { + "CM_GIT_URL": "https://github.com/numactl/numactl" + }, + "group": "repo" + }, + "sha.#": { + "env": { + "CM_GIT_CHECKOUT_SHA": "#" + } + }, + "tag.#": { + "ad": { + "pytorch-src-repo": { + "tags": "_no-recurse-submodules,_full-history" + } + }, + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + } + } + }, + "warnings": [ + "This CM script will need sudo to install numactl!" 
+ ] +} diff --git a/script/install-numactl-from-src/customize.py b/script/install-numactl-from-src/customize.py new file mode 100644 index 0000000000..5c1ee2674f --- /dev/null +++ b/script/install-numactl-from-src/customize.py @@ -0,0 +1,23 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + run_cmd="python setup.py install" + + env['CM_RUN_CMD'] = run_cmd + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + env['+PATH'] = [] + + return {'return':0} diff --git a/script/install-numactl-from-src/run.sh b/script/install-numactl-from-src/run.sh new file mode 100644 index 0000000000..606b5d9659 --- /dev/null +++ b/script/install-numactl-from-src/run.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +CUR_DIR=$PWD +echo $PWD +rm -rf numactl +cmd="cp -r ${CM_NUMACTL_SRC_REPO_PATH} numactl" +echo "$cmd" +eval "$cmd" +cd numactl +./autogen.sh +./configure +if [ "${?}" != "0" ]; then exit 1; fi +make +if [ "${?}" != "0" ]; then exit 1; fi +#make install DESTDIR=$CUR_DIR +sudo make install +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" diff --git a/script/install-onednn-from-src/README.md b/script/install-onednn-from-src/README.md new file mode 100644 index 0000000000..baac91350c --- /dev/null +++ b/script/install-onednn-from-src/README.md @@ -0,0 +1,182 @@ +Automatically generated README for this automation recipe: **install-onednn-from-src** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-onednn-from-src,fe3a652e315f4c8f) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-onednn-from-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,get,src,from.src,onednn,src-onednn* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "install get src from.src onednn src-onednn" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=install,get,src,from.src,onednn,src-onednn`
+
+`cm run script --tags=install,get,src,from.src,onednn,src-onednn[,variations] `
+
+*or*
+
+`cmr "install get src from.src onednn src-onednn"`
+
+`cmr "install get src from.src onednn src-onednn [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,get,src,from.src,onednn,src-onednn',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
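+The template above uses `...` placeholders; a minimal self-contained call might look as follows (a sketch assuming `cmind` is installed and the `mlcommons@ck` repository is pulled; the `'quiet'` input is assumed to suppress interactive prompts, as `--quiet` does on the command line):
+
+```python
+import cmind
+
+# Run the CM script with concrete inputs only; all other input keys are optional.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'install,get,src,from.src,onednn,src-onednn',
+                  'out': 'con',
+                  'quiet': True})
+
+# A non-zero 'return' signals an error in the unified CM API.
+if r['return'] > 0:
+    print(r['error'])
+```
+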
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,get,src,from.src,onednn,src-onednn"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,get,src,from.src,onednn,src-onednn) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install get src from.src onednn src-onednn[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_branch.#` + - Environment variables: + - *CM_GIT_CHECKOUT*: `#` + - Workflow: + * `_for-intel-mlperf-inference-v3.1-bert` + - Environment variables: + - *CM_CONDA_ENV*: `yes` + - *CM_FOR_INTEL_MLPERF_INFERENCE*: `yes` + - Workflow: + * `_sha.#` + - Environment variables: + - *CM_GIT_CHECKOUT_SHA*: `#` + - Workflow: + * `_tag.#` + - Environment variables: + - *CM_GIT_CHECKOUT_TAG*: `#` + - Workflow: + +
+ + + * Group "**repo**" +
+ Click here to expand this section. + + * `_repo.#` + - Environment variables: + - *CM_GIT_URL*: `#` + - Workflow: + * **`_repo.https://github.com/oneapi-src/oneDNN`** (default) + - Environment variables: + - *CM_GIT_URL*: `https://github.com/oneapi-src/oneDNN` + - Workflow: + +
+ + +#### Default variations + +`_repo.https://github.com/oneapi-src/oneDNN` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
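+For example, to build a specific release (an illustrative command; `_tag.#` is the variation documented above, with `v2.6` being the tag pinned by the Intel MLPerf inference variation):
+
+`cm run script --tags=install,get,src,from.src,onednn,src-onednn,_tag.v2.6`
+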
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-onednn-from-src/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,python3 + * `if (CM_CONDA_ENV != yes)` + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,git,repo + * CM names: `--adr.['onednn-src-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-onednn-from-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-onednn-from-src/_cm.json) + 1. ***Run native script if exists*** + * [run-intel-mlperf-inference.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-onednn-from-src/run-intel-mlperf-inference.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-onednn-from-src/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-onednn-from-src/_cm.json) + +___ +### Script output +`cmr "install get src from.src onednn src-onednn [,variations]" -j` +#### New environment keys (filter) + +* `CM_ONEDNN_*` +#### New environment keys auto-detected from customize + +* `CM_ONEDNN_INSTALLED_PATH` \ No newline at end of file diff --git a/script/install-onednn-from-src/_cm.json b/script/install-onednn-from-src/_cm.json new file mode 100644 index 0000000000..35d9aba9c6 --- /dev/null +++ b/script/install-onednn-from-src/_cm.json @@ -0,0 +1,122 @@ +{ + "alias": "install-onednn-from-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Compiler automation", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "names": [ + "python", + "python3" + ], + "skip_if_env": { + "CM_CONDA_ENV": [ + "yes" + ] + }, + "tags": "get,python3" + }, + { + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_ONEDNN_SRC_REPO_PATH" + }, + "extra_cache_tags": "onednn,src,onednn-src,onednn-src-repo", + "names": [ + "onednn-src-repo" + ], + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_branch.": [ + "CM_GIT_CHECKOUT" + ], + "_repo.": [ + "CM_GIT_URL" + ], + "_sha.": [ + "CM_GIT_CHECKOUT_SHA" + ], + "_tag.": [ + "CM_GIT_CHECKOUT_TAG" + ] + } + } + ], + "env": { + "CM_GIT_URL": "https://github.com/oneapi-src/oneDNN" + }, + "name": "Build oneDNN from sources", + "new_env_keys": [ + "CM_ONEDNN_*" + ], + "prehook_deps": [], + "sort": 1000, + "tags": [ + "install", + "get", + "src", + "from.src", + "onednn", + "src-onednn" + ], + "uid": "fe3a652e315f4c8f", + "variations": { + "branch.#": { + "env": { + "CM_GIT_CHECKOUT": "#" + } + }, + "for-intel-mlperf-inference-v3.1-bert": { + "adr": { + "conda-package": { + "tags": "_name.bert-pt" + }, + "oneddn-src-repo": { + "tags": "_norecurse-submodule" + } + }, + "base": [ + "tag.v2.6" + ], + "env": { + "CM_CONDA_ENV": "yes", + "CM_FOR_INTEL_MLPERF_INFERENCE": 
"yes" + } + }, + "repo.#": { + "env": { + "CM_GIT_URL": "#" + }, + "group": "repo" + }, + "repo.https://github.com/oneapi-src/oneDNN": { + "default": true, + "env": { + "CM_GIT_URL": "https://github.com/oneapi-src/oneDNN" + }, + "group": "repo" + }, + "sha.#": { + "env": { + "CM_GIT_CHECKOUT_SHA": "#" + } + }, + "tag.#": { + "ad": { + "onednn-src-repo": { + "tags": "_no-recurse-submodules,_full-history" + } + }, + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + } + } + }, + "versions": {} +} diff --git a/script/install-onednn-from-src/customize.py b/script/install-onednn-from-src/customize.py new file mode 100644 index 0000000000..06e5402241 --- /dev/null +++ b/script/install-onednn-from-src/customize.py @@ -0,0 +1,25 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + run_cmd="" + + env['CM_RUN_CMD'] = run_cmd + env['CM_ONEDNN_INSTALLED_PATH'] = os.path.join(os.getcwd(), "onednn") + + if env.get('CM_FOR_INTEL_MLPERF_INFERENCE', '') == "yes": + i['run_script_input']['script_name'] = "run-intel-mlperf-inference" + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return':0} diff --git a/script/install-onednn-from-src/run-intel-mlperf-inference.sh b/script/install-onednn-from-src/run-intel-mlperf-inference.sh new file mode 100644 index 0000000000..77bff6883a --- /dev/null +++ b/script/install-onednn-from-src/run-intel-mlperf-inference.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +CUR_DIR=$PWD +rm -rf onednn +cp -r ${CM_ONEDNN_SRC_REPO_PATH} onednn +cd onednn +rm -rf build +pwd +wget -nc --no-check-certificate https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/bert-99/pytorch-cpu/patches/onednnv2_6.patch +if [ "${?}" != "0" ]; then exit 1; fi +cmd="git apply onednnv2_6.patch" + +echo ${cmd} +eval ${cmd} + +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" diff --git a/script/install-onnxruntime-from-src/README.md b/script/install-onnxruntime-from-src/README.md new file mode 100644 index 0000000000..7170210582 --- /dev/null +++ b/script/install-onnxruntime-from-src/README.md @@ -0,0 +1,185 @@ +Automatically generated README for this automation recipe: **install-onnxruntime-from-src** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-onnxruntime-from-src,9798c7e7a5944cee) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-onnxruntime-from-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,get,src,from.src,onnxruntime,src-onnxruntime* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "install get src from.src onnxruntime src-onnxruntime" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=install,get,src,from.src,onnxruntime,src-onnxruntime`
+
+`cm run script --tags=install,get,src,from.src,onnxruntime,src-onnxruntime[,variations] `
+
+*or*
+
+`cmr "install get src from.src onnxruntime src-onnxruntime"`
+
+`cmr "install get src from.src onnxruntime src-onnxruntime [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,get,src,from.src,onnxruntime,src-onnxruntime',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,get,src,from.src,onnxruntime,src-onnxruntime"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,get,src,from.src,onnxruntime,src-onnxruntime) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install get src from.src onnxruntime src-onnxruntime[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_branch.#` + - Environment variables: + - *CM_GIT_CHECKOUT*: `#` + - Workflow: + * `_cuda` + - Environment variables: + - *CM_ONNXRUNTIME_GPU*: `yes` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda,_cudnn + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * `_sha.#` + - Environment variables: + - *CM_GIT_CHECKOUT_SHA*: `#` + - Workflow: + * `_tag.#` + - Environment variables: + - *CM_GIT_CHECKOUT_TAG*: `#` + - Workflow: + +
+ + + * Group "**repo**" +
+ Click here to expand this section. + + * **`_repo.https://github.com/Microsoft/onnxruntime`** (default) + - Environment variables: + - *CM_GIT_URL*: `https://github.com/Microsoft/onnxruntime` + - Workflow: + +
+ + +#### Default variations + +`_repo.https://github.com/Microsoft/onnxruntime` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
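+For example, to build with CUDA support (an illustrative command using the `_cuda` variation documented above):
+
+`cm run script --tags=install,get,src,from.src,onnxruntime,src-onnxruntime,_cuda`
+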
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-onnxruntime-from-src/_cm.json)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * detect,cpu
+       - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu)
+     * fail,filter,_windows
+       - CM script: [fail](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/fail)
+     * get,python3
+       * `if (CM_CONDA_ENV != yes)`
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,cmake
+       - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake)
+     * get,gcc
+       - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc)
+     * get,git,repo
+       * CM names: `--adr.['onnxruntime-src-repo']...`
+       - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-onnxruntime-from-src/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-onnxruntime-from-src/_cm.json)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-onnxruntime-from-src/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-onnxruntime-from-src/_cm.json)
+  1. Run "postprocess" function from customize.py
+  1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-onnxruntime-from-src/_cm.json) + +___ +### Script output +`cmr "install get src from.src onnxruntime src-onnxruntime [,variations]" -j` +#### New environment keys (filter) + +* `CM_ONNXRUNTIME_*` +#### New environment keys auto-detected from customize diff --git a/script/install-onnxruntime-from-src/_cm.json b/script/install-onnxruntime-from-src/_cm.json new file mode 100644 index 0000000000..a06b735852 --- /dev/null +++ b/script/install-onnxruntime-from-src/_cm.json @@ -0,0 +1,121 @@ +{ + "alias": "install-onnxruntime-from-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Compiler automation", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "tags": "fail,filter,_windows" + }, + { + "names": [ + "python", + "python3" + ], + "skip_if_env": { + "CM_CONDA_ENV": [ + "yes" + ] + }, + "tags": "get,python3" + }, + { + "tags": "get,cmake", + "version_min": "3.26.0" + }, + { + "tags": "get,gcc", + "version_max": "11.9.999", + "version_max_usable": "11.0" + }, + { + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_ONNXRUNTIME_SRC_REPO_PATH" + }, + "extra_cache_tags": "onnxruntime,src,onnxruntime-src,onnxruntime-src-repo", + "names": [ + "onnxruntime-src-repo" + ], + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_branch.": [ + "CM_GIT_CHECKOUT" + ], + "_repo.": [ + "CM_GIT_URL" + ], + "_sha.": [ + "CM_GIT_CHECKOUT_SHA" + ], + "_tag.": [ + "CM_GIT_CHECKOUT_TAG" + ] + } + } + ], + "env": { + }, + "name": "Build onnxruntime from sources", + "new_env_keys": [ + "CM_ONNXRUNTIME_*" + ], + "prehook_deps": [], + "sort": 1000, + "tags": [ + "install", + "get", + "src", + "from.src", + "onnxruntime", + "src-onnxruntime" + ], + "uid": "9798c7e7a5944cee", + "variations": { + "branch.#": { + "env": { + "CM_GIT_CHECKOUT": "#" + } + }, + "cuda": { + "env": { + "CM_ONNXRUNTIME_GPU": "yes" + }, + "deps": [ + { + "names": [ "cuda" ], + "tags": "get,cuda,_cudnn" + } + ] + }, + "repo.https://github.com/Microsoft/onnxruntime": { + "default": true, + "env": { + "CM_GIT_URL": "https://github.com/Microsoft/onnxruntime" + }, + "group": "repo" + }, + "sha.#": { + "env": { + "CM_GIT_CHECKOUT_SHA": "#" + } + }, + "tag.#": { + "ad": { + "onnxruntime-src-repo": { + "tags": "_no-recurse-submodules,_full-history" + } + }, + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + } + } + }, + "versions": {} +} diff --git a/script/install-onnxruntime-from-src/customize.py b/script/install-onnxruntime-from-src/customize.py new file mode 100644 index 0000000000..be854b226b --- /dev/null +++ b/script/install-onnxruntime-from-src/customize.py @@ -0,0 +1,22 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + run_cmd="./build.sh --config RelWithDebInfo --build_wheel --parallel --allow_running_as_root --skip_tests " + + if env.get('CM_ONNXRUNTIME_GPU', '') == "yes": + cuda_home = env['CUDA_HOME'] + run_cmd += f"--use_cuda --cuda_home {cuda_home} --cudnn_home {cuda_home}" + + env['CM_RUN_DIR'] = env['CM_ONNXRUNTIME_SRC_REPO_PATH'] + env['CM_RUN_CMD'] = run_cmd + + return {'return':0} diff --git a/script/install-onnxruntime-from-src/run.sh b/script/install-onnxruntime-from-src/run.sh new file mode 100644 index 0000000000..4a2381af77 --- /dev/null +++ 
b/script/install-onnxruntime-from-src/run.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+export CC=${CM_GCC_BIN_WITH_PATH}
+export CXX=${CM_GCC_INSTALLED_PATH}/g++
+
+echo "cd ${CM_RUN_DIR}"
+cd ${CM_RUN_DIR}
+test $? -eq 0 || exit $?
+rm -rf build
+
+echo ${CM_RUN_CMD}
+eval ${CM_RUN_CMD}
+test $? -eq 0 || exit $?
+
+exit 0
diff --git a/script/install-openssl/README.md b/script/install-openssl/README.md
new file mode 100644
index 0000000000..c46b1b9841
--- /dev/null
+++ b/script/install-openssl/README.md
@@ -0,0 +1,135 @@
+Automatically generated README for this automation recipe: **install-openssl**
+
+Category: **Detection or installation of tools and artifacts**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-openssl,be472d3b1d014169) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-openssl)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *install,src,openssl,openssl-lib*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "install src openssl openssl-lib" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=install,src,openssl,openssl-lib`
+
+`cm run script --tags=install,src,openssl,openssl-lib `
+
+*or*
+
+`cmr "install src openssl openssl-lib"`
+
+`cmr "install src openssl openssl-lib " `
+
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,src,openssl,openssl-lib',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,src,openssl,openssl-lib"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,src,openssl,openssl-lib) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install src openssl openssl-lib" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
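+For example, to pin the version explicitly (illustrative; `--version` is assumed here to be the generic CM flag for version selection, matching the default version listed below):
+
+`cm run script --tags=install,src,openssl,openssl-lib --version=1.1.1`
+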
+ +#### Versions +Default version: `1.1.1` + +* `1.1.1` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-openssl/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-openssl/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-openssl/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-openssl/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-openssl/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-openssl/customize.py)*** + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-openssl/_cm.json)*** + * get,openssl + * `if (CM_REQUIRE_INSTALL != yes)` + - CM script: [get-openssl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-openssl) + +___ +### Script output +`cmr "install src openssl openssl-lib " -j` +#### New environment keys (filter) + +* `+LD_LIBRARY_PATH` +* `CM_OPENSSL_*` +#### New environment keys auto-detected from customize + +* `CM_OPENSSL_BIN_WITH_PATH` +* `CM_OPENSSL_INSTALLED_PATH` \ No newline at end of file diff --git a/script/install-openssl/_cm.json b/script/install-openssl/_cm.json new file mode 100644 index 0000000000..d340979e56 --- /dev/null +++ b/script/install-openssl/_cm.json @@ -0,0 +1,45 @@ +{ + "alias": "install-openssl", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "default_version": "1.1.1", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + } + ], + "category": "Detection or installation of tools and artifacts", + "env": {}, + "post_deps": [ + { + "skip_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "tags": "get,openssl" + } + ], + "new_env_keys": [ + "CM_OPENSSL_*", + "+LD_LIBRARY_PATH" + ], + "tags": [ + "install", + "src", + "openssl", + "openssl-lib" + ], + "uid": "be472d3b1d014169", + "versions": { + "1.1.1": { + "env": { + "CM_VERSION": "1.1.1" + } + } + } +} diff --git a/script/install-openssl/customize.py b/script/install-openssl/customize.py new file mode 100644 index 0000000000..e6163a0f5e --- /dev/null +++ b/script/install-openssl/customize.py @@ -0,0 +1,39 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + need_version = env.get('CM_VERSION','') + if need_version == '': + return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} + + print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + + return {'return':0} + +def postprocess(i): + inp = i['input'] + env = i['env'] + tags = inp['tags'] + tag_list = tags.split(",") + install_path = os.path.join(os.getcwd(), 
'openssl-'+env['CM_VERSION']+'g', 'install') + path_lib = os.path.join(install_path, 'lib') + if '+LD_LIBRARY_PATH' not in env: + env['+LD_LIBRARY_PATH'] = [] + env['+LD_LIBRARY_PATH'].append(path_lib) + bin_name = "openssl" + path_bin = os.path.join(install_path, 'bin') + env['CM_OPENSSL_INSTALLED_PATH'] = path_bin + env['CM_OPENSSL_BIN_WITH_PATH'] = os.path.join(path_bin, bin_name) + return {'return':0} diff --git a/script/install-openssl/run.sh b/script/install-openssl/run.sh new file mode 100644 index 0000000000..2e6502c07e --- /dev/null +++ b/script/install-openssl/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +CUR_DIR=$PWD + +echo "***********************************************************" +CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} +CM_MAKE_CORES=${CM_MAKE_CORES:-2} +CM_WGET_URL=https://www.openssl.org/source/openssl-${CM_VERSION}g.tar.gz +wget -nc ${CM_WGET_URL} +test $? -eq 0 || exit 1 +tar -xzf openssl-${CM_VERSION}g.tar.gz && cd openssl-${CM_VERSION}g +test $? -eq 0 || exit 1 +mkdir -p install +./config --prefix=`pwd`/install +make -j${CM_MAKE_CORES} +test $? -eq 0 || exit 1 +make install diff --git a/script/install-pip-package-for-cmind-python/README.md b/script/install-pip-package-for-cmind-python/README.md new file mode 100644 index 0000000000..a4fdf16d0f --- /dev/null +++ b/script/install-pip-package-for-cmind-python/README.md @@ -0,0 +1,132 @@ +Automatically generated README for this automation recipe: **install-pip-package-for-cmind-python** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-pip-package-for-cmind-python,b16ed087abab459c) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pip-package-for-cmind-python)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *install,pip,package,pip-package,for-cmind-python,for.cmind-python* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "install pip package pip-package for-cmind-python for.cmind-python" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=install,pip,package,pip-package,for-cmind-python,for.cmind-python`
+
+`cm run script --tags=install,pip,package,pip-package,for-cmind-python,for.cmind-python[,variations] `
+
+*or*
+
+`cmr "install pip package pip-package for-cmind-python for.cmind-python"`
+
+`cmr "install pip package pip-package for-cmind-python for.cmind-python [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,pip,package,pip-package,for-cmind-python,for.cmind-python',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,pip,package,pip-package,for-cmind-python,for.cmind-python"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,pip,package,pip-package,for-cmind-python,for.cmind-python) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install pip package pip-package for-cmind-python for.cmind-python[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_package.#` + - Environment variables: + - *CM_PIP_PACKAGE_NAME*: `#` + - Workflow: + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
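+For example, to install a specific package (illustrative; `numpy` is only a placeholder for any pip package name passed through the `_package.#` variation):
+
+`cm run script --tags=install,pip,package,pip-package,for-cmind-python,for.cmind-python,_package.numpy`
+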
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pip-package-for-cmind-python/_cm.yaml) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pip-package-for-cmind-python/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pip-package-for-cmind-python/_cm.yaml) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pip-package-for-cmind-python/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pip-package-for-cmind-python/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pip-package-for-cmind-python/_cm.yaml) + +___ +### Script output +`cmr "install pip package pip-package for-cmind-python for.cmind-python [,variations]" -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/install-pip-package-for-cmind-python/_cm.yaml b/script/install-pip-package-for-cmind-python/_cm.yaml new file mode 100644 index 0000000000..765500d91a --- /dev/null +++ b/script/install-pip-package-for-cmind-python/_cm.yaml @@ -0,0 +1,17 @@ +alias: install-pip-package-for-cmind-python +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +tags: +- install +- pip +- package +- pip-package +- for-cmind-python +- for.cmind-python +uid: b16ed087abab459c + +variations: + package.#: + env: + CM_PIP_PACKAGE_NAME: "#" diff --git a/script/install-pip-package-for-cmind-python/customize.py b/script/install-pip-package-for-cmind-python/customize.py new file mode 100644 index 0000000000..1630bc5428 --- /dev/null +++ b/script/install-pip-package-for-cmind-python/customize.py @@ -0,0 +1,38 @@ +from cmind import utils +import os +import subprocess +import sys + +def install(package): + additional_install_options = [] + r = subprocess.run([sys.executable, "-m", "pip", "--version"], check=True, capture_output=True) + r = r.stdout.decode("utf-8") + if "pip" in r: + out_split = r.split(" ") + if len(out_split) < 2: + return {'return': 1, 'error': 'Pip version detection failed'} + pip_version = out_split[1].split(".") + if pip_version and len(pip_version) > 1 and int(pip_version[0]) >= 23: + additional_install_options.append("--break-system-packages") + run_cmd = [sys.executable, "-m", "pip", "install", package] + run_cmd += additional_install_options + r = subprocess.run(run_cmd, check=True) + + return {'return':0} + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + if env.get('CM_PIP_PACKAGE_NAME', '') != '': + r = install(env['CM_PIP_PACKAGE_NAME']) + if r['return'] > 0: + return r + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/install-python-src/README.md b/script/install-python-src/README.md new file mode 100644 index 0000000000..4bfd73e580 --- /dev/null +++ b/script/install-python-src/README.md @@ -0,0 +1,183 @@ +Automatically generated README for this automation recipe: **install-python-src** + +Category: **Python automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and 
Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-python-src,12d3a608afe14a1e) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-src)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *install,src,python,python3,src-python3,src-python*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "install src python python3 src-python3 src-python" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=install,src,python,python3,src-python3,src-python`
+
+`cm run script --tags=install,src,python,python3,src-python3,src-python[,variations] `
+
+*or*
+
+`cmr "install src python python3 src-python3 src-python"`
+
+`cmr "install src python python3 src-python3 src-python [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,src,python,python3,src-python3,src-python',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,src,python,python3,src-python3,src-python"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,src,python,python3,src-python3,src-python) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install src python python3 src-python3 src-python[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_lto` + - Environment variables: + - *CM_PYTHON_LTO_FLAG*: ` --lto` + - *CM_PYTHON_INSTALL_CACHE_TAGS*: `with-lto` + - Workflow: + * `_optimized` + - Environment variables: + - *CM_PYTHON_OPTIMIZATION_FLAG*: ` --enable-optimizations` + - *CM_PYTHON_INSTALL_CACHE_TAGS*: `optimized` + - Workflow: + * `_shared` + - Environment variables: + - *CM_PYTHON_INSTALL_CACHE_TAGS*: `shared` + - *CM_SHARED_BUILD*: `yes` + - Workflow: + * `_with-custom-ssl` + - Environment variables: + - *CM_CUSTOM_SSL*: `yes` + - *CM_PYTHON_INSTALL_CACHE_TAGS*: `with-custom-ssl` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,openssl + - CM script: [get-openssl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-openssl) + * `_with-ssl` + - Environment variables: + - *CM_ENABLE_SSL*: `yes` + - *CM_PYTHON_INSTALL_CACHE_TAGS*: `with-ssl` + - Workflow: + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_ENABLE_SSL: `no` +* CM_CUSTOM_SSL: `no` +* CM_SHARED_BUILD: `no` +* CM_PYTHON_OPTIMIZATION_FLAG: `` +* CM_PYTHON_LTO_FLAG: `` +* CM_WGET_URL: `https://www.python.org/ftp/python/[PYTHON_VERSION]/Python-[PYTHON_VERSION].tgz` + +
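+For example, to build a shared, SSL-enabled Python at the default version listed below (an illustrative combination of the variations documented above):
+
+`cm run script --tags=install,src,python,python3,src-python3,src-python,_shared,_with-ssl --version=3.10.13`
+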
+ +#### Versions +Default version: `3.10.13` + +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-src/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-src/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-src/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-src/customize.py)*** + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-src/_cm.json)*** + * get,python3 + * `if (CM_REQUIRE_INSTALL != yes)` + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + +___ +### Script output +`cmr "install src python python3 src-python3 src-python [,variations]" -j` +#### New environment keys (filter) + +* `+C_INCLUDE_PATH` +* `+LD_LIBRARY_PATH` +* `+PATH` +* `CM_PYTHON_BIN_WITH_PATH` +* `CM_PYTHON_INSTALL_PATH` +#### New environment keys auto-detected from customize + +* `CM_PYTHON_BIN_WITH_PATH` \ No newline at end of file diff --git a/script/install-python-src/_cm.json b/script/install-python-src/_cm.json new file mode 100644 index 0000000000..429b0d8c7c --- /dev/null +++ b/script/install-python-src/_cm.json @@ -0,0 +1,93 @@ +{ + "alias": "install-python-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Python automation", + "cache": true, + "default_version": "3.10.13", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + } + ], + "default_env": { + "CM_ENABLE_SSL": "no", + "CM_CUSTOM_SSL": "no", + "CM_SHARED_BUILD": "no", + "CM_PYTHON_OPTIMIZATION_FLAG": "", + "CM_PYTHON_LTO_FLAG": "", + "CM_WGET_URL": "https://www.python.org/ftp/python/[PYTHON_VERSION]/Python-[PYTHON_VERSION].tgz" + }, + "new_env_keys": [ + "CM_PYTHON_INSTALL_PATH", + "CM_PYTHON_BIN_WITH_PATH", + "+PATH", + "+LD_LIBRARY_PATH", + "+C_INCLUDE_PATH" + ], + "post_deps": [ + { + "names": [ + "python", + "python3" + ], + "inherit_variation_tags": "True", + "reuse_version": true, + "skip_if_env": { + "CM_REQUIRE_INSTALL": [ + "yes" + ] + }, + "tags": "get,python3" + } + ], + "tags": [ + "install", + "src", + "python", + "python3", + "src-python3", + "src-python" + ], + "uid": "12d3a608afe14a1e", + "variations": { + "shared": { + "env": { + "CM_PYTHON_INSTALL_CACHE_TAGS": "shared", + "CM_SHARED_BUILD": "yes" + } + }, + "with-ssl": { + "env": { + "CM_ENABLE_SSL": "yes", + "CM_PYTHON_INSTALL_CACHE_TAGS": "with-ssl" + } + }, + "with-custom-ssl": { + "deps": [ + { + "tags": "get,openssl" + } + ], + "env": { + "CM_CUSTOM_SSL": "yes", + "CM_PYTHON_INSTALL_CACHE_TAGS": "with-custom-ssl" + } + }, + "optimized": { + "env": { + 
"CM_PYTHON_OPTIMIZATION_FLAG": " --enable-optimizations", + "CM_PYTHON_INSTALL_CACHE_TAGS": "optimized" + } + }, + "lto": { + "env": { + "CM_PYTHON_LTO_FLAG": " --lto", + "CM_PYTHON_INSTALL_CACHE_TAGS": "with-lto" + } + } + } +} diff --git a/script/install-python-src/customize.py b/script/install-python-src/customize.py new file mode 100644 index 0000000000..a7025a6cf2 --- /dev/null +++ b/script/install-python-src/customize.py @@ -0,0 +1,46 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + need_version = env.get('CM_VERSION','') + if need_version == '': + return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} + + print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + + path_bin = os.path.join(os.getcwd(), 'install', 'bin') + + env['CM_PYTHON_INSTALLED_PATH'] = path_bin + + return {'return':0} + +def postprocess(i): + + env = i['env'] + variation_tags = i['variation_tags'] + + path_lib = os.path.join(os.getcwd(), 'install', 'lib') + env['+LD_LIBRARY_PATH'] = [ path_lib ] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() + + env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join(env['CM_PYTHON_INSTALLED_PATH'], 'python3') + + # We don't need to check default paths here because we force install to cache + env['+PATH'] = [env['CM_PYTHON_INSTALLED_PATH']] + path_include = os.path.join(os.getcwd(), 'install', 'include') + env['+C_INCLUDE_PATH'] = [ path_include ] + + return {'return':0} diff --git a/script/install-python-src/run.sh b/script/install-python-src/run.sh new file mode 100644 index 0000000000..d151283e7a --- /dev/null +++ b/script/install-python-src/run.sh @@ -0,0 +1,71 @@ +#!/bin/bash + +CUR_DIR=$PWD + +echo "***********************************************************" +export PYTHON_VERSION=${CM_VERSION} +CM_WGET_URL="${CM_WGET_URL//"[PYTHON_VERSION]"/$PYTHON_VERSION}" + +echo "CM_WGET_URL=${CM_WGET_URL}" >> tmp-run-env.out +echo "wget Python src from ${CM_WGET_URL} for version ${PYTHON_VERSION}..." 
+ +CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} +CM_MAKE_CORES=${CM_MAKE_CORES:-2} + +if [[ ${CM_SHARED_BUILD} == "yes" ]]; then + SHARED_BUILD_FLAGS=" --enable-shared" +else + SHARED_BUILD_FLAGS="" +fi + +EXTRA_FLAGS="" + +if [[ ${CM_ENABLE_SSL} == "yes" ]]; then + EXTRA_FLAGS="${EXTRA_FLAGS} --enable-ssl" +fi + + +if [[ ${CM_CUSTOM_SSL} == "yes" ]]; then + EXTRA_FLAGS="${EXTRA_FLAGS} --with-openssl=${CM_OPENSSL_INSTALLED_PATH} --with-openssl-rpath=auto" +fi + +rm -rf src +mkdir src + +rm -rf install +mkdir install + +cd src + +pwd +wget -nc ${CM_WGET_URL} + +if [ "${?}" != "0" ]; then exit 1; fi + +tar xzf Python-${PYTHON_VERSION}.tgz +if [ "${?}" != "0" ]; then exit 1; fi + + +rm -f Python-${PYTHON_VERSION}.tgz +if [ "${?}" != "0" ]; then exit 1; fi + +cd Python-${PYTHON_VERSION} + +./configure ${CM_PYTHON_OPTIMIZATION_FLAG} ${CM_PYTHON_LTO_FLAG} ${SHARED_BUILD_FLAGS} ${EXTRA_FLAGS} --with-ensurepip=install --prefix="${CUR_DIR}/install" +if [ "${?}" != "0" ]; then exit 1; fi + +make -j${CM_MAKE_CORES} +make -j${CM_MAKE_CORES} install +if [ "${?}" != "0" ]; then exit 1; fi + +echo "Removing src files" +cd "${CUR_DIR}" && \ +rm -rf src + +if [ "${?}" != "0" ]; then exit 1; fi + +cd "${CUR_DIR}/install/bin" && ln -s python3 python +cd "${CUR_DIR}/install/bin" && ln -s pip3 pip + +echo "********************************************************" +echo "Python was built and installed to ${CUR_DIR}/install ..." diff --git a/script/install-python-venv/README.md b/script/install-python-venv/README.md new file mode 100644 index 0000000000..3588ca8bc2 --- /dev/null +++ b/script/install-python-venv/README.md @@ -0,0 +1,154 @@ +Automatically generated README for this automation recipe: **install-python-venv** + +Category: **Python automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-python-venv,7633ebada4584c6c) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-venv)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,python,get-python-venv,python-venv* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "install python get-python-venv python-venv" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=install,python,get-python-venv,python-venv`
+
+`cm run script --tags=install,python,get-python-venv,python-venv[,variations] `
+
+*or*
+
+`cmr "install python get-python-venv python-venv"`
+
+`cmr "install python get-python-venv python-venv [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,python,get-python-venv,python-venv',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,python,get-python-venv,python-venv"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,python,get-python-venv,python-venv) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install python get-python-venv python-venv[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_lto` + - Workflow: + * `_optimized` + - Workflow: + * `_shared` + - Workflow: + * `_with-custom-ssl` + - Workflow: + * `_with-ssl` + - Workflow: + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
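+For example, to create a named virtual environment without the interactive prompt (illustrative; `CM_NAME` is the env key read by `customize.py` below, set here via the generic `--env.KEY=VALUE` mechanism):
+
+`cm run script --tags=install,python,get-python-venv,python-venv --env.CM_NAME=mlperf`
+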
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-venv/_cm.json)*** + * get,python,-virtual + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-venv/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-venv/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-venv/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-venv/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-venv/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-venv/customize.py)*** + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-python-venv/_cm.json)*** + * get,python3 + * CM names: `--adr.['register-python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + +___ +### Script output +`cmr "install python get-python-venv python-venv [,variations]" -j` +#### New environment keys (filter) + +* `CM_PYTHON_BIN_WITH_PATH` +* `CM_VIRTUAL_ENV_*` +#### New environment keys auto-detected from customize + +* `CM_PYTHON_BIN_WITH_PATH` +* `CM_VIRTUAL_ENV_DIR` +* `CM_VIRTUAL_ENV_PATH` +* `CM_VIRTUAL_ENV_SCRIPTS_PATH` \ No newline at end of file diff --git a/script/install-python-venv/_cm.json b/script/install-python-venv/_cm.json new file mode 100644 index 0000000000..e1143980b8 --- /dev/null +++ b/script/install-python-venv/_cm.json @@ -0,0 +1,44 @@ +{ + "alias": "install-python-venv", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Python automation", + "clean_files": [], + "deps": [ + { + "reuse_version": true, + "inherit_variation_tags": true, + "tags": "get,python,-virtual" + } + ], + "new_env_keys": [ + "CM_VIRTUAL_ENV_*", + "CM_PYTHON_BIN_WITH_PATH" + ], + "new_state_keys": [ + "script_prefix" + ], + "post_deps": [ + { + "names": [ + "register-python" + ], + "tags": "get,python3" + } + ], + "tags": [ + "install", + "python", + "get-python-venv", + "python-venv" + ], + "uid": "7633ebada4584c6c", + "variations": { + "lto": {}, + "optimized": {}, + "shared": {}, + "with-custom-ssl": {}, + "with-ssl": {} + } +} diff --git a/script/install-python-venv/customize.py b/script/install-python-venv/customize.py new file mode 100644 index 0000000000..84fe4984ad --- /dev/null +++ b/script/install-python-venv/customize.py @@ -0,0 +1,83 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + # Add extra tags to python + add_extra_cache_tags = [] # for this script + add_python_extra_cache_tags = ['virtual'] # for get-python script + + name = env.get('CM_NAME','') + if not quiet and name == '': + print ('') + x = input('Enter some tag to describe this virtual env 
(mlperf-inf,octoml-bench,etc): ') + x = x.strip() + + if x != '': name = x + + directory_name = 'venv' + if name != '': + directory_name = name.strip().lower() + name_tag = 'name-' + directory_name + + add_extra_cache_tags.append(name_tag) + add_python_extra_cache_tags.append(name_tag) + + env['CM_VIRTUAL_ENV_DIR'] = directory_name + env['CM_VIRTUAL_ENV_PATH'] = os.path.join(os.getcwd(), directory_name) + + s = 'Scripts' if os_info['platform'] == 'windows' else 'bin' + env['CM_VIRTUAL_ENV_SCRIPTS_PATH'] = os.path.join(env['CM_VIRTUAL_ENV_PATH'], s) + + env['CM_TMP_PATH'] = env['CM_VIRTUAL_ENV_SCRIPTS_PATH'] + env['CM_TMP_FAIL_IF_NOT_FOUND'] = 'yes' + + + r = automation.update_deps({'deps':meta['post_deps'], + 'update_deps':{'register-python': + {'extra_cache_tags':','.join(add_python_extra_cache_tags)}}}) + if r['return']>0: return r + + env['CM_PYTHON_INSTALLED_PATH'] = env['CM_VIRTUAL_ENV_SCRIPTS_PATH'] + + return {'return':0, 'add_extra_cache_tags':add_extra_cache_tags} + +def postprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + state = i['state'] + + script_prefix = state.get('script_prefix',[]) + + path_to_activate = os.path.join(env['CM_VIRTUAL_ENV_SCRIPTS_PATH'], 'activate') + + # If windows, download here otherwise use run.sh + if os_info['platform'] == 'windows': + path_to_activate += '.bat' + + s = os_info['run_bat'].replace('${bat_file}', '"'+path_to_activate+'"') + + script_prefix.append(s) + state['script_prefix'] = script_prefix + + python_name = 'python.exe' if os_info['platform'] == 'windows' else 'python3' + + # Will be passed to get-python to finalize registering of the new python + env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join(env['CM_PYTHON_INSTALLED_PATH'], python_name) + + return {'return':0} diff --git a/script/install-python-venv/run.bat b/script/install-python-venv/run.bat new file mode 100644 index 0000000000..6c48e1bdc6 --- /dev/null +++ b/script/install-python-venv/run.bat @@ -0,0 +1,5 @@ +%CM_PYTHON_BIN_WITH_PATH% -m pip install virtualenv +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +%CM_PYTHON_BIN_WITH_PATH% -m venv %CM_VIRTUAL_ENV_DIR% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/install-python-venv/run.sh b/script/install-python-venv/run.sh new file mode 100644 index 0000000000..87dfcaf10d --- /dev/null +++ b/script/install-python-venv/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +#PIP_EXTRA=`${CM_PYTHON_BIN} -c "import pkg_resources; print(' --break-system-packages ' if int(pkg_resources.get_distribution('pip').version.split('.')[0]) >= 23 else '')"` +PIP_EXTRA=`${CM_PYTHON_BIN} -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"` + +${CM_PYTHON_BIN_WITH_PATH} -m pip install virtualenv ${PIP_EXTRA} +test $? -eq 0 || exit 1 + +${CM_PYTHON_BIN_WITH_PATH} -m venv ${CM_VIRTUAL_ENV_DIR} +test $? 
-eq 0 || exit 1
diff --git a/script/install-pytorch-from-src/README.md b/script/install-pytorch-from-src/README.md
new file mode 100644
index 0000000000..db0aba0520
--- /dev/null
+++ b/script/install-pytorch-from-src/README.md
@@ -0,0 +1,249 @@
+Automatically generated README for this automation recipe: **install-pytorch-from-src**
+
+Category: **Compiler automation**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-pytorch-from-src,64eaf3e81de94f41) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-from-src)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *install,get,src,from.src,pytorch,src-pytorch*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "install get src from.src pytorch src-pytorch" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=install,get,src,from.src,pytorch,src-pytorch`
+
+`cm run script --tags=install,get,src,from.src,pytorch,src-pytorch[,variations] `
+
+*or*
+
+`cmr "install get src from.src pytorch src-pytorch"`
+
+`cmr "install get src from.src pytorch src-pytorch [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,get,src,from.src,pytorch,src-pytorch',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
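+Beyond the template above, the dictionary returned by `cmind.access` also carries the environment produced by the script; a sketch (assuming the standard `new_env` key returned by the script automation, and the assumed `'quiet'` input to suppress prompts):
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'install,get,src,from.src,pytorch,src-pytorch',
+                  'out': 'con',
+                  'quiet': True})
+if r['return'] > 0:
+    raise RuntimeError(r['error'])
+
+# 'new_env' (if present) holds the keys matching this script's new_env_keys filter.
+print(r.get('new_env', {}))
+```
+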
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,get,src,from.src,pytorch,src-pytorch"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,get,src,from.src,pytorch,src-pytorch) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install get src from.src pytorch src-pytorch[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_branch.#` + - Environment variables: + - *CM_GIT_CHECKOUT*: `#` + - Workflow: + * `_cuda` + - Environment variables: + - *CUDA_HOME*: `<<>>` + - *CUDNN_LIBRARY_PATH*: `<<>>` + - *CUDNN_INCLUDE_PATH*: `<<>>` + - *CUDA_NVCC_EXECUTABLE*: `<<>>` + - *USE_CUDA*: `1` + - *USE_CUDNN*: `1` + - *TORCH_CUDA_ARCH_LIST*: `Ampere Ada Hopper` + - *TORCH_CXX_FLAGS*: `-D_GLIBCXX_USE_CXX11_ABI=1` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda,_cudnn + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * `_for-intel-mlperf-inference-v3.1-bert` + - Environment variables: + - *CM_CONDA_ENV*: `yes` + - *CM_MLPERF_INFERENCE_INTEL*: `yes` + - *USE_CUDA*: `0` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-sys-util,_libffi7 + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,conda,_name.bert-pt + * CM names: `--adr.['conda']...` + - CM script: [get-conda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-conda) + * get,generic,conda-package,_package.ncurses,_source.conda-forge + * CM names: `--adr.['conda-package', 'ncurses']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.python + * CM names: `--adr.['conda-package', 'python3']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * install,llvm,src,_tag.llvmorg-15.0.7,_runtimes.libcxx:libcxxabi:openmp,_clang,_release,_for-intel-mlperf-inference-v3.1-bert + - CM script: [install-llvm-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-llvm-src) + * get,generic,conda-package,_package.ninja + * CM names: `--adr.['conda-package', 'ninja']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.cmake + * CM names: `--adr.['conda-package', 'cmake']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.mkl,_source.intel + * CM names: `--adr.['conda-package', 'mkl']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.mkl-include,_source.intel + * CM names: `--adr.['conda-package', 'mkl-include']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.intel-openmp,_source.intel + * CM names: `--adr.['conda-package', 'intel-openmp']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.llvm-openmp,_source.conda-forge + * CM names: `--adr.['conda-package', 'llvm-openmp']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.jemalloc,_source.conda-forge + * CM names: `--adr.['conda-package', 'jemalloc']...` + - CM script: 
[install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.wheel,_source.conda-forge + * CM names: `--adr.['conda-package', 'wheel']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.setuptools,_source.conda-forge + * CM names: `--adr.['conda-package', 'setuptools']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.future,_source.conda-forge + * CM names: `--adr.['conda-package', 'future']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge + * CM names: `--adr.['conda-package', 'libstdcxx-ng']...` + - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package) + * `_for-nvidia-mlperf-inference-v3.1` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cmake + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * `_sha.#` + - Environment variables: + - *CM_GIT_CHECKOUT_SHA*: `#` + - Workflow: + * `_tag.#` + - Environment variables: + - *CM_GIT_CHECKOUT_TAG*: `#` + - Workflow: + +
+ + + * Group "**repo**" +
+ Click here to expand this section. + + * `_repo.#` + - Environment variables: + - *CM_GIT_URL*: `#` + - Workflow: + * **`_repo.https://github.com/pytorch/pytorch`** (default) + - Environment variables: + - *CM_GIT_URL*: `https://github.com/pytorch/pytorch` + - Workflow: + +
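+*A minimal illustrative example combining variations: the `_cuda` variation together with a checkout tag. The tag value `v2.1.0` is an assumption for illustration, not something pinned by this recipe:* + +```bash +# Hypothetical example: build PyTorch at tag v2.1.0 with CUDA/cuDNN enabled +cm run script --tags=install,get,src,from.src,pytorch,src-pytorch,_tag.v2.1.0,_cuda +``` +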
+ + +#### Default variations + +`_repo.https://github.com/pytorch/pytorch` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-from-src/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,python3 + * `if (CM_CONDA_ENV != yes)` + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,git,repo + * CM names: `--adr.['pytorch-src-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-from-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-from-src/_cm.json) + 1. ***Run native script if exists*** + * [run-intel-mlperf-inference-v3_1.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-from-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-from-src/_cm.json) + 1. Run "postprocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-from-src/_cm.json) + +___ +### Script output +`cmr "install get src from.src pytorch src-pytorch [,variations]" -j` +#### New environment keys (filter) + +* `CM_PYTORCH_*` +#### New environment keys auto-detected from customize diff --git a/script/install-pytorch-from-src/_cm.json b/script/install-pytorch-from-src/_cm.json new file mode 100644 index 0000000000..75f15a5018 --- /dev/null +++ b/script/install-pytorch-from-src/_cm.json @@ -0,0 +1,255 @@ +{ + "alias": "install-pytorch-from-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Compiler automation", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "tags": "get,python3", + "skip_if_env": { + "CM_CONDA_ENV": [ "yes" ] + }, + "names": [ + "python", + "python3" + ] + }, + { + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_PYTORCH_SRC_REPO_PATH" + }, + "extra_cache_tags": "pytorch,src,pytorch-src,pytorch-src-repo", + "names": [ + "pytorch-src-repo" + ], + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_branch.": [ + "CM_GIT_CHECKOUT" + ], + "_repo.": [ + "CM_GIT_URL" + ], + "_sha.": [ + "CM_GIT_CHECKOUT_SHA" + ], + "_tag.": [ + "CM_GIT_CHECKOUT_TAG" + ] + } + } + ], + "env": { + "CM_GIT_URL": "https://github.com/pytorch/pytorch" + }, + "name": "Build pytorch from sources", + "new_env_keys": [ + "CM_PYTORCH_*" + ], + "prehook_deps": [], + "sort": 1000, + "tags": [ + "install", + "get", + "src", + "from.src", + "pytorch", + "src-pytorch" + ], + "uid": "64eaf3e81de94f41", + "variations": { + "branch.#": { + "env": { + "CM_GIT_CHECKOUT": "#" + } + }, + "repo.https://github.com/pytorch/pytorch": { + "env": { + "CM_GIT_URL": "https://github.com/pytorch/pytorch" + }, + "group": "repo", + "default": true + }, + "repo.#": { + "env": { + "CM_GIT_URL": "#" 
+ }, + "group": "repo" + }, + "sha.#": { + "env": { + "CM_GIT_CHECKOUT_SHA": "#" + } + }, + "tag.#": { + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + }, + "ad": { + "pytorch-src-repo": { + "tags": "_no-recurse-submodules,_full-history" + } + } + }, + "for-intel-mlperf-inference-v3.1-bert": { + "base": [ + "tag.v1.12.0" + ], + "adr": { + "conda-package": { + "tags": "_name.bert-pt" + } + }, + "env": { + "CM_CONDA_ENV": "yes", + "CM_MLPERF_INFERENCE_INTEL": "yes", + "USE_CUDA": "0" + }, + "deps": [ + { + "tags": "get,generic-sys-util,_libffi7" + }, + { + "tags": "get,conda,_name.bert-pt", + "names": [ "conda" ] + }, + { + "names": [ + "conda-package", + "ncurses" + ], + "tags": "get,generic,conda-package,_package.ncurses,_source.conda-forge" + }, + { + "names": [ + "conda-package", + "python3" + ], + "tags": "get,generic,conda-package,_package.python", + "version": "3.8" + }, + { + "tags": "install,llvm,src,_tag.llvmorg-15.0.7,_runtimes.libcxx:libcxxabi:openmp,_clang,_release,_for-intel-mlperf-inference-v3.1-bert" + }, + { + "names": [ + "conda-package", + "ninja" + ], + "tags": "get,generic,conda-package,_package.ninja" + }, + { + "names": [ + "conda-package", + "cmake" + ], + "tags": "get,generic,conda-package,_package.cmake" + }, + { + "names": [ + "conda-package", + "mkl" + ], + "tags": "get,generic,conda-package,_package.mkl,_source.intel", + "version": "2023.1.0" + }, + { + "names": [ + "conda-package", + "mkl-include" + ], + "tags": "get,generic,conda-package,_package.mkl-include,_source.intel", + "version": "2023.1.0" + }, + { + "names": [ + "conda-package", + "intel-openmp" + ], + "tags": "get,generic,conda-package,_package.intel-openmp,_source.intel", + "version": "2023.1.0" + }, + { + "names": [ + "conda-package", + "llvm-openmp" + ], + "tags": "get,generic,conda-package,_package.llvm-openmp,_source.conda-forge" + }, + { + "names": [ + "conda-package", + "jemalloc" + ], + "tags": "get,generic,conda-package,_package.jemalloc,_source.conda-forge" + }, + { + "names": [ + "conda-package", + "wheel" + ], + "tags": "get,generic,conda-package,_package.wheel,_source.conda-forge" + }, + { + "names": [ + "conda-package", + "setuptools" + ], + "tags": "get,generic,conda-package,_package.setuptools,_source.conda-forge" + }, + { + "names": [ + "conda-package", + "future" + ], + "tags": "get,generic,conda-package,_package.future,_source.conda-forge" + }, + { + "names": [ + "conda-package", + "libstdcxx-ng" + ], + "tags": "get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge" + } + ] + }, + "for-nvidia-mlperf-inference-v3.1": { + "base": [ + "sha.b5021ba9", + "cuda" + ], + "deps": [ + { + "tags": "get,cmake", + "version_min": "3.25.0" + } + ] + }, + "cuda": { + "deps": [ + { + "tags": "get,cuda,_cudnn", + "names": [ "cuda" ] + } + ], + "env": { + "CUDA_HOME": "<<>>", + "CUDNN_LIBRARY_PATH": "<<>>", + "CUDNN_INCLUDE_PATH": "<<>>", + "CUDA_NVCC_EXECUTABLE": "<<>>", + "USE_CUDA": "1", + "USE_CUDNN": "1", + "TORCH_CUDA_ARCH_LIST": "Ampere Ada Hopper", + "TORCH_CXX_FLAGS": "-D_GLIBCXX_USE_CXX11_ABI=1" + } + } + }, + "versions": {} +} diff --git a/script/install-pytorch-from-src/customize.py b/script/install-pytorch-from-src/customize.py new file mode 100644 index 0000000000..38494d7b47 --- /dev/null +++ b/script/install-pytorch-from-src/customize.py @@ -0,0 +1,23 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + 
if env.get('CM_MLPERF_INFERENCE_INTEL', '') == "yes": + i['run_script_input']['script_name'] = "run-intel-mlperf-inference-v3_1" + run_cmd="CC=clang CXX=clang++ USE_CUDA=OFF python -m pip install -e . " + + env['CM_RUN_CMD'] = run_cmd + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return':0} diff --git a/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh b/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh new file mode 100644 index 0000000000..7ad6fbd610 --- /dev/null +++ b/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +CUR_DIR=$PWD +rm -rf pytorch +cp -r ${CM_PYTORCH_SRC_REPO_PATH} pytorch +cd pytorch +rm -rf build + +git submodule sync +git submodule update --init --recursive +if [ "${?}" != "0" ]; then exit 1; fi +pushd third_party/gloo +wget -nc --no-check-certificate https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/bert-99/pytorch-cpu/patches/gloo.patch +if [ "${?}" != "0" ]; then exit 1; fi +git apply gloo.patch +if [ "${?}" != "0" ]; then exit 1; fi +popd + +pushd third_party/ideep/mkl-dnn +wget -nc --no-check-certificate https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/bert-99/pytorch-cpu/patches/clang_mkl_dnn.patch +if [ "${?}" != "0" ]; then exit 1; fi +git apply clang_mkl_dnn.patch +if [ "${?}" != "0" ]; then exit 1; fi +popd + +wget -nc --no-check-certificate https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/bert-99/pytorch-cpu/patches/pytorch_official_1_12.patch +if [ "${?}" != "0" ]; then exit 1; fi +git apply pytorch_official_1_12.patch +if [ "${?}" != "0" ]; then exit 1; fi +pip install -r requirements.txt + +cmd="${CM_RUN_CMD}" +echo ${cmd} +eval ${cmd} + +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" diff --git a/script/install-pytorch-from-src/run.sh b/script/install-pytorch-from-src/run.sh new file mode 100644 index 0000000000..3d76b8efd1 --- /dev/null +++ b/script/install-pytorch-from-src/run.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +CUR_DIR=$PWD +if ! ls pytorch/dist/torch*.whl >/dev/null 2>&1; then  # rebuild only if no wheel exists yet ([[ -e ]] does not expand globs) + rm -rf pytorch + cp -r ${CM_PYTORCH_SRC_REPO_PATH} pytorch + cd pytorch + git submodule sync + git submodule update --init --recursive + rm -rf build + + ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r requirements.txt + test $? -eq 0 || exit $? + ${CM_PYTHON_BIN_WITH_PATH} setup.py bdist_wheel + test $? -eq 0 || exit $? +else + cd pytorch +fi + +cd dist +${CM_PYTHON_BIN_WITH_PATH} -m pip install torch-2.*linux_x86_64.whl +test $? -eq 0 || exit $? 
diff --git a/script/install-pytorch-kineto-from-src/README.md b/script/install-pytorch-kineto-from-src/README.md new file mode 100644 index 0000000000..9c5ad5af0a --- /dev/null +++ b/script/install-pytorch-kineto-from-src/README.md @@ -0,0 +1,192 @@ +Automatically generated README for this automation recipe: **install-pytorch-kineto-from-src** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-pytorch-kineto-from-src,98a4b061712d4483) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-kineto-from-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,get,src,from.src,pytorch-kineto,kineto,src-pytorch-kineto* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install get src from.src pytorch-kineto kineto src-pytorch-kineto" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,get,src,from.src,pytorch-kineto,kineto,src-pytorch-kineto` + +`cm run script --tags=install,get,src,from.src,pytorch-kineto,kineto,src-pytorch-kineto[,variations] ` + +*or* + +`cmr "install get src from.src pytorch-kineto kineto src-pytorch-kineto"` + +`cmr "install get src from.src pytorch-kineto kineto src-pytorch-kineto [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'install,get,src,from.src,pytorch-kineto,kineto,src-pytorch-kineto', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,get,src,from.src,pytorch-kineto,kineto,src-pytorch-kineto"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,get,src,from.src,pytorch-kineto,kineto,src-pytorch-kineto) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install get src from.src pytorch-kineto kineto src-pytorch-kineto[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_branch.#` + - Environment variables: + - *CM_GIT_CHECKOUT*: `#` + - Workflow: + * `_cuda` + - Environment variables: + - *CUDA_HOME*: `<<>>` + - *CUDA_NVCC_EXECUTABLE*: `<<>>` + - *CUDNN_INCLUDE_PATH*: `<<>>` + - *CUDNN_LIBRARY_PATH*: `<<>>` + - *TORCH_CUDA_ARCH_LIST*: `Ampere Ada Hopper` + - *TORCH_CXX_FLAGS*: `-D_GLIBCXX_USE_CXX11_ABI=1` + - *USE_CUDA*: `1` + - *USE_CUDNN*: `1` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,cuda,_cudnn + * CM names: `--adr.['cuda']...` + - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda) + * `_sha.#` + - Environment variables: + - *CM_GIT_CHECKOUT_SHA*: `#` + - Workflow: + * `_tag.#` + - Environment variables: + - *CM_GIT_CHECKOUT_TAG*: `#` + - Workflow: + +
+ + + * Group "**repo**" +
+ Click here to expand this section. + + * `_repo.#` + - Environment variables: + - *CM_GIT_URL*: `#` + - Workflow: + * **`_repo.https://github.com/pytorch/kineto`** (default) + - Environment variables: + - *CM_GIT_URL*: `https://github.com/pytorch/kineto` + - Workflow: + +
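+*A minimal illustrative example: combine the `_cuda` variation with a pinned commit. The SHA value below is a hypothetical placeholder, not a tested revision:* + +```bash +# Hypothetical example: build Kineto at a pinned commit with CUDA/cuDNN enabled +cm run script --tags=install,get,src,from.src,pytorch-kineto,kineto,src-pytorch-kineto,_cuda,_sha.0123abc +``` +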
+ + +#### Default variations + +`_repo.https://github.com/pytorch/kineto` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-kineto-from-src/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,python3 + * `if (CM_CONDA_ENV != yes)` + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,cmake + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * get,git,repo + * CM names: `--adr.['pytorch-kineto-src-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-kineto-from-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-kineto-from-src/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-kineto-from-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-kineto-from-src/_cm.json) + 1. Run "postprocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-pytorch-kineto-from-src/_cm.json) + +___ +### Script output +`cmr "install get src from.src pytorch-kineto kineto src-pytorch-kineto [,variations]" -j` +#### New environment keys (filter) + +* `CM_PYTORCH_KINETO_*` +#### New environment keys auto-detected from customize diff --git a/script/install-pytorch-kineto-from-src/_cm.json b/script/install-pytorch-kineto-from-src/_cm.json new file mode 100644 index 0000000000..d7417150b2 --- /dev/null +++ b/script/install-pytorch-kineto-from-src/_cm.json @@ -0,0 +1,130 @@ +{ + "alias": "install-pytorch-kineto-from-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Compiler automation", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "names": [ + "python", + "python3" + ], + "skip_if_env": { + "CM_CONDA_ENV": [ + "yes" + ] + }, + "tags": "get,python3" + }, + { + "tags": "get,cmake", + "version_min": "3.25.0" + }, + { + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_PYTORCH_KINETO_SRC_REPO_PATH" + }, + "extra_cache_tags": "pytorch-kineto,kineto,src,pytorch-kineto-src,pytorch-kineto-src-repo", + "names": [ + "pytorch-kineto-src-repo" + ], + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_branch.": [ + "CM_GIT_CHECKOUT" + ], + "_repo.": [ + "CM_GIT_URL" + ], + "_sha.": [ + "CM_GIT_CHECKOUT_SHA" + ], + "_tag.": [ + "CM_GIT_CHECKOUT_TAG" + ] + } + } + ], + "env": { + "CM_GIT_URL": "https://github.com/pytorch/kineto" + }, + "name": "Build pytorch kineto from sources", + "new_env_keys": [ + "CM_PYTORCH_KINETO_*" + ], + "prehook_deps": [], + "sort": 1000, + "tags": [ + "install", + "get", + "src", + "from.src", + "pytorch-kineto", + "kineto", + "src-pytorch-kineto" + ], + "uid": "98a4b061712d4483", + "variations": { + "branch.#": { + "env": { + "CM_GIT_CHECKOUT": "#" + } + }, + 
"cuda": { + "deps": [ + { + "names": [ + "cuda" + ], + "tags": "get,cuda,_cudnn" + } + ], + "env": { + "CUDA_HOME": "<<>>", + "CUDA_NVCC_EXECUTABLE": "<<>>", + "CUDNN_INCLUDE_PATH": "<<>>", + "CUDNN_LIBRARY_PATH": "<<>>", + "TORCH_CUDA_ARCH_LIST": "Ampere Ada Hopper", + "TORCH_CXX_FLAGS": "-D_GLIBCXX_USE_CXX11_ABI=1", + "USE_CUDA": "1", + "USE_CUDNN": "1" + } + }, + "repo.#": { + "env": { + "CM_GIT_URL": "#" + }, + "group": "repo" + }, + "repo.https://github.com/pytorch/kineto": { + "default": true, + "env": { + "CM_GIT_URL": "https://github.com/pytorch/kineto" + }, + "group": "repo" + }, + "sha.#": { + "env": { + "CM_GIT_CHECKOUT_SHA": "#" + } + }, + "tag.#": { + "ad": { + "pytorch-src-repo": { + "tags": "_full-history" + } + }, + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + } + } + }, + "versions": {} +} diff --git a/script/install-pytorch-kineto-from-src/customize.py b/script/install-pytorch-kineto-from-src/customize.py new file mode 100644 index 0000000000..df2744ac4d --- /dev/null +++ b/script/install-pytorch-kineto-from-src/customize.py @@ -0,0 +1,17 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return':0} diff --git a/script/install-pytorch-kineto-from-src/run.sh b/script/install-pytorch-kineto-from-src/run.sh new file mode 100644 index 0000000000..bd162e7f8e --- /dev/null +++ b/script/install-pytorch-kineto-from-src/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=$PWD +rm -rf kineto +cp -r ${CM_PYTORCH_KINETO_SRC_REPO_PATH} kineto +cd kineto +rm -rf libkineto/build + +mkdir -p libkneto/build && cd libkineto/build +cmake .. +test $? -eq 0 || exit $? +make +test $? -eq 0 || exit $? +sudo make install +test $? -eq 0 || exit $? diff --git a/script/install-qaic-compute-sdk-from-src/README.md b/script/install-qaic-compute-sdk-from-src/README.md new file mode 100644 index 0000000000..ada40a2243 --- /dev/null +++ b/script/install-qaic-compute-sdk-from-src/README.md @@ -0,0 +1,201 @@ +Automatically generated README for this automation recipe: **install-qaic-compute-sdk-from-src** + +Category: **AI/ML frameworks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-qaic-compute-sdk-from-src,9701bdda97fa4045) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-qaic-compute-sdk-from-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,qaic,from.src,software,compute,compute-sdk,qaic-compute-sdk,sdk* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,qaic,from.src,software,compute,compute-sdk,qaic-compute-sdk,sdk` + +`cm run script --tags=get,qaic,from.src,software,compute,compute-sdk,qaic-compute-sdk,sdk[,variations] ` + +*or* + +`cmr "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk"` + +`cmr "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,qaic,from.src,software,compute,compute-sdk,qaic-compute-sdk,sdk', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,qaic,from.src,software,compute,compute-sdk,qaic-compute-sdk,sdk"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,qaic,from.src,software,compute,compute-sdk,qaic-compute-sdk,sdk) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_branch.#` + - Environment variables: + - *CM_GIT_CHECKOUT*: `#` + - Workflow: + +
+ + + * Group "**installation-mode**" +
+ Click here to expand this section. + + * `_debug` + - Environment variables: + - *CM_QAIC_COMPUTE_SDK_INSTALL_MODE*: `debug` + - Workflow: + * **`_release`** (default) + - Environment variables: + - *CM_QAIC_COMPUTE_SDK_INSTALL_MODE*: `release` + - Workflow: + * `_release-assert` + - Environment variables: + - *CM_QAIC_COMPUTE_SDK_INSTALL_MODE*: `release-assert` + - Workflow: + +
+ + + * Group "**repo-source**" +
+ Click here to expand this section. + + * `_repo.#` + - Environment variables: + - *CM_GIT_URL*: `#` + - Workflow: + * **`_repo.quic`** (default) + - Environment variables: + - *CM_GIT_URL*: `https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc` + - Workflow: + +
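+*A minimal illustrative example: select the `_debug` installation mode from the group above instead of the default `_release`:* + +```bash +# Illustrative: build the QAIC Compute SDK in debug mode from the default repository +cm run script --tags=get,qaic,from.src,software,compute,compute-sdk,qaic-compute-sdk,sdk,_debug +``` +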
+ + +#### Default variations + +`_release,_repo.quic` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-qaic-compute-sdk-from-src/_cm.json)*** + * get,git,repo,_repo.https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc + * CM names: `--adr.['qaic-software-git-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + * get,cmake + * CM names: `--adr.['cmake']...` + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + * get,llvm,_from-src + * CM names: `--adr.['llvm']...` + - CM script: [get-llvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-llvm) + * get,generic,sys-util,_libudev-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic,sys-util,_libpci-dev + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,google,test + - CM script: [get-google-test](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-google-test) + * get,generic-sys-util,_ninja-build + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic-sys-util,_rsync + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * download-and-extract,_extract,_url.https://codelinaro.jfrog.io/artifactory/codelinaro-toolchain-for-hexagon/v15.0.5/clang+llvm-15.0.5-cross-hexagon-unknown-linux-musl.tar.xz + * CM names: `--adr.['dae']...` + - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-qaic-compute-sdk-from-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-qaic-compute-sdk-from-src/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-qaic-compute-sdk-from-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-qaic-compute-sdk-from-src/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-qaic-compute-sdk-from-src/customize.py)*** + 1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-qaic-compute-sdk-from-src/_cm.json) + +___ +### Script output +`cmr "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk [,variations]" -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_QAIC_COMPUTE_SDK_PATH` +#### New environment keys auto-detected from customize + +* `CM_QAIC_COMPUTE_SDK_PATH` \ No newline at end of file diff --git a/script/install-qaic-compute-sdk-from-src/_cm.json b/script/install-qaic-compute-sdk-from-src/_cm.json new file mode 100644 index 0000000000..c2c54c715f --- /dev/null +++ b/script/install-qaic-compute-sdk-from-src/_cm.json @@ -0,0 +1,118 @@ +{ + "alias": "install-qaic-compute-sdk-from-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML frameworks", + "deps": [ + { + "names": [ + "qaic-software-git-repo" + ], + "update_tags_from_env_with_prefix": { + "_repo.": [ "CM_GIT_URL" ], + "_branch.": [ "CM_GIT_CHECKOUT" ] + }, + "tags": "get,git,repo,_repo.https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc", + "extra_cache_tags": "compute-sdk,qaic,from.src" + }, + { + "names": [ + "cmake" + ], + "tags": "get,cmake", + "version_min": "3.24.0" + }, + { + "names": [ + "llvm" + ], + "tags": "get,llvm,_from-src" + }, + { + "tags": "get,generic,sys-util,_libudev-dev" + }, + { + "tags": "get,generic,sys-util,_libpci-dev" + }, + { + "tags": "get,google,test" + }, + { + "tags": "get,generic-sys-util,_ninja-build" + }, + { + "tags": "get,generic-sys-util,_rsync" + }, + { + "env": { + "CM_EXTRACT_FINAL_ENV_NAME": "CM_HEXAGON_TOOLS_INSTALLED_DIR" + }, + "tags": "download-and-extract,_extract,_url.https://codelinaro.jfrog.io/artifactory/codelinaro-toolchain-for-hexagon/v15.0.5/clang+llvm-15.0.5-cross-hexagon-unknown-linux-musl.tar.xz", + "names": [ "dae" ], + "extra_cache_tags": "hexagon-compiler", + "force_cache": true + } + ], + "input_description": {}, + "input_mapping": {}, + "new_env_keys": [ + "+PATH", + "CM_QAIC_COMPUTE_SDK_PATH" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "get", + "qaic", + "from.src", + "software", + "compute", + "compute-sdk", + "qaic-compute-sdk", + "sdk" + ], + "uid": "9701bdda97fa4045", + "variations": { + "debug": { + "env": { + "CM_QAIC_COMPUTE_SDK_INSTALL_MODE": "debug" + }, + "group": "installation-mode" + }, + "release": { + "default": true, + "env": { + "CM_QAIC_COMPUTE_SDK_INSTALL_MODE": "release" + }, + "group": "installation-mode" + }, + "release-assert": { + "env": { + "CM_QAIC_COMPUTE_SDK_INSTALL_MODE": "release-assert" + }, + "group": "installation-mode" + }, + "repo.quic": { + "group": "repo-source", + "default": true, + "env": { + "CM_GIT_URL": "https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc" + } + }, + "repo.#": { + "group": "repo-source", + "env": { + "CM_GIT_URL": "#" + } + }, + "branch.#": { + "env": { + "CM_GIT_CHECKOUT": "#" + } + } + }, + "versions": {} +} diff --git a/script/install-qaic-compute-sdk-from-src/customize.py b/script/install-qaic-compute-sdk-from-src/customize.py new file mode 100644 index 0000000000..f6d4b1ab92 --- /dev/null +++ b/script/install-qaic-compute-sdk-from-src/customize.py @@ -0,0 +1,43 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + env['CM_QAIC_COMPUTE_SDK_PATH'] = env['CM_GIT_CHECKOUT_PATH'] + 
+ ''' + if env.get('+PATH', []) == []: + env['+PATH'] = [] + env['+PATH'].append(env['CM_LLVM_INSTALLED_PATH']) + + if env.get('+LD_LIBRARY_PATH', []) == []: + env['+LD_LIBRARY_PATH'] = [] + env['+LD_LIBRARY_PATH'].append(os.path.join(env['CM_LLVM_INSTALLED_PATH'], "..", "lib")) + ''' + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + #env['CM_QAIC_RUNNER_PATH'] = os.path.join(env['CM_QAIC_SOFTWARE_KIT_PATH'], "build", "utils", "qaic-runner") + + if '+PATH' not in env: + env['+PATH'] = [] + + env['CM_QAIC_COMPUTE_SDK_INSTALL_PATH'] = os.path.join(os.getcwd(), "src", "install", "qaic-compute-"+env['CM_QAIC_COMPUTE_SDK_INSTALL_MODE']) + + env['QAIC_COMPUTE_INSTALL_DIR'] = env['CM_QAIC_COMPUTE_SDK_INSTALL_PATH'] + + env['+PATH'].append(os.path.join(env['CM_QAIC_COMPUTE_SDK_INSTALL_PATH'], "exec")) + + return {'return':0} diff --git a/script/install-qaic-compute-sdk-from-src/run.sh b/script/install-qaic-compute-sdk-from-src/run.sh new file mode 100644 index 0000000000..734fe01b94 --- /dev/null +++ b/script/install-qaic-compute-sdk-from-src/run.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +function cmake() { +${CM_CMAKE_BIN_WITH_PATH} $@ +} + +export CC=${CM_C_COMPILER_WITH_PATH} +export CXX=${CM_CXX_COMPILER_WITH_PATH} + +export -f cmake +export HEXAGON_TOOLS_DIR=${CM_HEXAGON_TOOLS_INSTALLED_DIR}/clang+llvm-15.0.5-cross-hexagon-unknown-linux-musl/x86_64-linux-gnu + +mkdir -p src +rsync -avz --exclude=.git ${CM_QAIC_COMPUTE_SDK_PATH}/ src/ +cd src + +if [[ ${CM_CLEAN_BUILD} == "yes" ]]; then + rm -rf build +fi + +./scripts/build.sh --${CM_QAIC_COMPUTE_SDK_INSTALL_MODE} --install +test $? -eq 0 || exit $? + +cd - diff --git a/script/install-rocm/README.md b/script/install-rocm/README.md new file mode 100644 index 0000000000..8bbe710108 --- /dev/null +++ b/script/install-rocm/README.md @@ -0,0 +1,131 @@ +Automatically generated README for this automation recipe: **install-rocm** + +Category: **AI/ML frameworks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-rocm,9d13f90463ce4545) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-rocm)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,rocm,install-rocm* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install rocm install-rocm" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,rocm,install-rocm` + +`cm run script --tags=install,rocm,install-rocm ` + +*or* + +`cmr "install rocm install-rocm"` + +`cmr "install rocm install-rocm " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'install,rocm,install-rocm', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,rocm,install-rocm"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,rocm,install-rocm) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install rocm install-rocm" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
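+*A minimal illustrative example: a specific ROCm release from the Versions list below can be requested with the generic `--version` flag of CM scripts; the value shown is simply the default repeated for illustration:* + +```bash +# Illustrative: install the default ROCm version explicitly +cm run script --tags=install,rocm,install-rocm --version=5.7.1 +``` +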
+ +#### Versions +Default version: `5.7.1` + +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-rocm/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-rocm/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-rocm/_cm.json) + 1. ***Run native script if exists*** + * [run-rhel.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-rocm/run-rhel.sh) + * [run-ubuntu.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-rocm/run-ubuntu.sh) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-rocm/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-rocm/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-rocm/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-rocm/_cm.json) + +___ +### Script output +`cmr "install rocm install-rocm " -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_ROCM_*` +#### New environment keys auto-detected from customize + +* `CM_ROCM_BIN_WITH_PATH` +* `CM_ROCM_INSTALLED_PATH` \ No newline at end of file diff --git a/script/install-rocm/_cm.json b/script/install-rocm/_cm.json new file mode 100644 index 0000000000..925f8635f0 --- /dev/null +++ b/script/install-rocm/_cm.json @@ -0,0 +1,28 @@ +{ + "alias": "install-rocm", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML frameworks", + "clean_files": [], + "default_version": "5.7.1", + "deps": [ + { + "tags": "detect,os" + } + ], + "env": { + }, + "new_env_keys": [ + "CM_ROCM_*", + "+PATH" + ], + "tags": [ + "install", + "rocm", + "install-rocm" + ], + "uid": "9d13f90463ce4545", + "versions": { + } +} diff --git a/script/install-rocm/customize.py b/script/install-rocm/customize.py new file mode 100644 index 0000000000..3ae7daafb6 --- /dev/null +++ b/script/install-rocm/customize.py @@ -0,0 +1,19 @@ +from cmind import utils +import os + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + return {'return':0} + +def postprocess(i): + + env = i['env'] + installed_path = "/opt/rocm/bin" + env['CM_ROCM_INSTALLED_PATH'] = installed_path + env['CM_ROCM_BIN_WITH_PATH'] = os.path.join(installed_path, "rocminfo") + env['+PATH'] = [ installed_path ] + + return {'return':0} diff --git a/script/install-rocm/run-rhel.sh b/script/install-rocm/run-rhel.sh new file mode 100644 index 0000000000..10f8a6789a --- /dev/null +++ b/script/install-rocm/run-rhel.sh @@ -0,0 +1,27 @@ +# Add the amdgpu module repository for RHEL +repo1="[amdgpu] +name=amdgpu +baseurl=https://repo.radeon.com/amdgpu/${CM_VERSION}/rhel/${CM_HOST_OS_VERSION}/main/x86_64 +enabled=1 +gpgcheck=1 +gpgkey=https://repo.radeon.com/rocm/rocm.gpg.key +" +echo "${repo1}" | sudo tee /etc/yum.repos.d/amdgpu.repo + +# Add the rocm repository for RHEL +mainversion="${CM_HOST_OS_VERSION%%.*}" +repo2="[rocm] +name=rocm 
+baseurl=https://repo.radeon.com/rocm/rhel${mainversion}/latest/main +enabled=1 +priority=50 +gpgcheck=1 +gpgkey=https://repo.radeon.com/rocm/rocm.gpg.key +" +echo "${repo2}" | sudo tee /etc/yum.repos.d/rocm.repo + +sudo yum clean all + +sudo yum install amdgpu-dkms + +sudo yum install rocm-hip-libraries diff --git a/script/install-rocm/run-ubuntu.sh b/script/install-rocm/run-ubuntu.sh new file mode 100644 index 0000000000..400ba5fa73 --- /dev/null +++ b/script/install-rocm/run-ubuntu.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Make the directory if it doesn't exist yet. +# This location is recommended by the distribution maintainers. +sudo mkdir --parents --mode=0755 /etc/apt/keyrings +# Download the key, convert the signing-key to a full +# keyring required by apt and store in the keyring directory +wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | \ + gpg --dearmor | sudo tee /etc/apt/keyrings/rocm.gpg > /dev/null + +ubuntuflavor="jammy" +if [[ ${CM_HOST_OS_VERSION} == "22.04" ]]; then + ubuntuflavor="jammy" +elif [[ ${CM_HOST_OS_VERSION} == "20.04" ]]; then + ubuntuflavor="focal" +fi + +# Kernel driver repository +deb1="deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/amdgpu/${CM_VERSION}/ubuntu ${ubuntuflavor} main" +echo $deb1 | sudo tee /etc/apt/sources.list.d/amdgpu.list + +# ROCm repository +deb2="deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/debian ${ubuntuflavor} main" +echo $deb2 | sudo tee /etc/apt/sources.list.d/rocm.list + +# Prefer packages from the rocm repository over system packages +echo -e 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | sudo tee /etc/apt/preferences.d/rocm-pin-600 + +sudo apt update + +sudo apt install amdgpu-dkms + +sudo apt install rocm-hip-libraries diff --git a/script/install-rocm/run.sh b/script/install-rocm/run.sh new file mode 100644 index 0000000000..05a7907cf5 --- /dev/null +++ b/script/install-rocm/run.sh @@ -0,0 +1,2 @@ +#!/bin/bash + diff --git a/script/install-tensorflow-for-c/README.md b/script/install-tensorflow-for-c/README.md new file mode 100644 index 0000000000..ecec97bb28 --- /dev/null +++ b/script/install-tensorflow-for-c/README.md @@ -0,0 +1,124 @@ +Automatically generated README for this automation recipe: **install-tensorflow-for-c** + +Category: **AI/ML frameworks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-tensorflow-for-c,d73783d8302547d7) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-for-c)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,tensorflow,lib,lang-c* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install tensorflow lib lang-c" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,tensorflow,lib,lang-c` + +`cm run script --tags=install,tensorflow,lib,lang-c ` + +*or* + +`cmr "install tensorflow lib lang-c"` + +`cmr "install tensorflow lib lang-c " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'install,tensorflow,lib,lang-c', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,tensorflow,lib,lang-c"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,tensorflow,lib,lang-c) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install tensorflow lib lang-c" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
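+*A minimal illustrative example: a specific prebuilt TensorFlow C library release from the Versions list below can be requested with the generic `--version` flag; the value shown is the default:* + +```bash +# Illustrative: install the default TensorFlow C library version explicitly +cm run script --tags=install,tensorflow,lib,lang-c --version=2.8.0 +``` +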
+ +#### Versions +Default version: `2.8.0` + +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-for-c/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-for-c/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-for-c/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-for-c/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-for-c/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-for-c/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-for-c/_cm.json) + +___ +### Script output +`cmr "install tensorflow lib lang-c " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/install-tensorflow-for-c/_cm.json b/script/install-tensorflow-for-c/_cm.json new file mode 100644 index 0000000000..3aa23092bd --- /dev/null +++ b/script/install-tensorflow-for-c/_cm.json @@ -0,0 +1,21 @@ +{ + "alias": "install-tensorflow-for-c", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "AI/ML frameworks", + "cache": true, + "clean_files": [], + "default_version": "2.8.0", + "deps": [ + { + "tags": "detect,os" + } + ], + "tags": [ + "install", + "tensorflow", + "lib", + "lang-c" + ], + "uid": "d73783d8302547d7" +} diff --git a/script/install-tensorflow-for-c/customize.py b/script/install-tensorflow-for-c/customize.py new file mode 100644 index 0000000000..a0a8a42148 --- /dev/null +++ b/script/install-tensorflow-for-c/customize.py @@ -0,0 +1,27 @@ +from cmind import utils +import os + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: +# 20221024: we save and restore env in the main script and can clean env here for determinism +# if key not in env: + env[key] = [] + + env['+C_INCLUDE_PATH'].append(os.path.join(os.getcwd(), 'install', 'include')) + env['+CPLUS_INCLUDE_PATH'].append(os.path.join(os.getcwd(), 'install', 'include')) + + lib_path = os.path.join(os.getcwd(), 'install', 'lib') + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + return {'return':0} diff --git a/script/install-tensorflow-for-c/run.sh b/script/install-tensorflow-for-c/run.sh new file mode 100644 index 0000000000..2f7c3957bf --- /dev/null +++ b/script/install-tensorflow-for-c/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +CM_VERSION=${CM_VERSION:-2.8.0} +if [[ ${CM_HOST_PLATFORM_FLAVOR} != 'x86_64' ]]; then + echo "Platform ${CM_HOST_PLATFORM_FLAVOR} is not supported yet!"; + exit 1 +fi +mkdir install 
+FILENAME=libtensorflow-cpu-${CM_HOST_OS_TYPE}-x86_64-${CM_VERSION}.tar.gz +wget -q --no-check-certificate https://storage.googleapis.com/tensorflow/libtensorflow/${FILENAME} +tar -C install -xzf ${FILENAME} + +test $? -eq 0 || exit 1 diff --git a/script/install-tensorflow-from-src/README.md b/script/install-tensorflow-from-src/README.md new file mode 100644 index 0000000000..febdbbd0f3 --- /dev/null +++ b/script/install-tensorflow-from-src/README.md @@ -0,0 +1,166 @@ +Automatically generated README for this automation recipe: **install-tensorflow-from-src** + +Category: **AI/ML frameworks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-tensorflow-from-src,a974533c4c854597) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-from-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,install,tensorflow,lib,source,from-source,from-src,src,from.src* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get install tensorflow lib source from-source from-src src from.src" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,install,tensorflow,lib,source,from-source,from-src,src,from.src` + +`cm run script --tags=get,install,tensorflow,lib,source,from-source,from-src,src,from.src[,variations] ` + +*or* + +`cmr "get install tensorflow lib source from-source from-src src from.src"` + +`cmr "get install tensorflow lib source from-source from-src src from.src [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,install,tensorflow,lib,source,from-source,from-src,src,from.src', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,install,tensorflow,lib,source,from-source,from-src,src,from.src"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,install,tensorflow,lib,source,from-source,from-src,src,from.src) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get install tensorflow lib source from-source from-src src from.src[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_tflite` + - Environment variables: + - *CM_TFLITE*: `on` + - Workflow: + +
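+*A minimal illustrative example: the `_tflite` variation sets `CM_TFLITE=on` to build TFLite instead of the full TensorFlow library:* + +```bash +# Illustrative: build TFLite from source +cm run script --tags=get,install,tensorflow,lib,source,from-source,from-src,src,from.src,_tflite +``` +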
+ +#### Default environment + +
+
+These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+* CM_GIT_URL: `https://github.com/tensorflow/tensorflow`
+* CM_GIT_DEPTH: `1`
+* CM_TFLITE: `off`
+
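+For example, to clone the sources with a deeper git history than the default (the depth value below is illustrative):
+
+```cm run script --tags=get,install,tensorflow,lib,source,from-source,from-src,src,from.src --env.CM_GIT_DEPTH=10```
+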
+ +#### Versions +Default version: `master` + +* `master` +* `v1.15.0` +* `v2.0.0` +* `v2.1.0` +* `v2.2.0` +* `v2.3.0` +* `v2.4.0` +* `v2.5.0` +* `v2.6.0` +* `v2.7.0` +* `v2.8.0` +* `v2.9.0` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-from-src/_cm.json)*** + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,generic-sys-util,_zlib + * `if (CM_HOST_OS_FLAVOR == ubuntu AND CM_HOST_OS_VERSION == 18.04)` + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic-python-lib,_package.numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-from-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-from-src/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-from-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-from-src/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-from-src/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tensorflow-from-src/_cm.json) + +___ +### Script output +`cmr "get install tensorflow lib source from-source from-src src from.src [,variations]" -j` +#### New environment keys (filter) + +* `+CPLUS_INCLUDE_PATH` +* `+C_INCLUDE_PATH` +* `+DYLD_FALLBACK_LIBRARY_PATH` +* `+LD_LIBRARY_PATH` +#### New environment keys auto-detected from customize diff --git a/script/install-tensorflow-from-src/_cm.json b/script/install-tensorflow-from-src/_cm.json new file mode 100644 index 0000000000..da28df774d --- /dev/null +++ b/script/install-tensorflow-from-src/_cm.json @@ -0,0 +1,338 @@ +{ + "alias": "install-tensorflow-from-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML frameworks", + "clean_files": [], + "default_version": "master", + "deps": [ + { + "tags": "detect,cpu" + }, + { + "tags": "detect,os" + }, + { + "tags": "get,generic-sys-util,_zlib", + "enable_if_env": { + "CM_HOST_OS_FLAVOR": [ "ubuntu" ], + "CM_HOST_OS_VERSION": [ "18.04" ] + } + }, + { + "tags": "get,generic-python-lib,_package.numpy" + } + ], + "default_env": { + "CM_GIT_URL": "https://github.com/tensorflow/tensorflow", + "CM_GIT_DEPTH": "1", + "CM_TFLITE": "off" + }, + "extra_cache_tags_from_env": [ + { + "env": "CM_PYTHON_CACHE_TAGS", + "prefix": "python-" + } + ], + "tags": [ + "get", + "install", + "tensorflow", + "lib", + "source", + "from-source", + "from-src", + "src", + "from.src" + ], + "new_env_keys": [ + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ], + "uid": "a974533c4c854597", + "variations": { + "tflite": { + "env": { + "CM_TFLITE": "on" + } + } + 
}, + "versions": { + "master": { + "deps": [ + { + "tags": "get,python", + "version_max": "3.10.999", + "version_min": "3.7.0", + "names":["python","python3"] + }, + { + "tags": "gcc,get", + "version_min": "9.3" + }, + { + "tags": "get,bazel", + "version": "6.5.0" + } + ], + "env": { + "CM_GIT_CHECKOUT": "master" + } + }, + "v1.15.0": { + "deps": [ + { + "tags": "get,python", + "version_max": "3.7", + "version_min": "3.3", + "names":["python","python3"] + }, + { + "tags": "gcc,get", + "version_max": "7.3.1", + "version_min": "7" + }, + { + "tags": "get,bazel", + "version": "0.26.1" + } + ], + "env": { + "CM_GIT_CHECKOUT": "v1.15.0" + } + }, + "v2.0.0": { + "deps": [ + { + "tags": "get,python", + "version_max": "3.7.999", + "version_max_usable": "3.7.12", + "version_min": "3.3.0", + "names":["python","python3"] + }, + { + "tags": "gcc,get", + "version_max": "7.3.1", + "version_min": "7" + }, + { + "tags": "get,bazel", + "version": "0.26.1" + } + ], + "env": { + "CM_GIT_CHECKOUT": "v2.0.0" + } + }, + "v2.1.0": { + "deps": [ + { + "tags": "get,python", + "version_max": "3.7.999", + "version_max_usable": "3.7.12", + "version_min": "3.3.0", + "names":["python","python3"] + }, + { + "tags": "gcc,get", + "version_max": "7.3.1", + "version_min": "7" + }, + { + "tags": "get,bazel", + "version": "0.27.1" + } + ], + "env": { + "CM_GIT_CHECKOUT": "v2.1.0" + } + }, + "v2.2.0": { + "deps": [ + { + "tags": "get,python", + "version_max": "3.8.999", + "version_max_usable": "3.8.12", + "version_min": "3.5.0", + "names":["python","python3"] + }, + { + "tags": "gcc,get", + "version_max": "7.3.1", + "version_min": "7" + }, + { + "tags": "get,bazel", + "version": "2.0.0" + } + ], + "env": { + "CM_GIT_CHECKOUT": "v2.2.0" + } + }, + "v2.3.0": { + "deps": [ + { + "tags": "get,python", + "version_max": "3.8.999", + "version_max_usable": "3.8.12", + "version_min": "3.5.0", + "names":["python","python3"] + }, + { + "tags": "gcc,get", + "version_max": "7.3.1", + "version_min": "7" + }, + { + "tags": "get,bazel", + "version": "3.1.0" + } + ], + "env": { + "CM_GIT_CHECKOUT": "v2.3.0" + } + }, + "v2.4.0": { + "deps": [ + { + "tags": "get,python", + "version_max": "3.8.999", + "version_max_usable": "3.8.12", + "version_min": "3.6.0", + "names":["python","python3"] + }, + { + "tags": "gcc,get", + "version_max": "7.3.1", + "version_min": "7" + }, + { + "tags": "get,bazel", + "version": "3.1.0" + } + ], + "env": { + "CM_GIT_CHECKOUT": "v2.4.0" + } + }, + "v2.5.0": { + "deps": [ + { + "tags": "get,python", + "version_max": "3.9.999", + "version_max_usable": "3.9.12", + "version_min": "3.6.0", + "names":["python","python3"] + }, + { + "tags": "gcc,get", + "version_max": "7.3.1", + "version_min": "7" + }, + { + "tags": "get,bazel", + "version": "3.7.2" + } + ], + "env": { + "CM_GIT_CHECKOUT": "v2.5.0" + } + }, + "v2.6.0": { + "deps": [ + { + "tags": "get,python", + "version_max": "3.9.999", + "version_max_usable": "3.9.12", + "version_min": "3.6.0", + "names":["python","python3"] + }, + { + "tags": "gcc,get", + "version_max": "7.3.1", + "version_min": "7" + }, + { + "tags": "get,bazel", + "version": "3.7.2" + } + ], + "env": { + "CM_GIT_CHECKOUT": "v2.6.0" + } + }, + "v2.7.0": { + "deps": [ + { + "tags": "get,python", + "version_max": "3.9.999", + "version_max_usable": "3.9.12", + "version_min": "3.7.0", + "names":["python","python3"] + }, + { + "tags": "gcc,get", + "version_max": "7.3.1", + "version_min": "7" + }, + { + "tags": "get,bazel", + "version": "3.7.2" + } + ], + "env": { + "CM_GIT_CHECKOUT": "v2.7.0" + } + }, + 
"v2.8.0": { + "deps": [ + { + "tags": "get,python", + "version_max": "3.10.999", + "version_max_usable": "3.10.12", + "version_min": "3.7.0", + "names":["python","python3"] + }, + { + "tags": "gcc,get", + "version_max": "7.3.1", + "version_min": "7" + }, + { + "tags": "get,bazel", + "version": "4.2.1" + } + ], + "env": { + "CM_GIT_CHECKOUT": "v2.8.0" + } + }, + "v2.9.0": { + "deps": [ + { + "tags": "get,python", + "version_max": "3.10.999", + "version_max_usable": "3.10.12", + "version_min": "3.7.0", + "names":["python","python3"] + }, + { + "tags": "gcc,get", + "version_max": "10", + "version_min": "9" + }, + { + "tags": "get,bazel", + "version": "5.0.0" + } + ], + "env": { + "CM_GIT_CHECKOUT": "v2.9.0" + } + } + } +} diff --git a/script/install-tensorflow-from-src/customize.py b/script/install-tensorflow-from-src/customize.py new file mode 100644 index 0000000000..646c4f7de3 --- /dev/null +++ b/script/install-tensorflow-from-src/customize.py @@ -0,0 +1,40 @@ +from cmind import utils +import os + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: +# 20221024: we save and restore env in the main script and can clean env here for determinism +# if key not in env: + env[key] = [] + bazel_install_root = os.path.join(os.getcwd(), "src", "bazel-out") + bazel_install_bin = os.path.join(os.getcwd(), "src", "bazel-bin") + inc_paths = [] + inc_paths.append(os.path.join(os.getcwd(), "src")) + inc_paths.append(bazel_install_bin) + inc_paths.append(os.path.join(bazel_install_bin, "external", "flatbuffers", "_virtual_includes", "flatbuffers")) + inc_paths.append(os.path.join(bazel_install_bin, "external", "FP16", "_virtual_includes", "FP16")) + inc_paths.append(os.path.join(bazel_install_bin, "external", "pthreadpool", "_virtual_includes", "pthreadpool")) + inc_paths.append(os.path.join(bazel_install_bin, "external", "cpuinfo", "_virtual_includes", "cpuinfo")) + + env['+C_INCLUDE_PATH'] = inc_paths + env['+CPLUS_INCLUDE_PATH'] = inc_paths + + tflite_lib = env.get("CM_TFLITE", "") + if tflite_lib == "on": + lib_path = os.path.join(bazel_install_bin, 'tensorflow', 'lite') + else: + lib_path = os.path.join(bazel_install_bin, 'tensorflow') + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + return {'return':0} diff --git a/script/install-tensorflow-from-src/run.sh b/script/install-tensorflow-from-src/run.sh new file mode 100644 index 0000000000..d9090bf7d8 --- /dev/null +++ b/script/install-tensorflow-from-src/run.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +CUR_DIR=${PWD:-tmp} +if [ ! -d "src" ]; then + echo "Cloning Tensorflow from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} --depth ${CM_GIT_DEPTH}..." 
+ git clone --recursive -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} --depth ${CM_GIT_DEPTH} src +fi +CM_PYTHON_BIN=${CM_PYTHON_BIN:-python3} + +INSTALL_DIR="${CUR_DIR}" + +echo "******************************************************" +cd src +#./configure +#if [ "${?}" != "0" ]; then exit 1; fi + +if [ "${CM_TFLITE}" == "on" ]; then + cmd="${CM_BAZEL_BIN_WITH_PATH} build -c opt --define tflite_with_xnnpack=true //tensorflow/lite:libtensorflowlite.so" + echo $cmd + eval $cmd + if [ "${?}" != "0" ]; then exit 1; fi + exit 0 +fi +./configure +if [ "${?}" != "0" ]; then exit 1; fi +echo "******************************************************" +cmd="${CM_BAZEL_BIN_WITH_PATH} build //tensorflow/tools/pip_package:build_pip_package" +echo $cmd +eval $cmd +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" +./bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg +if [ "${?}" != "0" ]; then exit 1; fi + + +# Clean build directory (too large) +cd ${INSTALL_DIR} +if [ "${CM_TENSORFLOW_CLEAN_BUILD}" != "no" ]; then + rm -rf build +fi + +echo "******************************************************" +echo "Tensorflow is built and installed to ${INSTALL_DIR} ..." diff --git a/script/install-terraform-from-src/README.md b/script/install-terraform-from-src/README.md new file mode 100644 index 0000000000..d839f25113 --- /dev/null +++ b/script/install-terraform-from-src/README.md @@ -0,0 +1,132 @@ +Automatically generated README for this automation recipe: **install-terraform-from-src** + +Category: **Cloud automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-terraform-from-src,d79d47a074f34428) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-terraform-from-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,terraform,from-src* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "install terraform from-src" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=install,terraform,from-src` + +`cm run script --tags=install,terraform,from-src ` + +*or* + +`cmr "install terraform from-src"` + +`cmr "install terraform from-src " ` + + +#### Run this script from Python + +
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,terraform,from-src',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,terraform,from-src"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,terraform,from-src) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install terraform from-src" ` + +___ +### Customization + +#### Default environment + +
+
+These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+
+ +#### Versions +Default version: `main` + +* `main` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-terraform-from-src/_cm.json)*** + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,tool,go + - CM script: [get-go](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-go) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-terraform-from-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-terraform-from-src/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-terraform-from-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-terraform-from-src/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-terraform-from-src/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-terraform-from-src/_cm.json) + +___ +### Script output +`cmr "install terraform from-src " -j` +#### New environment keys (filter) + +* `+PATH` +* `CM_TERRAFORM_*` +#### New environment keys auto-detected from customize + +* `CM_TERRAFORM_BIN_WITH_PATH` +* `CM_TERRAFORM_INSTALLED_PATH` \ No newline at end of file diff --git a/script/install-terraform-from-src/_cm.json b/script/install-terraform-from-src/_cm.json new file mode 100644 index 0000000000..8732e16598 --- /dev/null +++ b/script/install-terraform-from-src/_cm.json @@ -0,0 +1,37 @@ +{ + "alias": "install-terraform-from-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Cloud automation", + "cache": true, + "clean_files": [], + "default_version": "main", + "deps": [ + { + "tags": "detect,cpu" + }, + { + "tags": "get,tool,go" + } + ], + "env": { + "CM_GIT_URL": "https://github.com/hashicorp/terraform.git" + }, + "new_env_keys": [ + "CM_TERRAFORM_*", + "+PATH" + ], + "tags": [ + "install", + "terraform", + "from-src" + ], + "uid": "d79d47a074f34428", + "versions": { + "main": { + "env": { + "CM_GIT_CHECKOUT": "main" + } + } + } +} diff --git a/script/install-terraform-from-src/customize.py b/script/install-terraform-from-src/customize.py new file mode 100644 index 0000000000..84fccf236e --- /dev/null +++ b/script/install-terraform-from-src/customize.py @@ -0,0 +1,19 @@ +from cmind import utils +import os + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + return {'return':0} + +def postprocess(i): + + env = i['env'] + installed_path = os.path.join(os.getcwd(), 'bin') + env['CM_TERRAFORM_INSTALLED_PATH'] = installed_path + env['CM_TERRAFORM_BIN_WITH_PATH'] = os.path.join(installed_path, "terraform") + env['+PATH'] = [ installed_path ] + + return {'return':0} diff --git a/script/install-terraform-from-src/run.sh b/script/install-terraform-from-src/run.sh new file mode 100644 index 0000000000..8cdb883026 --- /dev/null +++ b/script/install-terraform-from-src/run.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +CUR_DIR=${PWD} +if [ ! 
-d "terraform" ]; then + echo "Cloning Terraform from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT}..." + git clone -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} terraform +fi +test $? -eq 0 || exit 1 + +export GOPATH=$CUR_DIR +cd terraform +go install +test $? -eq 0 || exit 1 + +echo "******************************************************" +echo "Terraform is built and installed to ${GOPATH}/bin/terraform ..." diff --git a/script/install-tflite-from-src/README.md b/script/install-tflite-from-src/README.md new file mode 100644 index 0000000000..2fde922659 --- /dev/null +++ b/script/install-tflite-from-src/README.md @@ -0,0 +1,137 @@ +Automatically generated README for this automation recipe: **install-tflite-from-src** + +Category: **AI/ML frameworks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-tflite-from-src,5c72dab5eb88407c) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tflite-from-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *get,install,tflite-cmake,tensorflow-lite-cmake,from-src* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "get install tflite-cmake tensorflow-lite-cmake from-src" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=get,install,tflite-cmake,tensorflow-lite-cmake,from-src` + +`cm run script --tags=get,install,tflite-cmake,tensorflow-lite-cmake,from-src ` + +*or* + +`cmr "get install tflite-cmake tensorflow-lite-cmake from-src"` + +`cmr "get install tflite-cmake tensorflow-lite-cmake from-src " ` + + +#### Run this script from Python + +
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,install,tflite-cmake,tensorflow-lite-cmake,from-src',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="get,install,tflite-cmake,tensorflow-lite-cmake,from-src"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,install,tflite-cmake,tensorflow-lite-cmake,from-src) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "get install tflite-cmake tensorflow-lite-cmake from-src" ` + +___ +### Customization + +#### Default environment + +
+
+These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+* CM_GIT_DEPTH: `1`
+
+ +#### Versions +Default version: `master` + +* `master` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tflite-from-src/_cm.json)*** + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,compiler + * CM names: `--adr.['compiler']...` + - CM script: [get-cl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cl) + - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc) + - CM script: [get-llvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-llvm) + * get,cmake + - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tflite-from-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tflite-from-src/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tflite-from-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tflite-from-src/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tflite-from-src/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tflite-from-src/_cm.json) + +___ +### Script output +`cmr "get install tflite-cmake tensorflow-lite-cmake from-src " -j` +#### New environment keys (filter) + +* `+CPLUS_INCLUDE_PATH` +* `+C_INCLUDE_PATH` +* `+DYLD_FALLBACK_LIBRARY_PATH` +* `+LD_LIBRARY_PATH` +#### New environment keys auto-detected from customize diff --git a/script/install-tflite-from-src/_cm.json b/script/install-tflite-from-src/_cm.json new file mode 100644 index 0000000000..ebd298b68f --- /dev/null +++ b/script/install-tflite-from-src/_cm.json @@ -0,0 +1,62 @@ +{ + "alias": "install-tflite-from-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "AI/ML frameworks", + "clean_files": [], + "default_version": "master", + "deps": [ + { + "tags": "detect,cpu" + }, + { + "tags": "get,compiler", + "names": [ + "compiler" + ] + }, + { + "tags": "get,cmake" + } + ], + "default_env": { + "CM_GIT_DEPTH": "1" + }, + "env": { + "CM_GIT_URL": "https://github.com/tensorflow/tensorflow" + }, + "extra_cache_tags_from_env": [ + { + "env": "CM_PYTHON_CACHE_TAGS", + "prefix": "python-" + } + ], + "new_env_keys": [ + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ], + "tags": [ + "get", + "install", + "tflite-cmake", + "tensorflow-lite-cmake", + "from-src" + ], + "uid": "5c72dab5eb88407c", + "versions": { + "master": { + "env": { + "CM_GIT_CHECKOUT": "master" + }, + "ad": { + "compiler": { + "tags": "gcc", + "version_min": "10.0.0" + } + } + } + } +} diff --git a/script/install-tflite-from-src/customize.py b/script/install-tflite-from-src/customize.py new file mode 100644 index 0000000000..ab0055816d --- /dev/null +++ b/script/install-tflite-from-src/customize.py @@ -0,0 +1,27 @@ +from cmind import utils +import os + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] 
== 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: +# 20221024: we save and restore env in the main script and can clean env here for determinism +# if key not in env: + env[key] = [] + + env['+C_INCLUDE_PATH'].append(os.path.join(os.getcwd(), 'src')) + env['+CPLUS_INCLUDE_PATH'].append(os.path.join(os.getcwd(), 'src')) + + lib_path = os.path.join(os.getcwd(), 'build') + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + return {'return':0} diff --git a/script/install-tflite-from-src/run.sh b/script/install-tflite-from-src/run.sh new file mode 100644 index 0000000000..fb453f2e69 --- /dev/null +++ b/script/install-tflite-from-src/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +CUR_DIR=${PWD:-tmp} +if [ ! -d "src" ]; then + echo "Cloning Tensorflow from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} --depth ${CM_GIT_DEPTH}..." + git clone --recursive -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} --depth ${CM_GIT_DEPTH} src +fi + +INSTALL_DIR="${CUR_DIR}" +rm -rf ${INSTALL_DIR}/build + +cd ${INSTALL_DIR} +mkdir -p build +mkdir -p install + +echo "******************************************************" +cd build +cmake ../src/tensorflow/lite/c +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" +cmake --build . -j${CM_MAKE_CORES} +if [ "${?}" != "0" ]; then exit 1; fi + + +echo "******************************************************" +echo "Tflite is built to ${INSTALL_DIR}/build ..." diff --git a/script/install-torchvision-from-src/README.md b/script/install-torchvision-from-src/README.md new file mode 100644 index 0000000000..34e41647f8 --- /dev/null +++ b/script/install-torchvision-from-src/README.md @@ -0,0 +1,195 @@ +Automatically generated README for this automation recipe: **install-torchvision-from-src** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-torchvision-from-src,68b855780d474546) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-torchvision-from-src)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "install get src from.src pytorchvision torchvision src-pytorchvision" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision`
+
+`cm run script --tags=install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision[,variations] `
+
+*or*
+
+`cmr "install get src from.src pytorchvision torchvision src-pytorchvision"`
+
+`cmr "install get src from.src pytorchvision torchvision src-pytorchvision [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install get src from.src pytorchvision torchvision src-pytorchvision[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+
+  * `_branch.#`
+    - Environment variables:
+      - *CM_GIT_CHECKOUT*: `#`
+    - Workflow:
+  * `_cuda`
+    - Environment variables:
+      - *CUDA_HOME*: `<<>>`
+      - *CUDA_NVCC_EXECUTABLE*: `<<>>`
+      - *CUDNN_INCLUDE_PATH*: `<<>>`
+      - *CUDNN_LIBRARY_PATH*: `<<>>`
+      - *USE_CUDA*: `1`
+      - *USE_CUDNN*: `1`
+      - *TORCH_CUDA_ARCH_LIST*: `Ampere Ada Hopper`
+      - *TORCH_CXX_FLAGS*: `-D_GLIBCXX_USE_CXX11_ABI=1`
+    - Workflow:
+      1. ***Read "deps" on other CM scripts***
+        * get,cuda,_cudnn
+          * CM names: `--adr.['cuda']...`
+          - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda)
+  * `_for-nvidia-mlperf-inference-v3.1`
+    - Workflow:
+      1. ***Read "deps" on other CM scripts***
+        * install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1
+          - CM script: [install-pytorch-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pytorch-from-src)
+  * `_sha.#`
+    - Environment variables:
+      - *CM_GIT_CHECKOUT_SHA*: `#`
+    - Workflow:
+  * `_tag.#`
+    - Environment variables:
+      - *CM_GIT_CHECKOUT_TAG*: `#`
+    - Workflow:
+
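+  For example, the `_cuda` variation can be combined with a pinned commit via the `_sha.#` pattern (the SHA below is the one referenced by the `_for-nvidia-mlperf-inference-v3.1` variation in this script's meta and is shown only as an illustration):
+
+  ```cm run script --tags=install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision,_cuda,_sha.657027f3```
+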
+ + + * Group "**repo**" +
+
+  * `_repo.#`
+    - Environment variables:
+      - *CM_GIT_URL*: `#`
+    - Workflow:
+  * **`_repo.https://github.com/pytorch/vision`** (default)
+    - Environment variables:
+      - *CM_GIT_URL*: `https://github.com/pytorch/vision`
+    - Workflow:
+
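+  For example, to build from a fork instead of the default repository (the URL below is a hypothetical placeholder):
+
+  ```cm run script --tags=install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision,_repo.https://github.com/USER/vision```
+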
+ + +#### Default variations + +`_repo.https://github.com/pytorch/vision` +#### Default environment + +
+
+These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-torchvision-from-src/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,python3 + * `if (CM_CONDA_ENV != yes)` + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,git,repo + * CM names: `--adr.['pytorchision-src-repo', 'torchision-src-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-torchvision-from-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-torchvision-from-src/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-torchvision-from-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-torchvision-from-src/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-torchvision-from-src/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-torchvision-from-src/_cm.json) + +___ +### Script output +`cmr "install get src from.src pytorchvision torchvision src-pytorchvision [,variations]" -j` +#### New environment keys (filter) + +* `CM_PYTORCHVISION_*` +#### New environment keys auto-detected from customize diff --git a/script/install-torchvision-from-src/_cm.json b/script/install-torchvision-from-src/_cm.json new file mode 100644 index 0000000000..a474bb484a --- /dev/null +++ b/script/install-torchvision-from-src/_cm.json @@ -0,0 +1,140 @@ +{ + "alias": "install-torchvision-from-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Compiler automation", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "names": [ + "python", + "python3" + ], + "skip_if_env": { + "CM_CONDA_ENV": [ + "yes" + ] + }, + "tags": "get,python3" + }, + { + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_PYTORCH_VISION_SRC_REPO_PATH" + }, + "extra_cache_tags": "pytorchvision,torchvision,torchvision-src,src,pytorchvision-src,pytorchvision-src-repo", + "names": [ + "pytorchision-src-repo", + "torchision-src-repo" + ], + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_branch.": [ + "CM_GIT_CHECKOUT" + ], + "_repo.": [ + "CM_GIT_URL" + ], + "_sha.": [ + "CM_GIT_CHECKOUT_SHA" + ], + "_tag.": [ + "CM_GIT_CHECKOUT_TAG" + ] + } + } + ], + "env": { + "CM_GIT_URL": "https://github.com/pytorch/vision" + }, + "name": "Build pytorchvision from sources", + "new_env_keys": [ + "CM_PYTORCHVISION_*" + ], + "prehook_deps": [], + "sort": 1000, + "tags": [ + "install", + "get", + "src", + "from.src", + "pytorchvision", + "torchvision", + "src-pytorchvision" + ], + "uid": "68b855780d474546", + "variations": { + "branch.#": { + "env": { + "CM_GIT_CHECKOUT": "#" + } + }, + "cuda": { + "deps": [ + { + 
"names": [ + "cuda" + ], + "tags": "get,cuda,_cudnn" + } + ], + "env": { + "CUDA_HOME": "<<>>", + "CUDA_NVCC_EXECUTABLE": "<<>>", + "CUDNN_INCLUDE_PATH": "<<>>", + "CUDNN_LIBRARY_PATH": "<<>>", + "USE_CUDA": "1", + "USE_CUDNN": "1", + "TORCH_CUDA_ARCH_LIST": "Ampere Ada Hopper", + "TORCH_CXX_FLAGS": "-D_GLIBCXX_USE_CXX11_ABI=1" + } + }, + "for-nvidia-mlperf-inference-v3.1": { + "base": [ + "sha.657027f3", + "cuda" + ], + "deps": [ + { + "tags": "install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1" + } + ], + "env": { + } + }, + "repo.#": { + "env": { + "CM_GIT_URL": "#" + }, + "group": "repo" + }, + "repo.https://github.com/pytorch/vision": { + "default": true, + "env": { + "CM_GIT_URL": "https://github.com/pytorch/vision" + }, + "group": "repo" + }, + "sha.#": { + "env": { + "CM_GIT_CHECKOUT_SHA": "#" + } + }, + "tag.#": { + "ad": { + "pytorch-src-repo": { + "tags": "_no-recurse-submodules,_full-history" + } + }, + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + } + } + }, + "versions": {} +} diff --git a/script/install-torchvision-from-src/customize.py b/script/install-torchvision-from-src/customize.py new file mode 100644 index 0000000000..a4963cca9a --- /dev/null +++ b/script/install-torchvision-from-src/customize.py @@ -0,0 +1,26 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + if env.get('CM_MLPERF_INFERENCE_INTEL', '') == "yes": + i['run_script_input']['script_name'] = "run-intel-mlperf-inference-v3_1" + run_cmd="CC=clang CXX=clang++ USE_CUDA=OFF python -m pip install -e . " + + env['CM_RUN_CMD'] = run_cmd + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return':0} + +def postprocess(i): + return {'return':0} diff --git a/script/install-torchvision-from-src/run.sh b/script/install-torchvision-from-src/run.sh new file mode 100644 index 0000000000..06de574ada --- /dev/null +++ b/script/install-torchvision-from-src/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +CUR_DIR=$PWD +rm -rf pytorchvision +cp -r ${CM_PYTORCH_VISION_SRC_REPO_PATH} pytorchvision +cd pytorchvision +test "${?}" -eq "0" || exit $? +rm -rf build + +python setup.py bdist_wheel +test "${?}" -eq "0" || exit $? +cd dist +python3 -m pip install torchvision*linux_x86_64.whl +test "${?}" -eq "0" || exit $? 
diff --git a/script/install-tpp-pytorch-extension/README.md b/script/install-tpp-pytorch-extension/README.md
new file mode 100644
index 0000000000..b55bfe432c
--- /dev/null
+++ b/script/install-tpp-pytorch-extension/README.md
@@ -0,0 +1,198 @@
+Automatically generated README for this automation recipe: **install-tpp-pytorch-extension**
+
+Category: **Compiler automation**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-tpp-pytorch-extension,1701d2f5f4e84d42) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tpp-pytorch-extension)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *install,get,src,from.src,tpp-pex,src-tpp-pex*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "install get src from.src tpp-pex src-tpp-pex" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=install,get,src,from.src,tpp-pex,src-tpp-pex`
+
+`cm run script --tags=install,get,src,from.src,tpp-pex,src-tpp-pex[,variations] `
+
+*or*
+
+`cmr "install get src from.src tpp-pex src-tpp-pex"`
+
+`cmr "install get src from.src tpp-pex src-tpp-pex [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,get,src,from.src,tpp-pex,src-tpp-pex',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,get,src,from.src,tpp-pex,src-tpp-pex"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,get,src,from.src,tpp-pex,src-tpp-pex) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install get src from.src tpp-pex src-tpp-pex[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+
+  * `_branch.#`
+    - Environment variables:
+      - *CM_GIT_CHECKOUT*: `#`
+    - Workflow:
+  * `_for-intel-mlperf-inference-v3.1-gptj`
+    - Environment variables:
+      - *CM_CONDA_ENV*: `yes`
+    - Workflow:
+      1. ***Read "deps" on other CM scripts***
+        * get,conda,_name.gptj-pt
+          * CM names: `--adr.['conda']...`
+          - CM script: [get-conda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-conda)
+        * get,generic,conda-package,_package.python
+          * CM names: `--adr.['conda-package', 'python3']...`
+          - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package)
+        * get,generic,conda-package,_package.wheel,_source.conda-forge
+          * CM names: `--adr.['conda-package', 'wheel']...`
+          - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package)
+        * get,generic,conda-package,_package.setuptools,_source.conda-forge
+          * CM names: `--adr.['conda-package', 'setuptools']...`
+          - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package)
+        * install,llvm,src,_for-intel-mlperf-inference-v3.1-gptj
+          - CM script: [install-llvm-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-llvm-src)
+  * `_sha.#`
+    - Environment variables:
+      - *CM_GIT_CHECKOUT_SHA*: `#`
+    - Workflow:
+  * `_tag.#`
+    - Environment variables:
+      - *CM_GIT_CHECKOUT_TAG*: `#`
+    - Workflow:
+
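+  For example, to build TPP-PEX the way the Intel MLPerf inference v3.1 GPT-J pipeline expects it:
+
+  ```cm run script --tags=install,get,src,from.src,tpp-pex,src-tpp-pex,_for-intel-mlperf-inference-v3.1-gptj```
+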
+ + + * Group "**repo**" +
+
+  * `_repo.#`
+    - Environment variables:
+      - *CM_GIT_URL*: `#`
+    - Workflow:
+  * **`_repo.https://github.com/libxsmm/tpp-pytorch-extension`** (default)
+    - Environment variables:
+      - *CM_GIT_URL*: `https://github.com/libxsmm/tpp-pytorch-extension`
+    - Workflow:
+
+ + +#### Default variations + +`_repo.https://github.com/libxsmm/tpp-pytorch-extension` +#### Default environment + +
+
+These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tpp-pytorch-extension/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,python3 + * `if (CM_CONDA_ENV != yes)` + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,pytorch,from.src + * `if (CM_CONDA_ENV != yes)` + * CM names: `--adr.['pytorch']...` + - CM script: [install-pytorch-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pytorch-from-src) + * get,git,repo + * CM names: `--adr.['tpp-pex-src-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tpp-pytorch-extension/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tpp-pytorch-extension/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tpp-pytorch-extension/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tpp-pytorch-extension/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-tpp-pytorch-extension/_cm.json) + +___ +### Script output +`cmr "install get src from.src tpp-pex src-tpp-pex [,variations]" -j` +#### New environment keys (filter) + +* `CM_TPP_PEX_*` +#### New environment keys auto-detected from customize diff --git a/script/install-tpp-pytorch-extension/_cm.json b/script/install-tpp-pytorch-extension/_cm.json new file mode 100644 index 0000000000..4b3ce8659e --- /dev/null +++ b/script/install-tpp-pytorch-extension/_cm.json @@ -0,0 +1,163 @@ +{ + "alias": "install-tpp-pytorch-extension", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Compiler automation", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "names": [ + "python", + "python3" + ], + "skip_if_env": { + "CM_CONDA_ENV": [ + "yes" + ] + }, + "tags": "get,python3" + }, + { + "names": [ + "pytorch" + ], + "skip_if_env": { + "CM_CONDA_ENV": [ + "yes" + ] + }, + "tags": "get,pytorch,from.src" + }, + { + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_TPP_PEX_SRC_REPO_PATH" + }, + "extra_cache_tags": "tpp,tpp-pex,src,tpp-pex-src,tpp-pex-src-repo", + "names": [ + "tpp-pex-src-repo" + ], + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_branch.": [ + "CM_GIT_CHECKOUT" + ], + "_repo.": [ + "CM_GIT_URL" + ], + "_sha.": [ + "CM_GIT_CHECKOUT_SHA" + ], + "_tag.": [ + "CM_GIT_CHECKOUT_TAG" + ] + } + } + ], + "env": {}, + "name": "Build TPP-PEX from sources", + "new_env_keys": [ + "CM_TPP_PEX_*" + ], + "prehook_deps": [], + "sort": 1000, + "tags": [ + "install", + "get", + "src", + "from.src", + "tpp-pex", + "src-tpp-pex" + ], + "uid": "1701d2f5f4e84d42", + "variations": { + "branch.#": { + "env": { + "CM_GIT_CHECKOUT": "#" + } + }, + 
"for-intel-mlperf-inference-v3.1-gptj": { + "adr": { + "conda-package": { + "tags": "_name.gptj-pt" + }, + "pytorch": { + "tags": "_for-intel-mlperf-inference-v3.1-gptj" + } + }, + "base": [ + "branch.mlperf_infer_31" + ], + "deps": [ + { + "names": [ + "conda" + ], + "tags": "get,conda,_name.gptj-pt" + }, + { + "names": [ + "conda-package", + "python3" + ], + "tags": "get,generic,conda-package,_package.python", + "version": "3.9" + }, + { + "names": [ + "conda-package", + "wheel" + ], + "tags": "get,generic,conda-package,_package.wheel,_source.conda-forge" + }, + { + "names": [ + "conda-package", + "setuptools" + ], + "tags": "get,generic,conda-package,_package.setuptools,_source.conda-forge" + }, + { + "tags": "install,llvm,src,_for-intel-mlperf-inference-v3.1-gptj" + } + ], + "env": { + "CM_CONDA_ENV": "yes" + } + }, + "repo.#": { + "env": { + "CM_GIT_URL": "#" + }, + "group": "repo" + }, + "repo.https://github.com/libxsmm/tpp-pytorch-extension": { + "default": true, + "env": { + "CM_GIT_URL": "https://github.com/libxsmm/tpp-pytorch-extension" + }, + "group": "repo" + }, + "sha.#": { + "env": { + "CM_GIT_CHECKOUT_SHA": "#" + } + }, + "tag.#": { + "ad": { + "pytorch-src-repo": { + "tags": "_no-recurse-submodules,_full-history" + } + }, + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + } + } + }, + "versions": {} +} diff --git a/script/install-tpp-pytorch-extension/customize.py b/script/install-tpp-pytorch-extension/customize.py new file mode 100644 index 0000000000..35ee0e05a6 --- /dev/null +++ b/script/install-tpp-pytorch-extension/customize.py @@ -0,0 +1,23 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + env['TPP_PEX_DIR'] = env['CM_TPP_PEX_SRC_REPO_PATH'] + env['DNNL_GRAPH_BUILD_COMPILER_BACKEND'] = 1 + env['USE_LLVM'] = env['CM_LLVM_INSTALLED_PATH'] + env['LLVM_DIR'] = os.path.join(env['CM_LLVM_INSTALLED_PATH'], "lib", "cmake", "llvm") + + run_cmd="python setup.py clean && python setup.py install" + + env['CM_RUN_DIR'] = env['TPP_PEX_DIR'] + env['CM_RUN_CMD'] = run_cmd + + return {'return':0} diff --git a/script/install-tpp-pytorch-extension/run.sh b/script/install-tpp-pytorch-extension/run.sh new file mode 100644 index 0000000000..d426d4004e --- /dev/null +++ b/script/install-tpp-pytorch-extension/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:${PATH} + +cd ${CM_RUN_DIR} +echo ${CM_RUN_CMD} +eval ${CM_RUN_CMD} + +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" diff --git a/script/install-transformers-from-src/README.md b/script/install-transformers-from-src/README.md new file mode 100644 index 0000000000..f1db25e6cd --- /dev/null +++ b/script/install-transformers-from-src/README.md @@ -0,0 +1,196 @@ +Automatically generated README for this automation recipe: **install-transformers-from-src** + +Category: **Compiler automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=install-transformers-from-src,88512c48ea5c4186) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: 
*[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-transformers-from-src)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *install,src,from.src,transformers,src-transformers*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "install src from.src transformers src-transformers" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=install,src,from.src,transformers,src-transformers`
+
+`cm run script --tags=install,src,from.src,transformers,src-transformers[,variations] `
+
+*or*
+
+`cmr "install src from.src transformers src-transformers"`
+
+`cmr "install src from.src transformers src-transformers [variations]" `
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,src,from.src,transformers,src-transformers',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="install,src,from.src,transformers,src-transformers"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=install,src,from.src,transformers,src-transformers) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "install src from.src transformers src-transformers[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+
+  * `_branch.#`
+    - Environment variables:
+      - *CM_GIT_CHECKOUT*: `#`
+    - Workflow:
+  * `_for-intel-mlperf-inference-v3.1-bert`
+    - Environment variables:
+      - *CM_CONDA_ENV*: `yes`
+    - Workflow:
+      1. ***Read "deps" on other CM scripts***
+        * get,conda,_name.bert-pt
+          * CM names: `--adr.['conda']...`
+          - CM script: [get-conda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-conda)
+        * get,generic,conda-package,_package.python
+          * CM names: `--adr.['conda-package', 'python3']...`
+          - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package)
+        * get,generic,conda-package,_package.wheel,_source.conda-forge
+          * CM names: `--adr.['conda-package', 'wheel']...`
+          - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package)
+        * get,generic,conda-package,_package.setuptools,_source.conda-forge
+          * CM names: `--adr.['conda-package', 'setuptools']...`
+          - CM script: [install-generic-conda-package](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-generic-conda-package)
+  * `_sha.#`
+    - Environment variables:
+      - *CM_GIT_CHECKOUT_SHA*: `#`
+    - Workflow:
+  * `_tag.#`
+    - Environment variables:
+      - *CM_GIT_CHECKOUT_TAG*: `#`
+    - Workflow:
+
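+  For example, to build transformers the way the Intel MLPerf inference v3.1 BERT pipeline expects it:
+
+  ```cm run script --tags=install,src,from.src,transformers,src-transformers,_for-intel-mlperf-inference-v3.1-bert```
+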
+ + + * Group "**repo**" +
+
+  * `_repo.#`
+    - Environment variables:
+      - *CM_GIT_URL*: `#`
+    - Workflow:
+  * **`_repo.https://github.com/pytorch/pytorch`** (default)
+    - Environment variables:
+      - *CM_GIT_URL*: `https://github.com/huggingface/transformers`
+    - Workflow:
+
+ + +#### Default variations + +`_repo.https://github.com/pytorch/pytorch` +#### Default environment + +
+
+These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-transformers-from-src/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect,cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + * get,python3 + * `if (CM_CONDA_ENV != yes)` + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,pytorch,from.src + * `if (CM_CONDA_ENV != yes)` + * CM names: `--adr.['pytorch']...` + - CM script: [install-pytorch-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pytorch-from-src) + * get,git,repo + * CM names: `--adr.['transformers-src-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-transformers-from-src/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-transformers-from-src/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-transformers-from-src/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-transformers-from-src/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/install-transformers-from-src/_cm.json) + +___ +### Script output +`cmr "install src from.src transformers src-transformers [,variations]" -j` +#### New environment keys (filter) + +* `CM_TRANSFORMERS_*` +#### New environment keys auto-detected from customize diff --git a/script/install-transformers-from-src/_cm.json b/script/install-transformers-from-src/_cm.json new file mode 100644 index 0000000000..4251f84b73 --- /dev/null +++ b/script/install-transformers-from-src/_cm.json @@ -0,0 +1,161 @@ +{ + "alias": "install-transformers-from-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Compiler automation", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "names": [ + "python", + "python3" + ], + "skip_if_env": { + "CM_CONDA_ENV": [ + "yes" + ] + }, + "tags": "get,python3" + }, + { + "names": [ + "pytorch" + ], + "skip_if_env": { + "CM_CONDA_ENV": [ + "yes" + ] + }, + "tags": "get,pytorch,from.src" + }, + { + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_TRANSFORMERS_SRC_REPO_PATH" + }, + "extra_cache_tags": "transformers,src,transformers-src,transformers-src-repo", + "names": [ + "transformers-src-repo" + ], + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_branch.": [ + "CM_GIT_CHECKOUT" + ], + "_repo.": [ + "CM_GIT_URL" + ], + "_sha.": [ + "CM_GIT_CHECKOUT_SHA" + ], + "_tag.": [ + "CM_GIT_CHECKOUT_TAG" + ] + } + } + ], + "env": { + "CM_GIT_URL": "https://github.com/huggingface/transformers" + }, + "name": "Build transformers from sources", + "new_env_keys": [ + "CM_TRANSFORMERS_*" + ], + "prehook_deps": [], + "sort": 1000, + "tags": [ + "install", + "src", + "from.src", + "transformers", + "src-transformers" + ], + 
"uid": "88512c48ea5c4186", + "variations": { + "branch.#": { + "env": { + "CM_GIT_CHECKOUT": "#" + } + }, + "for-intel-mlperf-inference-v3.1-bert": { + "adr": { + "conda-package": { + "tags": "_name.bert-pt" + }, + "pytorch": { + "tags": "_for-intel-mlperf-inference-v3.1-bert" + } + }, + "base": [ + "sha.9f4e0c23d68366985f9f584388874477ad6472d8" + ], + "deps": [ + { + "names": [ + "conda" + ], + "tags": "get,conda,_name.bert-pt" + }, + { + "names": [ + "conda-package", + "python3" + ], + "tags": "get,generic,conda-package,_package.python", + "version": "3.8" + }, + { + "names": [ + "conda-package", + "wheel" + ], + "tags": "get,generic,conda-package,_package.wheel,_source.conda-forge" + }, + { + "names": [ + "conda-package", + "setuptools" + ], + "tags": "get,generic,conda-package,_package.setuptools,_source.conda-forge" + } + ], + "env": { + "CM_CONDA_ENV": "yes" + } + }, + "repo.#": { + "env": { + "CM_GIT_URL": "#" + }, + "group": "repo" + }, + "repo.https://github.com/pytorch/pytorch": { + "default": true, + "env": { + "CM_GIT_URL": "https://github.com/huggingface/transformers" + }, + "group": "repo" + }, + "sha.#": { + "env": { + "CM_GIT_CHECKOUT_SHA": "#" + } + }, + "tag.#": { + "ad": { + "pytorch-src-repo": { + "tags": "_no-recurse-submodules,_full-history" + } + }, + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + } + } + }, + "versions": {} +} diff --git a/script/install-transformers-from-src/customize.py b/script/install-transformers-from-src/customize.py new file mode 100644 index 0000000000..29f69cfc69 --- /dev/null +++ b/script/install-transformers-from-src/customize.py @@ -0,0 +1,21 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + run_cmd="python setup.py install" + + env['CM_RUN_CMD'] = run_cmd + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return':0} diff --git a/script/install-transformers-from-src/run.sh b/script/install-transformers-from-src/run.sh new file mode 100644 index 0000000000..8af8c6c77a --- /dev/null +++ b/script/install-transformers-from-src/run.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +CUR_DIR=$PWD +echo $PWD +rm -rf transformers +cmd="cp -r ${CM_TRANSFORMERS_SRC_REPO_PATH} transformers" +echo "$cmd" +eval "$cmd" +cd transformers +rm -rf build + +wget -nc --no-check-certificate https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/bert-99/pytorch-cpu/patches/transformers.patch +if [ "${?}" != "0" ]; then exit 1; fi +git apply transformers.patch +if [ "${?}" != "0" ]; then exit 1; fi + +echo ${CM_RUN_CMD} +eval ${CM_RUN_CMD} + +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" diff --git a/script/launch-benchmark/README-extra.md b/script/launch-benchmark/README-extra.md new file mode 100644 index 0000000000..3854e8ecb1 --- /dev/null +++ b/script/launch-benchmark/README-extra.md @@ -0,0 +1,3 @@ +# CM script + +Universal benchmark launcher via Collective Mind diff --git a/script/launch-benchmark/README.md b/script/launch-benchmark/README.md new file mode 100644 index 0000000000..2e8a813899 --- /dev/null +++ b/script/launch-benchmark/README.md @@ -0,0 +1,118 @@ +Automatically generated README for this automation recipe: **launch-benchmark** + +Category: **Collective benchmarking** + +License: **Apache 2.0** + +Maintainers: 
[Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=launch-benchmark,5dc7662804bc4cad) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/launch-benchmark)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *launch,benchmark* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "launch benchmark" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=launch,benchmark` + +*or* + +`cmr "launch benchmark"` + + +#### Run this script from Python + +
+ +Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'launch,benchmark', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="launch,benchmark"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=launch,benchmark) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "launch benchmark" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/launch-benchmark/_cm.yaml) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/launch-benchmark/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/launch-benchmark/_cm.yaml) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/launch-benchmark/_cm.yaml) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/launch-benchmark/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/launch-benchmark/_cm.yaml) + +___ +### Script output +`cmr "launch benchmark" -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/launch-benchmark/_cm.yaml b/script/launch-benchmark/_cm.yaml new file mode 100644 index 0000000000..f45606bc24 --- /dev/null +++ b/script/launch-benchmark/_cm.yaml @@ -0,0 +1,15 @@ +alias: launch-benchmark +uid: 5dc7662804bc4cad + +automation_alias: script +automation_uid: 5b4e0237da074764 + +tags: +- launch +- benchmark + +category: "Collective benchmarking" + +gui: + title: "Launch benchmark" + use_customize_func: "gui" diff --git a/script/launch-benchmark/customize.py b/script/launch-benchmark/customize.py new file mode 100644 index 0000000000..5db5e9f817 --- /dev/null +++ b/script/launch-benchmark/customize.py @@ -0,0 +1,732 @@ +import cmind +import os +import copy + +base_path={} +base_path_meta={} + +################################################################################## +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +################################################################################## +def postprocess(i): + + env = i['env'] + + return {'return':0} + + +################################################################################## +def load_cfg(i): + + tags = i.get('tags','') + artifact = i.get('artifact','') + + key = i.get('key','') + + ii={'action':'find', + 'automation':'cfg'} + if artifact!='': + ii['artifact']=artifact + elif tags!='': + ii['tags']=tags + + r=cmind.access(ii) + if r['return']>0: return r + + lst = r['list'] + + prune = i.get('prune',{}) + prune_key = prune.get('key', '') + prune_key_uid = prune.get('key_uid', '') + prune_uid = prune.get('uid', '') + prune_list = prune.get('list',[]) + + # Checking individual files inside CM entry + selection = [] + + if i.get('skip_files', False): + for l in lst: + meta = l.meta + full_path = l.path + + meta['full_path']=full_path + + add = True + + if prune_key!='' and prune_key_uid!='': + if prune_key_uid not in meta.get(prune_key, []): + add = False + + if add: + selection.append(meta) + else: + for l in lst: + path = l.path + + main_meta = l.meta + all_tags = main_meta.get('tags',[]) + + files = os.listdir(path) + + for f in files: + if key!='' and not f.startswith(key): + continue + + if f.startswith('_') or (not f.endswith('.json') and not f.endswith('.yaml')): + continue + + full_path = os.path.join(path, f) + + full_path_without_ext = full_path[:-5] + + r = 
cmind.utils.load_yaml_and_json(full_path_without_ext) + if r['return']>0: + print ('Warning: problem loading file {}'.format(full_path)) + else: + meta = r['meta'] + + # Check base + r = process_base(meta, full_path) + if r['return']>0: return r + meta = r['meta'] + + uid = meta['uid'] + + # Check pruning + add = True + + if len(prune)>0: + if prune_uid!='' and uid != prune_uid: + add = False + + if add and len(prune_list)>0 and uid not in prune_list: + add = False + + if add and prune_key!='' and prune_key_uid!='' and prune_key_uid != meta.get(prune_key, None): + add = False + + if add: + meta['full_path']=full_path + + add_all_tags = copy.deepcopy(all_tags) + + name = meta.get('name','') + if name=='': + name = ' '.join(meta.get('tags',[])) + name = name.strip() + meta['name'] = name + + file_tags = meta.get('tags', '').strip() + if file_tags=='': + if name!='': + add_all_tags += [v.lower() for v in name.split(' ')] + else: + add_all_tags += file_tags.split(',') + + meta['all_tags']=add_all_tags + + meta['main_meta']=main_meta + + selection.append(meta) + + return {'return':0, 'lst':lst, 'selection':selection} + +################################################################################## +def process_base(meta, full_path): + + global base_path, base_path_meta + + _base = meta.get('_base', '') + if _base != '': + name = '' + + filename = _base + full_path_base = os.path.dirname(full_path) + + if not filename.endswith('.yaml') and not filename.endswith('.json'): + return {'return':1, 'error':'_base file {} in {} must be .yaml or .json'.format(filename, full_path)} + + if ':' in _base: + x = _base.split(':') + name = x[0] + + full_path_base = base_path.get(name, '') + if full_path_base == '': + + # Find artifact + r = cmind.access({'action':'find', + 'automation':'cfg', + 'artifact':name}) + if r['return']>0: return r + + lst = r['list'] + + if len(lst)==0: + return {'return':1, 'error':'_base artifact {} not found in {}'.format(name, full_path)} + + full_path_base = lst[0].path + + base_path[name] = full_path_base + + filename = x[1] + + # Load base + path = os.path.join(full_path_base, filename) + + if not os.path.isfile(path): + return {'return':1, 'error':'_base file {} not found in {}'.format(filename, full_path)} + + if path in base_path_meta: + base = copy.deepcopy(base_path_meta[path]) + else: + path_without_ext = path[:-5] + + r = cmind.utils.load_yaml_and_json(path_without_ext) + if r['return']>0: return r + + base = r['meta'] + + base_path_meta[path]=copy.deepcopy(base) + + for k in meta: + v = meta[k] + + if k not in base: + base[k]=v + else: + if isinstance(v, str): + # Only merge a few special keys and overwrite the rest + if k in ['tags','name']: + base[k] += meta[k] + else: + base[k] = meta[k] + + elif type(v) == list: + for vv in v: + base[k].append(vv) + elif type(v) == dict: + base[k].update(v) + + meta = base + + return {'return':0, 'meta':meta} + + + + +################################################################################## +def get_with_complex_key(meta, key): + + j = key.find('.') + + if j<0: + return meta.get(key) + + key0 = key[:j] + + if key0 not in meta: + return None + + return get_with_complex_key(meta[key0], key[j+1:]) + +################################################################################## +def get_with_complex_key_safe(meta, key): + v = get_with_complex_key(meta, key) + + if v == None: v='' + + return v + +################################################################################## +def 
prepare_table(i): + + import pandas as pd + import numpy as np + + selection = i['selection'] + misc = i['misc_module'] + + html = '' + + all_data = [] + +# dimensions = [('input.model', 'MLPerf model'), +# ('input.implementation', 'MLPerf implementation'), +# ('input.framework', 'MLPerf framework')] + + dimensions = i.get('dimensions', []) + + dimension_values = {} + dimension_keys = [] + + if len(dimensions) == 0: + keys = [('test', 'CM test', 400, 'leftAligned')] + else: + keys = [('test', 'CM test', 50, 'leftAligned')] + + for k in dimensions: + key = k[0] + + keys.append((k[0], k[1], 100, 'leftAligned')) + + dimension_values[key] = [] + dimension_keys.append(key) + +# # assemble all values +# for s in selection: +# for k in dimensions: +# key = k[0] +# +# value = get_with_complex_key(selection, key) +# +# if value!=None and value!='' and value not in dimension_values[key]: +# dimension_values.append(value) + + # If dimensions, sort by dimensions + for d in list(reversed(dimension_keys)): + selection = sorted(selection, key = lambda x: get_with_complex_key_safe(x, d)) + + + + keys += [ + ('functional', 'Functional', 80, ''), + ('reproduced', 'Reproduced', 80, ''), + ('notes', 'Notes', 200, 'leftAligned'), + ] + + j = 0 + + badges_url={'functional':'https://cTuning.org/images/artifacts_evaluated_functional_v1_1_small.png', + 'reproduced':'https://cTuning.org/images/results_reproduced_v1_1_small.png'} + + + + + + + for s in selection: + row = {} + + j += 1 + + uid = s['uid'] + + url = misc.make_url(uid, key='uid', action='howtorun', md=False) + + name = s.get('name','') + if name == '': name = uid + + + if len(dimensions) == 0: + row['test'] = '<a href="{}">{}</a>'.format(url, name) + else: + row['test'] = '<a href="{}">View</a>'.format(url) + for k in dimensions: + kk = k[0] + + v = get_with_complex_key_safe(s, kk) + + row[kk] = str(v) + + + + + # Check ACM/IEEE functional badge + x = '' + if s.get('functional', False): + x = '
<a href="{}"><img src="{}"></a>'.format(url, badges_url['functional']) + row['functional'] = x + + # Check ACM/IEEE reproduced badge + x = '' + if s.get('reproduced', False): + x = '<a href="{}"><img src="{}"></a>
'.format(url, badges_url['reproduced']) + row['reproduced'] = x + + # Check misc notes + row['notes']=s.get('notes','') + + # Finish row + all_data.append(row) + + + # Visualize table + pd_keys = [v[0] for v in keys] + pd_key_names = [v[1] for v in keys] + + pd_all_data = [] + for row in sorted(all_data, key=lambda row: (row.get('x1',0))): + pd_row=[] + for k in pd_keys: + pd_row.append(row.get(k)) + pd_all_data.append(pd_row) + + df = pd.DataFrame(pd_all_data, columns = pd_key_names) + + df.index+=1 + + return {'return':0, 'df':df} + + + + + + +################################################################################## +def gui(i): + + params = i['params'] + st = i['streamlit_module'] + misc = i['misc_module'] + meta = i['meta'] + gui_meta = meta['gui'] + skip_header = i.get('skip_title', False) + + end_html = '' + + if not skip_header: + # Title + title = gui_meta['title'] + + st.title('[Collective Mind](https://github.com/mlcommons/ck)') + + st.markdown('### {}'.format(title)) + + + + + + # Check if test uid is specified + uid = '' + x = params.get('uid',['']) + if len(x)>0 and x[0]!='': uid = x[0].strip() + + bench_uid = '' + x = params.get('bench_uid',['']) + if len(x)>0 and x[0]!='': bench_uid = x[0].strip() + + compute_uid = '' + x = params.get('compute_uid',['']) + if len(x)>0 and x[0]!='': compute_uid = x[0].strip() + + + + + ############################################################## + # Check the first level of benchmarks + ii = {'tags':'benchmark,run', 'skip_files':True, 'prune':{}} + + if uid != '': + ii['skip_files'] = False + ii['prune']['uid']=uid + if bench_uid !='': + ii['artifact']=bench_uid + if compute_uid !='': + ii['prune']['key']='supported_compute' + ii['prune']['key_uid']=compute_uid + + r=load_cfg(ii) + if r['return']>0: return r + + lst = r['selection'] + + if len(lst)==0: + st.markdown('Warning: no benchmarks found!') + return {'return':0} + + test_meta = {} + + bench_id = 0 + + + ########################################################################################################### + if uid != '': + if len(lst)==0: + st.markdown('CM test with UID "{}" not found!'.format(uid)) + return {'return':0} + elif len(lst)>1: + st.markdown('Warning: More than 1 CM test found with UID "{}" - ambiguity!'.format(uid)) + return {'return':0} + + test_meta = lst[0] + + bench_id = 1 + compute_uid = test_meta['compute_uid'] + bench_supported_compute = [compute_uid] + + + if uid == '': + selection = sorted(lst, key = lambda v: v['name']) + bench_selection = [{'name':''}] + selection + + if bench_uid !='': + bench_id_index = 1 + else: + # Check if want to force some benchmark by default + # 27c06c35bceb4059 == MLPerf inference v4.0 + + bench_id_index = 0 + + j=0 + for b in bench_selection: + if b.get('uid','')=='27c06c35bceb4059': + bench_id_index=j + break + j+=1 + + + bench_id = st.selectbox('Select benchmark:', + range(len(bench_selection)), + format_func=lambda x: bench_selection[x]['name'], + index = bench_id_index, + key = 'bench') + + + bench_supported_compute = [] + bench_meta = {} + if bench_id>0: + bench_meta = bench_selection[bench_id] + bench_supported_compute = bench_meta.get('supported_compute',[]) + + urls = bench_meta.get('urls',[]) + if len(urls)>0: + x = '\n' + for u in urls: + name = u['name'] + url = u['url'] + + x+=' [ [{}]({}) ] '.format(name, url) + x+='\n' + + st.markdown(x) + + ########################################################################################################### + if True==True: + 
############################################################## + # Check compute + + ii = {'tags':'benchmark,compute'} + if bench_id>0: + if compute_uid !='': + x = [compute_uid] + else: + x = bench_supported_compute + if len(x) == 0: + st.markdown('Warning: no supported compute selected!') + return {'return':0} + + ii['prune']={'list':x} + + r=load_cfg(ii) + if r['return']>0: return r + + selection = sorted(r['selection'], key = lambda v: v['name']) + + if len(selection) == 0 : + st.markdown('Warning: no supported compute found!') + return {'return':0} + + compute_selection = [{'name':''}] + if len(selection)>0: + compute_selection += selection + + compute_id_index = 0 if compute_uid == '' else 1 + + if uid == '': + compute_id = st.selectbox('Select target hardware to benchmark:', + range(len(compute_selection)), + format_func=lambda x: compute_selection[x]['name'], + index = compute_id_index, + key = 'compute') + + compute = {} + if compute_id>0: + compute = compute_selection[compute_id] + compute_uid = compute['uid'] + + compute_meta = {} + for c in compute_selection: + if c.get('uid','')!='': + compute_meta[c['uid']]=c + + ########################################################################################################### + if uid == '': + + ############################################################## + # Check tests + ii = {'tags':'benchmark,run'} + + if bench_id>0: + bench_uid = bench_selection[bench_id]['uid'] + ii['artifact']=bench_uid + if compute_uid!='': + ii['prune']={'key':'compute_uid', 'key_uid':compute_uid} + + r=load_cfg(ii) + if r['return']>0: return r + + selection = sorted(r['selection'], key = lambda v: v['name']) + + # Check how many and prune + if len(selection) == 0: + st.markdown('No CM tests found') + return {'return':0} + + for s in selection: + c_uid = s.get('compute_uid','') + if c_uid!='': + c_tags = compute_meta[c_uid].get('tags','') + if c_tags!='': + s['all_tags']+=c_tags.split(',') + + s['compute_meta']=compute_meta[c_uid] + + + if len(selection)>1: + # Update selection with compute tags + test_tags = '' + x = params.get('tags',['']) + if len(x)>0 and x[0]!='': test_tags = x[0].strip() + + test_tags = st.text_input('Found {} CM tests. 
Prune them by tags:'.format(str(len(selection))), value=test_tags, key='test_tags').strip() + + if test_tags!='': + test_tags_list = test_tags.replace(' ',',').split(',') + + pruned_selection = [] + + for s in selection: + all_tags = s['all_tags'] + + add = True + + for t in test_tags_list: + if t not in all_tags: + add = False + break + + if add: + pruned_selection.append(s) + + selection = pruned_selection + + test_selection = [{'name':''}] + selection + + + + if len(selection)<200: + # Creating compute selector + test_id_index = 1 if len(selection)==1 else 0 + + test_id = st.selectbox('Select a test from {}:'.format(str(len(selection))), + range(len(test_selection)), + format_func=lambda x: test_selection[x]['name'], + index = test_id_index, + key = 'test') + + + if test_id >0: + test_meta = test_selection[test_id] + else: + ######################################################################### + # View many (table) + ii = {'selection':selection, + 'misc_module':misc} + + # Check if dimensions in the bench + dimensions = bench_meta.get('dimensions', []) + if len(dimensions)>0: + viewer_selection = ['benchmark specific', 'universal'] + + viewer = st.selectbox('Viewer:', viewer_selection, key = 'viewer') + + if viewer == 'benchmark specific': + ii['dimensions'] = dimensions + + else: + st.markdown('---') + + r = prepare_table(ii) + if r['return']>0: return r + + df = r['df'] + + html=df.to_html(escape=False, justify='left') + st.write(html, unsafe_allow_html = True) + +# st.dataframe(df, unsafe_allow_html = True) + + + + + + + + ############################################################## + # Show individual test + if len(test_meta)>0: + if uid != '': + c_uid = test_meta.get('compute_uid','') + if c_uid!='': + c_tags = compute_meta[c_uid].get('tags','') + if c_tags!='': + test_meta['all_tags']+=c_tags.split(',') + + test_meta['compute_meta']=compute_meta[c_uid] + + + if uid == '': + st.markdown('---') + + uid = test_meta['uid'] + + # First, check if there is a README + test_path = test_meta['full_path'] + + test_md = test_meta['full_path'][:-5]+'.md' + if os.path.isfile(test_md): + + r = cmind.utils.load_txt(test_md) + if r['return']>0: return r + + s = r['string'] + + st.markdown(s) + + # Next print some info (for now JSON) + import json + x = """ +--- +**CM test dictionary:** +```json +{} +``` + """.format(json.dumps(test_meta, indent=2)) + st.markdown(x) + + + + + + + # Create self link + # This misc module is in CM "gui" script + x1 = misc.make_url(uid, key='uid', action='howtorun', md=False) + end_html='
<a href="{}">Self link</a>
'.format(x1) + + return {'return':0, 'end_html': end_html} + diff --git a/script/launch-benchmark/tests/debug.py b/script/launch-benchmark/tests/debug.py new file mode 100644 index 0000000000..842003b2c6 --- /dev/null +++ b/script/launch-benchmark/tests/debug.py @@ -0,0 +1,6 @@ +import cmind + +r=cmind.access({'action':'gui', + 'automation':'script', + 'artifact':'launch benchmark'}) +print (r) diff --git a/script/prepare-training-data-bert/README.md b/script/prepare-training-data-bert/README.md new file mode 100644 index 0000000000..0cf162f6e1 --- /dev/null +++ b/script/prepare-training-data-bert/README.md @@ -0,0 +1,195 @@ +Automatically generated README for this automation recipe: **prepare-training-data-bert** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=prepare-training-data-bert,1e06a7abe23545eb) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-bert)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *prepare,mlperf,training,data,input,bert* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "prepare mlperf training data input bert" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=prepare,mlperf,training,data,input,bert` + +`cm run script --tags=prepare,mlperf,training,data,input,bert[,variations] [--input_flags]` + +*or* + +`cmr "prepare mlperf training data input bert"` + +`cmr "prepare mlperf training data input bert [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+ +Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'prepare,mlperf,training,data,input,bert', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="prepare,mlperf,training,data,input,bert"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=prepare,mlperf,training,data,input,bert) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "prepare mlperf training data input bert[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * Group "**implementation**" +
+ Click here to expand this section. + + * **`_nvidia`** (default) + - Environment variables: + - *CM_TMP_VARIATION*: `nvidia` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,git,repo,_repo.https://github.com/wchen61/training_results_v2.1,_branch.fix_bert_prepare_data + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + * `_reference` + - Environment variables: + - *CM_TMP_VARIATION*: `reference` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,mlperf,training,src + * CM names: `--adr.['mlperf-training-src']...` + - CM script: [get-mlperf-training-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-training-src) + * get,python3 + * CM names: `--adr.['python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_tensorflow + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_protobuf + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
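+For instance, the reference implementation can be requested by appending the variation to the tags (a minimal sketch via the CM Python API; equivalent to `cm run script --tags=prepare,mlperf,training,data,input,bert,_reference`):
+
+```python
+import cmind
+
+# Select the `_reference` variation of this script by adding it to the tags
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'prepare,mlperf,training,data,input,bert,_reference',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```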
+ + +#### Default variations + +`_nvidia` + +#### Script flags mapped to environment +
+ +Click here to expand this section. + +* `--clean=value` → `CM_MLPERF_TRAINING_CLEAN_TFRECORDS=value` +* `--data_dir=value` → `CM_DATA_DIR=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "clean":...}) +``` + +
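+For example, both flags can be passed together through the Python API (a minimal sketch; the `data_dir` value is illustrative, and `clean` is set to `yes` because the run scripts compare `CM_MLPERF_TRAINING_CLEAN_TFRECORDS` against the string `"yes"`):
+
+```python
+import cmind as cm
+
+# `clean` and `data_dir` are mapped to CM_MLPERF_TRAINING_CLEAN_TFRECORDS
+# and CM_DATA_DIR via the input_mapping in _cm.json below
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'prepare,mlperf,training,data,input,bert',
+               'clean': 'yes',
+               'data_dir': '/data/mlperf-training-bert',  # illustrative path
+               'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```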
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-bert/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-bert/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-bert/_cm.json)*** + * download,file,_gdown,_url.https://drive.google.com/uc?id=1fbGClQMi2CoMv7fwrwTC5YYPooQBdcFW + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + * download,file,_gdown,_url.https://drive.google.com/uc?id=1USK108J6hMM_d27xCHi738qBL8_BT1u1 + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + * download,file,_gdown,_url.https://drive.google.com/uc?id=1tmMgLwoBvbEJEHXh77sqrXYw5RpqT8R_ + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + * download-and-extract,file,_gdown,_extract,_url.https://drive.google.com/uc?id=14xV2OUGSQDG_yDBrmbSdcDC-QGeqpfs_ + - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract) + * download,file,_gdown,_url.https://drive.google.com/uc?id=1chiTBljF0Eh1U5pKs6ureVHgSbtU8OG_ + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + * download,file,_gdown,_url.https://drive.google.com/uc?id=1Q47V3K3jFRkbJ2zGCrKkKk-n0fvMZsa0 + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + * download,file,_gdown,_url.https://drive.google.com/uc?id=1vAcVmXSLsLeQ1q7gvHnQUSth5W_f_pwv + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + 1. ***Run native script if exists*** + * [run-nvidia.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-bert/run-nvidia.sh) + * [run-reference.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-bert/run-reference.sh) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-bert/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-bert/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-bert/customize.py)*** + 1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-bert/_cm.json) + +___ +### Script output +`cmr "prepare mlperf training data input bert [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_MLPERF_TRAINING_BERT_*` +#### New environment keys auto-detected from customize + +* `CM_MLPERF_TRAINING_BERT_CONFIG_PATH` +* `CM_MLPERF_TRAINING_BERT_DATA_PATH` +* `CM_MLPERF_TRAINING_BERT_TFRECORDS_PATH` +* `CM_MLPERF_TRAINING_BERT_VOCAB_PATH` \ No newline at end of file diff --git a/script/prepare-training-data-bert/_cm.json b/script/prepare-training-data-bert/_cm.json new file mode 100644 index 0000000000..fee0359aea --- /dev/null +++ b/script/prepare-training-data-bert/_cm.json @@ -0,0 +1,149 @@ +{ + "alias": "prepare-training-data-bert", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "deps": [], + "input_description": {}, + "input_mapping": { + "data_dir": "CM_DATA_DIR", + "clean": "CM_MLPERF_TRAINING_CLEAN_TFRECORDS" + }, + "new_env_keys": [ + "CM_MLPERF_TRAINING_BERT_*" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [ + { + "tags": "download,file,_gdown,_url.https://drive.google.com/uc?id=1fbGClQMi2CoMv7fwrwTC5YYPooQBdcFW", + "env": { + "CM_DOWNLOAD_FILENAME": "bert_config.json", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_BERT_CONFIG_FILE_PATH", + "CM_DOWNLOAD_PATH": "<<>>", + "CM_DOWNLOAD_CHECKSUM": "7f59165e21b7d566db610ff6756c926b" + }, + "force_cache": true, + "extra_cache_tags": "mlperf,training,bert,config" + }, + { + "tags": "download,file,_gdown,_url.https://drive.google.com/uc?id=1USK108J6hMM_d27xCHi738qBL8_BT1u1", + "env": { + "CM_DOWNLOAD_FILENAME": "vocab.txt", + "CM_DOWNLOAD_PATH": "<<>>", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_BERT_VOCAB_FILE_PATH", + "CM_DOWNLOAD_CHECKSUM": "64800d5d8528ce344256daf115d4965e" + }, + "force_cache": true, + "extra_cache_tags": "bert,vocab" + }, + { + "tags": "download,file,_gdown,_url.https://drive.google.com/uc?id=1tmMgLwoBvbEJEHXh77sqrXYw5RpqT8R_", + "env": { + "CM_DOWNLOAD_FILENAME": "bert_reference_results_text_md5.txt", + "CM_DOWNLOAD_PATH": "<<>>", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_BERT_REFERENCE_RESULTS_TEXT_MD5_FILE_PATH", + "CM_DOWNLOAD_CHECKSUM": "7d3a0619cb8bf7e829af99fa5c29daa8" + }, + "force_cache": true, + "extra_cache_tags": "bert,data,results,md5" + }, + { + "tags": "download-and-extract,file,_gdown,_extract,_url.https://drive.google.com/uc?id=14xV2OUGSQDG_yDBrmbSdcDC-QGeqpfs_", + "env": { + "CM_DOWNLOAD_FILENAME": "results_text.tar.gz", + "CM_EXTRACT_EXTRACTED_FILENAME": "results4", + "CM_DOWNLOAD_PATH": "<<>>", + "CM_EXTRACT_PATH": "<<>>", + "CM_EXTRACT_FINAL_ENV_NAME": "CM_BERT_TRAINING_DATA_PATH", + "CM_DOWNLOAD_CHECKSUM": "", + "CM_EXTRACT_EXTRACTED_CHECKSUM_FILE": "<<>>" + }, + "force_cache": true, + "extra_cache_tags": "bert,data,results" + }, + { + "tags": "download,file,_gdown,_url.https://drive.google.com/uc?id=1chiTBljF0Eh1U5pKs6ureVHgSbtU8OG_", + "env": { + "CM_DOWNLOAD_FILENAME": "model.ckpt-28252.data-00000-of-00001", + "CM_DOWNLOAD_PATH": "<<>>", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_BERT_CHECKPOINT_FILE_PATH", + "CM_DOWNLOAD_CHECKSUM": "50797acd537880bfb5a7ade80d976129" + }, + "force_cache": true, + "extra_cache_tags": "bert,checkpoint,data" + }, + { + "tags": "download,file,_gdown,_url.https://drive.google.com/uc?id=1Q47V3K3jFRkbJ2zGCrKkKk-n0fvMZsa0", + "env": { + "CM_DOWNLOAD_FILENAME": 
"model.ckpt-28252.index", + "CM_DOWNLOAD_PATH": "<<>>", + "CM_DOWNLOAD_CHECKSUM": "f97de3ae180eb8d479555c939d50d048" + }, + "force_cache": true, + "extra_cache_tags": "bert,checkpoint,index" + }, + { + "tags": "download,file,_gdown,_url.https://drive.google.com/uc?id=1vAcVmXSLsLeQ1q7gvHnQUSth5W_f_pwv", + "env": { + "CM_DOWNLOAD_FILENAME": "model.ckpt-28252.meta", + "CM_DOWNLOAD_PATH": "<<>>", + "CM_DOWNLOAD_CHECKSUM": "dbd16c731e8a8113bc08eeed0326b8e7" + }, + "force_cache": true, + "extra_cache_tags": "bert,checkpoint,meta" + } + ], + "tags": [ + "prepare", + "mlperf", + "training", + "data", + "input", + "bert" + ], + "uid": "1e06a7abe23545eb", + "variations": { + "nvidia": { + "group": "implementation", + "default": true, + "deps": [ + { + "tags": "get,git,repo,_repo.https://github.com/wchen61/training_results_v2.1,_branch.fix_bert_prepare_data", + "extra_cache_tags": "mlperf,training,results" + } + ], + "env": { + "CM_TMP_VARIATION": "nvidia" + } + }, + "reference": { + "group": "implementation", + "deps": [ + { + "tags": "get,mlperf,training,src", + "names": [ "mlperf-training-src" ] + }, + { + "tags": "get,python3", + "names": [ "python3" ] + }, + { + "tags": "get,generic-python-lib,_tensorflow", + "version": "2.4.0" + }, + { + "tags": "get,generic-python-lib,_protobuf", + "version_max": "3.20.1", + "version_max_usable": "3.20.1" + } + ], + "env": { + "CM_TMP_VARIATION": "reference" + } + } + }, + "versions": {} +} diff --git a/script/prepare-training-data-bert/customize.py b/script/prepare-training-data-bert/customize.py new file mode 100644 index 0000000000..a0cf7beb2c --- /dev/null +++ b/script/prepare-training-data-bert/customize.py @@ -0,0 +1,50 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + datadir = env.get('CM_DATA_DIR', os.getcwd()) + env['CM_DATA_DIR'] = datadir + + env['CM_BERT_CONFIG_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") + env['CM_BERT_VOCAB_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") + env['CM_BERT_DATA_DOWNLOAD_DIR'] = os.path.join(datadir, "download") + + env['CM_BERT_CHECKPOINT_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") + + if env.get("CM_TMP_VARIATION", "") == "nvidia": + code_path = os.path.join(env['CM_GIT_REPO_CHECKOUT_PATH'], 'NVIDIA', 'benchmarks', 'bert', 'implementations', 'pytorch-22.09') + env['CM_RUN_DIR'] = code_path + elif env.get("CM_TMP_VARIATION", "") == "reference": + code_path = os.path.join(env['CM_MLPERF_TRAINING_SOURCE'], 'language_model', 'tensorflow', 'bert', 'cleanup_scripts') + env['CM_RUN_DIR'] = code_path + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + data_dir = env['CM_DATA_DIR'] + env['CM_MLPERF_TRAINING_BERT_DATA_PATH'] = data_dir + + if env.get("CM_TMP_VARIATION", "") == "nvidia": + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join(data_dir, "hdf5", "eval", "eval_all.hdf5") + elif env.get("CM_TMP_VARIATION", "") == "reference": + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join(data_dir, "tfrecords", "eval_10k") + env['CM_MLPERF_TRAINING_BERT_TFRECORDS_PATH'] = os.path.join(data_dir, "tfrecords") + + env['CM_MLPERF_TRAINING_BERT_VOCAB_PATH'] = env['CM_BERT_VOCAB_FILE_PATH'] + env['CM_MLPERF_TRAINING_BERT_CONFIG_PATH'] = env['CM_BERT_CONFIG_FILE_PATH'] + + return {'return':0} diff --git a/script/prepare-training-data-bert/run-nvidia.sh b/script/prepare-training-data-bert/run-nvidia.sh new file mode 100644 index 
0000000000..23cd41289b --- /dev/null +++ b/script/prepare-training-data-bert/run-nvidia.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" + +CUR=${CM_DATA_DIR:-"$PWD/data"} +run "cd \"${CM_RUN_DIR}\"" +run "docker build --pull -t mlperf-nvidia:language_model ." +run "ID=`docker run -dt --runtime=nvidia --ipc=host -v $CUR:/workspace/bert_data mlperf-nvidia:language_model bash`" +run "docker exec $ID bash -c 'cd /workspace/bert && ./input_preprocessing/prepare_data.sh -s --outputdir /workspace/bert_data'" diff --git a/script/prepare-training-data-bert/run-reference.sh b/script/prepare-training-data-bert/run-reference.sh new file mode 100644 index 0000000000..97524312fa --- /dev/null +++ b/script/prepare-training-data-bert/run-reference.sh @@ -0,0 +1,81 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" +CUR=$PWD +DATA_DIR=${CM_DATA_DIR:-"$PWD/data"} + +cd ${CM_RUN_DIR} +mkdir -p ${DATA_DIR}/tfrecords +for i in $(seq -f "%05g" 0 499) +do + FILENAME="${DATA_DIR}/tfrecords/part-${i}-of-00500" + if [[ ${CM_MLPERF_TRAINING_CLEAN_TFRECORDS} != "yes" && -f ${FILENAME} && $(stat -c%s "$FILENAME") -gt 500000000 ]] ; then + echo "Skipping regenerating existing ${FILENAME}" + continue; + fi + cmd="python3 create_pretraining_data.py \ + --input_file=${CM_BERT_DATA_DOWNLOAD_DIR}/results4/part-${i}-of-00500 \ + --output_file=${DATA_DIR}/tfrecords/part-${i}-of-00500 \ + --vocab_file=${CM_BERT_VOCAB_FILE_PATH} \ + --do_lower_case=True \ + --max_seq_length=512 \ + --max_predictions_per_seq=76 \ + --masked_lm_prob=0.15 \ + --random_seed=12345 \ + --dupe_factor=10" + run "$cmd" +done + +FILENAME="${DATA_DIR}/eval_intermediate" +if [[ ${CM_MLPERF_TRAINING_CLEAN_TFRECORDS} != "yes" && -f ${FILENAME} && $(stat -c%s "$FILENAME") -gt 800000000 ]] ; then + echo "Skipping regenerating existing ${FILENAME}" +else + cmd="python3 create_pretraining_data.py \ + --input_file=${CM_BERT_DATA_DOWNLOAD_DIR}/results4/eval.txt \ + --output_file=${DATA_DIR}/eval_intermediate \ + --vocab_file=${CM_BERT_VOCAB_FILE_PATH} \ + --do_lower_case=True \ + --max_seq_length=512 \ + --max_predictions_per_seq=76 \ + --masked_lm_prob=0.15 \ + --random_seed=12345 \ + --dupe_factor=10" + + run "$cmd" +fi + +FILENAME=${DATA_DIR}/tfrecords/eval_10k +if [[ ${CM_MLPERF_TRAINING_CLEAN_TFRECORDS} != "yes" && -f ${FILENAME} && $(stat -c%s "$FILENAME") -gt 25000000 ]] ; then + echo "Skipping regenerating existing ${FILENAME}" +else + cmd="python3 pick_eval_samples.py \ + --input_tfrecord=${DATA_DIR}/eval_intermediate \ + --output_tfrecord=${DATA_DIR}/tfrecords/eval_10k \ + --num_examples_to_pick=10000" + + run "$cmd" +fi 
diff --git a/script/prepare-training-data-bert/run.sh b/script/prepare-training-data-bert/run.sh new file mode 100644 index 0000000000..ea6fd8aca3 --- /dev/null +++ b/script/prepare-training-data-bert/run.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" + +CUR=${CM_DATA_DIR:-"$PWD/data"} +cd ${CM_RUN_DIR} + +if [[ ${CM_TMP_VARIATION} == "nvidia" ]]; then + bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-nvidia.sh +elif [[ ${CM_TMP_VARIATION} == "reference" ]]; then + bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-reference.sh +fi diff --git a/script/prepare-training-data-bert/run_config.yml b/script/prepare-training-data-bert/run_config.yml new file mode 100644 index 0000000000..e39692ebc5 --- /dev/null +++ b/script/prepare-training-data-bert/run_config.yml @@ -0,0 +1,13 @@ +docker: + build: true + docker_os: ubuntu + docker_os_version: "22.04" + fake_run_deps: true + mounts: + - ${{ CM_DATA_DIR }}:${{ CM_DATA_DIR }} + +run_with_default_inputs: true #if false the script won't run automatic tests + +minimum_system_requirements: + ram: 512 #in GB + disk_space: 900 #in GB diff --git a/script/prepare-training-data-resnet/README.md b/script/prepare-training-data-resnet/README.md new file mode 100644 index 0000000000..cb2e437847 --- /dev/null +++ b/script/prepare-training-data-resnet/README.md @@ -0,0 +1,207 @@ +Automatically generated README for this automation recipe: **prepare-training-data-resnet** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=prepare-training-data-resnet,d42a8a8ca2704f9f) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-resnet)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *prepare,mlperf,training,data,input,resnet* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "prepare mlperf training data input resnet" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=prepare,mlperf,training,data,input,resnet` + +`cm run script --tags=prepare,mlperf,training,data,input,resnet[,variations] [--input_flags]` + +*or* + +`cmr "prepare mlperf training data input resnet"` + +`cmr "prepare mlperf training data input resnet [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+ +Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'prepare,mlperf,training,data,input,resnet', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="prepare,mlperf,training,data,input,resnet"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=prepare,mlperf,training,data,input,resnet) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "prepare mlperf training data input resnet[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_mxnet.#` + - Environment variables: + - *CM_MXNET_VERSION*: `#` + - Workflow: + +
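+The `#` in `_mxnet.#` is a wildcard: the text after `_mxnet.` is copied into `CM_MXNET_VERSION` (a minimal sketch; the version value is illustrative, and `customize.py` below replaces `-` with `.`, so `22-08` becomes container tag `22.08`):
+
+```python
+import cmind
+
+# Pin the MXNet container version via the `_mxnet.#` wildcard variation
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'prepare,mlperf,training,data,input,resnet,_mxnet.22-08',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```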
+ + + * Group "**implementation**" +
+ Click here to expand this section. + + * **`_nvidia`** (default) + - Environment variables: + - *CM_TMP_VARIATION*: `nvidia` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,mlperf,training,nvidia,code + * CM names: `--adr.['nvidia-training-code']...` + - CM script: [get-mlperf-training-nvidia-code](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-training-nvidia-code) + * get,git,repo,_repo.https://github.com/NVIDIA/DeepLearningExamples,_sha.81ee705868a11d6fe18c12d237abe4a08aab5fd6 + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + * `_reference` + - Environment variables: + - *CM_TMP_VARIATION*: `reference` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,mlperf,training,src + * CM names: `--adr.['mlperf-training-src']...` + - CM script: [get-mlperf-training-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-training-src) + * get,python3 + * CM names: `--adr.['python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_tensorflow + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_protobuf + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
+ + +#### Default variations + +`_nvidia` + +#### Script flags mapped to environment +
+ +Click here to expand this section. + +* `--data_dir=value` → `CM_DATA_DIR=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "data_dir":...}) +``` + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-resnet/_cm.json)*** + * get,dataset,imagenet,train + * CM names: `--adr.['imagenet-train']...` + - CM script: [get-dataset-imagenet-train](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-train) + * get,dataset,imagenet,val,original,_full + * CM names: `--adr.['imagenet-val']...` + - CM script: [get-dataset-imagenet-val](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-val) + * get,generic-sys-util,_rsync + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-resnet/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-resnet/_cm.json)*** + * download,file,_wget,_url.https://raw.githubusercontent.com/tensorflow/models/master/research/slim/datasets/imagenet_2012_validation_synset_labels.txt + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + * download,file,_wget,_url.https://raw.githubusercontent.com/tensorflow/tpu/master/tools/datasets/imagenet_to_gcs.py + * `if (CM_TMP_VARIATION == reference)` + - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file) + 1. ***Run native script if exists*** + * [run-nvidia.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-resnet/run-nvidia.sh) + * [run-reference.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-resnet/run-reference.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-resnet/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-resnet/customize.py)*** + 1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prepare-training-data-resnet/_cm.json) + +___ +### Script output +`cmr "prepare mlperf training data input resnet [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH` +* `CM_MLPERF_TRAINING_RESNET_*` +#### New environment keys auto-detected from customize + +* `CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH` +* `CM_MLPERF_TRAINING_RESNET_DATA_PATH` +* `CM_MLPERF_TRAINING_RESNET_TFRECORDS_PATH` \ No newline at end of file diff --git a/script/prepare-training-data-resnet/_cm.json b/script/prepare-training-data-resnet/_cm.json new file mode 100644 index 0000000000..40a0a4f909 --- /dev/null +++ b/script/prepare-training-data-resnet/_cm.json @@ -0,0 +1,121 @@ +{ + "alias": "prepare-training-data-resnet", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "MLPerf benchmark support", + "deps": [], + "input_description": {}, + "input_mapping": { + "data_dir": "CM_DATA_DIR" + }, + "new_env_keys": [ + "CM_MLPERF_TRAINING_RESNET_*", + "CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "deps": [ + { + "names": [ "imagenet-train" ], + "tags": "get,dataset,imagenet,train" + }, + { + "names": [ "imagenet-val" ], + "tags": "get,dataset,imagenet,val,original,_full" + }, + { + "tags": "get,generic-sys-util,_rsync" + } + ], + "prehook_deps": [ + { + "tags": "download,file,_wget,_url.https://raw.githubusercontent.com/tensorflow/models/master/research/slim/datasets/imagenet_2012_validation_synset_labels.txt", + "env": { + "CM_DOWNLOAD_PATH": "<<>>", + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_IMAGENET_LABELS_FILE_PATH", + "CM_DOWNLOAD_RENAME_FILE": "synset_labels.txt", + "CM_DOWNLOAD_CHECKSUM": "" + }, + "force_cache": true, + "extra_cache_tags": "imagenet,val,labels" + }, + { + "tags": "download,file,_wget,_url.https://raw.githubusercontent.com/tensorflow/tpu/master/tools/datasets/imagenet_to_gcs.py", + "env": { + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_IMAGENET_TO_GCS_SCRIPT_PATH" + }, + "force_cache": true, + "extra_cache_tags": "imagenet_to_gcs,script", + "enable_if_env": { + "CM_TMP_VARIATION": [ "reference" ] + } + } + ], + "tags": [ + "prepare", + "mlperf", + "training", + "data", + "input", + "resnet" + ], + "uid": "d42a8a8ca2704f9f", + "variations": { + "nvidia": { + "default": true, + "deps": [ + { + "names": [ + "nvidia-training-code" + ], + "tags": "get,mlperf,training,nvidia,code" + }, + { + "tags": "get,git,repo,_repo.https://github.com/NVIDIA/DeepLearningExamples,_sha.81ee705868a11d6fe18c12d237abe4a08aab5fd6", + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_NVIDIA_DEEPLEARNING_EXAMPLES_REPO_PATH" + }, + "extra_cache_tags": "nvidia,deeplearning_examples" + } + ], + "env": { + "CM_TMP_VARIATION": "nvidia" + }, + "group": "implementation" + }, + "mxnet.#": { + "env": { + "CM_MXNET_VERSION": "#" + } + }, + "reference": { + "deps": [ + { + "names": [ + "mlperf-training-src" + ], + "tags": "get,mlperf,training,src" + }, + { + "names": [ + "python3" + ], + "tags": "get,python3" + }, + { + "tags": "get,generic-python-lib,_tensorflow" + }, + { + "tags": "get,generic-python-lib,_protobuf" + } + ], + "env": { + "CM_TMP_VARIATION": "reference" + }, + "group": "implementation" + } + }, + "versions": {} +} diff --git a/script/prepare-training-data-resnet/customize.py 
b/script/prepare-training-data-resnet/customize.py new file mode 100644 index 0000000000..825a96df59 --- /dev/null +++ b/script/prepare-training-data-resnet/customize.py @@ -0,0 +1,52 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + datadir = env.get('CM_DATA_DIR', os.getcwd()) + env['CM_DATA_DIR'] = datadir + + env['MXNET_VER'] = env.get('CM_MXNET_VERSION', '22.08').replace("-", ".") + + env['CM_IMAGENET_LABELS_DOWNLOAD_DIR'] = env['CM_DATASET_IMAGENET_TRAIN_PATH'] + + if env.get("CM_TMP_VARIATION", "") == "nvidia": + code_path = os.path.join(env['CM_NVIDIA_DEEPLEARNING_EXAMPLES_REPO_PATH'], 'MxNet', 'Classification', 'RN50v1.5') + env['CM_RUN_DIR'] = code_path + i['run_script_input']['script_name'] = "run-nvidia" + + elif env.get("CM_TMP_VARIATION", "") == "reference": + code_path = os.path.join(env['CM_MLPERF_TRAINING_SOURCE'], 'image_classification', 'tensorflow2') + env['CM_RUN_DIR'] = code_path + i['run_script_input']['script_name'] = "run-reference" + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + data_dir = env['CM_DATA_DIR'] + env['CM_MLPERF_TRAINING_RESNET_DATA_PATH'] = data_dir + + env['CM_MLPERF_TRAINING_IMAGENET_PATH'] = env['CM_DATASET_IMAGENET_TRAIN_PATH'] + + if env.get("CM_TMP_VARIATION", "") == "nvidia": + env['CM_GET_DEPENDENT_CACHED_PATH'] = data_dir + env['CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH'] = data_dir + + elif env.get("CM_TMP_VARIATION", "") == "reference": + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join(data_dir, "tfrecords") + env['CM_MLPERF_TRAINING_RESNET_TFRECORDS_PATH'] = os.path.join(data_dir, "tfrecords") + + return {'return':0} diff --git a/script/prepare-training-data-resnet/run-nvidia.sh b/script/prepare-training-data-resnet/run-nvidia.sh new file mode 100644 index 0000000000..e7ffdb741b --- /dev/null +++ b/script/prepare-training-data-resnet/run-nvidia.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" +mkdir -p train_data/train +mkdir -p train_data/val +rsync -avz ${CM_DATASET_IMAGENET_TRAIN_PATH}/ train_data/train/ +rsync -avz ${CM_DATASET_IMAGENET_VAL_PATH}/ train_data/val/ +cd train_data/train +find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done +cd ../val +run "wget --no-check-certificate -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash" +cd ../../ +DATA_DIR=`pwd`/train_data + +CUR=${CM_DATA_DIR} +run "cd \"${CM_RUN_DIR}\"" +run "docker build --build-arg FROM_IMAGE_NAME=nvcr.io/nvidia/mxnet:${MXNET_VER}-py3 -t nvidia_rn50_mx ." 
+run "ID=`docker run -dt --gpus all --runtime=nvidia --ipc=host -v ${DATA_DIR}:/data -v ${CUR}:/preprocessed nvidia_rn50_mx bash`" +run "docker exec $ID bash -c './scripts/prepare_imagenet.sh /data /preprocessed'" diff --git a/script/prepare-training-data-resnet/run-reference.sh b/script/prepare-training-data-resnet/run-reference.sh new file mode 100644 index 0000000000..332da70cc4 --- /dev/null +++ b/script/prepare-training-data-resnet/run-reference.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" +CUR=$PWD +DATA_DIR=${CM_DATA_DIR:-"$PWD/data"} + +cd ${CM_RUN_DIR} +mkdir -p ${DATA_DIR}/tfrecords +cmd="python3 ${CM_IMAGENET_TO_GCS_SCRIPT_PATH} \ + --raw_data_dir=${CM_DATASET_IMAGENET_TRAIN_PATH} \ + --local_scratch_dir=${DATA_DIR}/tfrecords \ + --nogcs_upload" +run "$cmd" diff --git a/script/prepare-training-data-resnet/run_config.yml b/script/prepare-training-data-resnet/run_config.yml new file mode 100644 index 0000000000..688f811ea4 --- /dev/null +++ b/script/prepare-training-data-resnet/run_config.yml @@ -0,0 +1,13 @@ +docker: + build: true + docker_os: ubuntu + docker_os_version: "22.04" + fake_run_deps: true + mounts: + - ${{ CM_DATA_DIR }}:${{ CM_DATA_DIR }} + +run_with_default_inputs: true #if false the script won't run automatic tests + +minimum_system_requirements: + ram: 512 #in GB + disk_space: 200 #in GB diff --git a/script/preprocess-mlperf-inference-submission/README.md b/script/preprocess-mlperf-inference-submission/README.md new file mode 100644 index 0000000000..322583a506 --- /dev/null +++ b/script/preprocess-mlperf-inference-submission/README.md @@ -0,0 +1,145 @@ +Automatically generated README for this automation recipe: **preprocess-mlperf-inference-submission** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=preprocess-mlperf-inference-submission,c23068394a314266) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/preprocess-mlperf-inference-submission)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *run,mlc,mlcommons,mlperf,inference,submission,mlperf-inference,processor,preprocessor,preprocess* +* Output cached? 
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=run,mlc,mlcommons,mlperf,inference,submission,mlperf-inference,processor,preprocessor,preprocess` + +`cm run script --tags=run,mlc,mlcommons,mlperf,inference,submission,mlperf-inference,processor,preprocessor,preprocess [--input_flags]` + +*or* + +`cmr "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess"` + +`cmr "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'run,mlc,mlcommons,mlperf,inference,submission,mlperf-inference,processor,preprocessor,preprocess', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
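+ +The script-specific flags listed under "Customization" below can also be passed to this API call as plain dictionary keys. A minimal sketch, assuming an illustrative submission directory and submitter name (hypothetical placeholders, not defaults): + +```python +import cmind + +# 'submission_dir' and 'submitter' are this script's documented input flags; +# the values below are hypothetical examples. +r = cmind.access({'action': 'run', + 'automation': 'script', + 'tags': 'run,mlc,mlcommons,mlperf,inference,submission,mlperf-inference,processor,preprocessor,preprocess', + 'out': 'con', + 'submission_dir': '/path/to/mlperf_submission', + 'submitter': 'MyOrg'}) + +if r['return'] > 0: + print(r['error']) +```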
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="run,mlc,mlcommons,mlperf,inference,submission,mlperf-inference,processor,preprocessor,preprocess"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,mlc,mlcommons,mlperf,inference,submission,mlperf-inference,processor,preprocessor,preprocess) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` +* `--submitter=value` → `CM_MLPERF_SUBMITTER=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "submission_dir":...}) +``` + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/preprocess-mlperf-inference-submission/_cm.json)*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,mlcommons,inference,src + * CM names: `--adr.['inference-src', 'submission-checker-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + * get,mlperf,submission,dir + * `if (CM_MLPERF_INFERENCE_SUBMISSION_DIR != on)` + * CM names: `--adr.['get-mlperf-submission-dir']...` + - CM script: [get-mlperf-inference-submission-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-submission-dir) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/preprocess-mlperf-inference-submission/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/preprocess-mlperf-inference-submission/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/preprocess-mlperf-inference-submission/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/preprocess-mlperf-inference-submission/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/preprocess-mlperf-inference-submission/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/preprocess-mlperf-inference-submission/_cm.json) + +___ +### Script output +`cmr "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/preprocess-mlperf-inference-submission/_cm.json b/script/preprocess-mlperf-inference-submission/_cm.json new file mode 100644 index 0000000000..312ab7c2f1 --- /dev/null +++ b/script/preprocess-mlperf-inference-submission/_cm.json @@ -0,0 +1,50 @@ +{ + "alias": "preprocess-mlperf-inference-submission", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "category": "MLPerf benchmark support", + "clean_files": [], + "deps": [ + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "names": [ + "inference-src", + "submission-checker-src" + ], + "tags": "get,mlcommons,inference,src" + }, + { + "tags": "get,mlperf,submission,dir", + "names": [ + "get-mlperf-submission-dir" + ], + "skip_if_env": { + "CM_MLPERF_INFERENCE_SUBMISSION_DIR": [ "on" ] + } + } + ], + "input_mapping": { + "submission_dir": "CM_MLPERF_INFERENCE_SUBMISSION_DIR", + "submitter": "CM_MLPERF_SUBMITTER" + }, + "tags": [ + "run", + "mlc", + "mlcommons", + "mlperf", + "inference", + "submission", + "mlperf-inference", + "processor", + "preprocessor", + "preprocess" + ], + "uid": "c23068394a314266" +} diff --git a/script/preprocess-mlperf-inference-submission/customize.py b/script/preprocess-mlperf-inference-submission/customize.py new file mode 100644 index 0000000000..03bca7cd9b --- /dev/null +++ b/script/preprocess-mlperf-inference-submission/customize.py @@ -0,0 +1,43 @@ +from cmind import utils +import cmind as cm +import os +from os.path import exists +import shutil + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + submission_dir = env.get("CM_MLPERF_INFERENCE_SUBMISSION_DIR", "") + + if submission_dir == "": + print("Please set CM_MLPERF_INFERENCE_SUBMISSION_DIR") + return {'return': 1, 'error':'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not specified'} + + submitter = env.get("CM_MLPERF_SUBMITTER", "cTuning") + submission_processed = submission_dir + "_processed" + + if os.path.exists(submission_processed): + shutil.rmtree(submission_processed) + + os.system("rm -rf " + submission_dir + "_processed") + + CMD = env['CM_PYTHON_BIN'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission", + "preprocess_submission.py") + "' --input '" + submission_dir + "' --submitter '" + submitter + "' --output '" + submission_processed + "'" + env['CM_RUN_CMD'] = CMD + + return {'return':0} + +def postprocess(i): + + env = i['env'] + submission_dir = env["CM_MLPERF_INFERENCE_SUBMISSION_DIR"] + import datetime + submission_backup = submission_dir+"_backup_"+'{date:%Y-%m-%d_%H:%M:%S}'.format( date=datetime.datetime.now() ) + + submission_processed = submission_dir + "_processed" + shutil.copytree(submission_dir, submission_backup) + shutil.rmtree(submission_dir) + os.rename(submission_processed, submission_dir) + + return {'return':0} diff --git a/script/preprocess-mlperf-inference-submission/run.sh b/script/preprocess-mlperf-inference-submission/run.sh new file mode 100644 index 0000000000..1b3c5c3c02 --- /dev/null +++ b/script/preprocess-mlperf-inference-submission/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash +cmd=${CM_RUN_CMD} +echo "${cmd}" +eval "${cmd}" +test $? -eq 0 || exit $? diff --git a/script/print-croissant-desc/README-extra.md b/script/print-croissant-desc/README-extra.md new file mode 100644 index 0000000000..a3c638caaa --- /dev/null +++ b/script/print-croissant-desc/README-extra.md @@ -0,0 +1,16 @@ +# MLCommons CM automation recipe + +## Print [Croissant](https://github.com/mlcommons/croissant) description from metadata URL + +```bash +pip install cmind + +cm pull repo ctuning@mlcommons-ck + +cmr "print croissant desc" --url="https://raw.githubusercontent.com/mlcommons/croissant/main/datasets/1.0/gpt-3/metadata.json" +``` + +## About + +* Code snippet taken from https://github.com/mlcommons/croissant/pull/564/files ([@mkuchnik](https://github.com/mkuchnik)) +* CM automation recipe added by [@gfursin](https://github.com/gfursin).
\ No newline at end of file diff --git a/script/print-croissant-desc/README.md b/script/print-croissant-desc/README.md new file mode 100644 index 0000000000..1d7e989a32 --- /dev/null +++ b/script/print-croissant-desc/README.md @@ -0,0 +1,146 @@ +Automatically generated README for this automation recipe: **print-croissant-desc** + +Category: **Tests** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=print-croissant-desc,59116d5c98a04d4f) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-croissant-desc)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *print,croissant,desc* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "print croissant desc" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=print,croissant,desc` + +`cm run script --tags=print,croissant,desc [--input_flags]` + +*or* + +`cmr "print croissant desc"` + +`cmr "print croissant desc " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'print,croissant,desc', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
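+ +Since the `--url` flag maps to `CM_PRINT_CROISSANT_URL` (see "Customization" below), the metadata URL can also be passed through the Python API. A minimal sketch using this recipe's default dataset URL: + +```python +import cmind + +# 'url' is the documented input flag; the value is the default from _cm.yaml. +r = cmind.access({'action': 'run', + 'automation': 'script', + 'tags': 'print,croissant,desc', + 'out': 'con', + 'url': 'https://raw.githubusercontent.com/mlcommons/croissant/main/datasets/1.0/gpt-3/metadata.json'}) + +if r['return'] > 0: + print(r['error']) +```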
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="print,croissant,desc"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=print,croissant,desc) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "print croissant desc" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--url=value` → `CM_PRINT_CROISSANT_URL=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "url":...}) +``` + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_PRINT_CROISSANT_URL: `https://raw.githubusercontent.com/mlcommons/croissant/main/datasets/1.0/gpt-3/metadata.json` + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-croissant-desc/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,croissant + * CM names: `--adr.['croissant']...` + - CM script: [get-croissant](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-croissant) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-croissant-desc/_cm.yaml) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-croissant-desc/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-croissant-desc/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-croissant-desc/_cm.yaml) + 1. Run "postprocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-croissant-desc/_cm.yaml) + +___ +### Script output +`cmr "print croissant desc " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/print-croissant-desc/_cm.yaml b/script/print-croissant-desc/_cm.yaml new file mode 100644 index 0000000000..ef4d2a7ba2 --- /dev/null +++ b/script/print-croissant-desc/_cm.yaml @@ -0,0 +1,29 @@ +alias: print-croissant-desc +uid: 59116d5c98a04d4f + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: Tests + +input_mapping: + url: CM_PRINT_CROISSANT_URL + +default_env: + CM_PRINT_CROISSANT_URL: "https://raw.githubusercontent.com/mlcommons/croissant/main/datasets/1.0/gpt-3/metadata.json" + +deps: +- tags: detect,os +- tags: get,sys-utils-cm +- names: + - python + - python3 + tags: get,python3 +- names: + - croissant + tags: get,croissant + +tags: +- print +- croissant +- desc diff --git a/script/print-croissant-desc/code.py b/script/print-croissant-desc/code.py new file mode 100644 index 0000000000..a475c5a6ec --- /dev/null +++ b/script/print-croissant-desc/code.py @@ -0,0 +1,27 @@ +# Taken from https://github.com/mlcommons/croissant/pull/564/files (@mkuchnik) + +import os +import mlcroissant as mlc + +def main(): + + url = os.environ.get('CM_PRINT_CROISSANT_URL', '') + + if url=='': + print ('Error: --url is not specified') + exit(1) + + ds = mlc.Dataset(url) + metadata = ds.metadata.to_json() + + print ('') + print ('Croissant meta data URL: {}'.format(url)) + print ('') + print (f"{metadata['name']}: {metadata['description']}") + + print ('') + for x in ds.records(record_set="default"): + print(x) + +if __name__ == '__main__': + main() diff --git a/script/print-croissant-desc/run.bat b/script/print-croissant-desc/run.bat new file mode 100644 index 0000000000..37f249b0fe --- /dev/null +++ b/script/print-croissant-desc/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git
a/script/print-croissant-desc/run.sh b/script/print-croissant-desc/run.sh new file mode 100644 index 0000000000..9b94917d9e --- /dev/null +++ b/script/print-croissant-desc/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +test $? -eq 0 || exit $? diff --git a/script/print-hello-world-java/README.md b/script/print-hello-world-java/README.md new file mode 100644 index 0000000000..af2aacf9b6 --- /dev/null +++ b/script/print-hello-world-java/README.md @@ -0,0 +1,125 @@ +Automatically generated README for this automation recipe: **print-hello-world-java** + +Category: **Tests** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=print-hello-world-java,3b62dc46cce3489c) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-java)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *print,hello world,hello-world,hello,world,java* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "print hello world hello-world hello world java" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=print,hello world,hello-world,hello,world,java` + +`cm run script --tags=print,hello world,hello-world,hello,world,java ` + +*or* + +`cmr "print hello world hello-world hello world java"` + +`cmr "print hello world hello-world hello world java " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'print,hello world,hello-world,hello,world,java', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
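+ +The Java program run by this recipe (`code.java` below) prints the `CM_VAR1` and `CM_VAR2` environment variables. A minimal sketch that sets them from Python, assuming the `env` dictionary is accepted as an input key here in the same way as `--env.KEY=VALUE` on the command line: + +```python +import cmind + +# CM_VAR1/CM_VAR2 values are illustrative; code.java just prints whatever it finds. +r = cmind.access({'action': 'run', + 'automation': 'script', + 'tags': 'print,hello world,hello-world,hello,world,java', + 'out': 'con', + 'env': {'CM_VAR1': 'ABC', 'CM_VAR2': 'XYZ'}}) + +if r['return'] > 0: + print(r['error']) +```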
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="print,hello world,hello-world,hello,world,java"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=print,hello world,hello-world,hello,world,java) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "print hello world hello-world hello world java" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-java/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,java + * CM names: `--adr.['java']...` + - CM script: [get-java](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-java) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-java/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-java/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-java/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-java/_cm.json) + 1. Run "postprocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-java/_cm.json) + +___ +### Script output +`cmr "print hello world hello-world hello world java " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/print-hello-world-java/_cm.json b/script/print-hello-world-java/_cm.json new file mode 100644 index 0000000000..6aeb64deb8 --- /dev/null +++ b/script/print-hello-world-java/_cm.json @@ -0,0 +1,26 @@ +{ + "alias": "print-hello-world-java", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Tests", + "deps": [ + { + "tags": "detect,os" + }, + { + "names": [ + "java" + ], + "tags": "get,java" + } + ], + "tags": [ + "print", + "hello world", + "hello-world", + "hello", + "world", + "java" + ], + "uid": "3b62dc46cce3489c" +} diff --git a/script/print-hello-world-java/code.java b/script/print-hello-world-java/code.java new file mode 100644 index 0000000000..4bb917c9e0 --- /dev/null +++ b/script/print-hello-world-java/code.java @@ -0,0 +1,27 @@ +/* + Developer: Grigori Fursin +*/ + +//Import libraries... +import java.io.*; + +public class hello_world +{ + static int N=16; + static double[][] A=new double [N][N]; + static double[][] B=new double [N][N]; + static double[][] C=new double [N][N]; + + // ******************************************************************* + public static void main(String args[]) + { + System.out.println("Hello world!"); + System.out.println(""); + + String env=System.getenv("CM_VAR1"); + System.out.println("CM_VAR1="+env); + + env=System.getenv("CM_VAR2"); + System.out.println("CM_VAR2="+env); + } +} diff --git a/script/print-hello-world-java/run.bat b/script/print-hello-world-java/run.bat new file mode 100644 index 0000000000..f57f2084b4 --- /dev/null +++ b/script/print-hello-world-java/run.bat @@ -0,0 +1,4 @@ +echo %CM_JAVA_BIN_WITH_PATH% + +%CM_JAVA_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.java +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/print-hello-world-java/run.sh b/script/print-hello-world-java/run.sh new file mode 100644 index 0000000000..7c5ab3f6aa --- /dev/null +++ b/script/print-hello-world-java/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +which ${CM_JAVA_BIN_WITH_PATH} + +${CM_JAVA_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.java +test $? -eq 0 || exit $?
diff --git a/script/print-hello-world-javac/README.md b/script/print-hello-world-javac/README.md new file mode 100644 index 0000000000..ef9a2e6a7e --- /dev/null +++ b/script/print-hello-world-javac/README.md @@ -0,0 +1,125 @@ +Automatically generated README for this automation recipe: **print-hello-world-javac** + +Category: **Tests** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=print-hello-world-javac,040fafd538104819) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-javac)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *print,hello world,hello-world,hello,world,javac* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "print hello world hello-world hello world javac" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=print,hello world,hello-world,hello,world,javac` + +`cm run script --tags=print,hello world,hello-world,hello,world,javac ` + +*or* + +`cmr "print hello world hello-world hello world javac"` + +`cmr "print hello world hello-world hello world javac " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'print,hello world,hello-world,hello,world,javac', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="print,hello world,hello-world,hello,world,javac"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=print,hello world,hello-world,hello,world,javac) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "print hello world hello-world hello world javac" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-javac/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,javac + * CM names: `--adr.['javac']...` + - CM script: [get-javac](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-javac) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-javac/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-javac/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-javac/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-javac/_cm.json) + 1. Run "postprocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-javac/_cm.json) + +___ +### Script output +`cmr "print hello world hello-world hello world javac " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/print-hello-world-javac/_cm.json b/script/print-hello-world-javac/_cm.json new file mode 100644 index 0000000000..39be803cd4 --- /dev/null +++ b/script/print-hello-world-javac/_cm.json @@ -0,0 +1,26 @@ +{ + "alias": "print-hello-world-javac", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Tests", + "deps": [ + { + "tags": "detect,os" + }, + { + "names": [ + "javac" + ], + "tags": "get,javac" + } + ], + "tags": [ + "print", + "hello world", + "hello-world", + "hello", + "world", + "javac" + ], + "uid": "040fafd538104819" +} diff --git a/script/print-hello-world-javac/code.java b/script/print-hello-world-javac/code.java new file mode 100644 index 0000000000..9eb859cdaf --- /dev/null +++ b/script/print-hello-world-javac/code.java @@ -0,0 +1,27 @@ +/* + Developer: Grigori Fursin +*/ + +//Import libraries... +import java.io.*; + +public class code +{ + static int N=16; + static double[][] A=new double [N][N]; + static double[][] B=new double [N][N]; + static double[][] C=new double [N][N]; + + // ******************************************************************* + public static void main(String args[]) + { + System.out.println("Hello world!"); + System.out.println(""); + + String env=System.getenv("CM_VAR1"); + System.out.println("CM_VAR1="+env); + + env=System.getenv("CM_VAR2"); + System.out.println("CM_VAR2="+env); + } +} diff --git a/script/print-hello-world-javac/run.bat b/script/print-hello-world-javac/run.bat new file mode 100644 index 0000000000..583b89804e --- /dev/null +++ b/script/print-hello-world-javac/run.bat @@ -0,0 +1,8 @@ +echo "%CM_JAVAC_BIN_WITH_PATH%" +echo.
+ +"%CM_JAVAC_BIN_WITH_PATH%" %CM_TMP_CURRENT_SCRIPT_PATH%\code.java +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +"%CM_JAVA_BIN_WITH_PATH%" -classpath "%CM_TMP_CURRENT_SCRIPT_PATH%" code +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/print-hello-world-javac/run.sh b/script/print-hello-world-javac/run.sh new file mode 100644 index 0000000000..c7fb26cbc4 --- /dev/null +++ b/script/print-hello-world-javac/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +echo "${CM_JAVAC_BIN_WITH_PATH}" +echo "" + +${CM_JAVAC_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.java +test $? -eq 0 || exit 1 + +${CM_JAVA_BIN_WITH_PATH} -classpath "${CM_TMP_CURRENT_SCRIPT_PATH}" code +test $? -eq 0 || exit 1 diff --git a/script/print-hello-world-py/README.md b/script/print-hello-world-py/README.md new file mode 100644 index 0000000000..9b6618c197 --- /dev/null +++ b/script/print-hello-world-py/README.md @@ -0,0 +1,130 @@ +Automatically generated README for this automation recipe: **print-hello-world-py** + +Category: **Tests** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=print-hello-world-py,d83274c7eb754d90) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-py)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *print,hello world,hello-world,hello,world,python* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "print hello world hello-world hello world python" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=print,hello world,hello-world,hello,world,python` + +`cm run script --tags=print,hello world,hello-world,hello,world,python ` + +*or* + +`cmr "print hello world hello-world hello world python"` + +`cmr "print hello world hello-world hello world python " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'print,hello world,hello-world,hello,world,python', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="print,hello world,hello-world,hello,world,python"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=print,hello world,hello-world,hello,world,python) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "print hello world hello-world hello world python" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-py/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * print,python-version + * `if (CM_SKIP_PRINT != True OR CM_SKIP_PRINT2 != True)` + - CM script: [print-python-version](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/print-python-version) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-py/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-py/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-py/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-py/_cm.json) + 1. Run "postprocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world-py/_cm.json) + +___ +### Script output +`cmr "print hello world hello-world hello world python " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/print-hello-world-py/_cm.json b/script/print-hello-world-py/_cm.json new file mode 100644 index 0000000000..5e6a0629c7 --- /dev/null +++ b/script/print-hello-world-py/_cm.json @@ -0,0 +1,37 @@ +{ + "alias": "print-hello-world-py", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Tests", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "get,sys-utils-cm" + }, + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "tags": "print,python-version", + "skip_if_env": { + "CM_SKIP_PRINT":["True"], + "CM_SKIP_PRINT2":["True"] + } + } + ], + "tags": [ + "print", + "hello world", + "hello-world", + "hello", + "world", + "python" + ], + "uid": "d83274c7eb754d90" +} diff --git a/script/print-hello-world-py/code.py b/script/print-hello-world-py/code.py new file mode 100644 index 0000000000..735a890622 --- /dev/null +++ b/script/print-hello-world-py/code.py @@ -0,0 +1,6 @@ +def main(): + print ('') + print ('HELLO WORLD from Python') + +if __name__ == '__main__': + main() diff --git a/script/print-hello-world-py/run.bat b/script/print-hello-world-py/run.bat new file mode 100644 index 0000000000..d1881d3a35 --- /dev/null +++ b/script/print-hello-world-py/run.bat @@ -0,0 +1,8 @@ +IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD% + +%CM_PYTHON_BIN_WITH_PATH% --version + +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo CM_NEW_VAR_FROM_RUN=XYZ > tmp-run-env.out diff --git a/script/print-hello-world-py/run.sh b/script/print-hello-world-py/run.sh new file mode 100644 index 0000000000..b7c69c7906 --- /dev/null +++ b/script/print-hello-world-py/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash +
+CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} + +which ${CM_PYTHON_BIN_WITH_PATH} +${CM_PYTHON_BIN_WITH_PATH} --version + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +test $? -eq 0 || exit $? + +echo "CM_NEW_VAR_FROM_RUN=$MLPERF_XYZ" > tmp-run-env.out diff --git a/script/print-hello-world/README.md b/script/print-hello-world/README.md new file mode 100644 index 0000000000..d2f6d33263 --- /dev/null +++ b/script/print-hello-world/README.md @@ -0,0 +1,137 @@ +Automatically generated README for this automation recipe: **print-hello-world** + +Category: **Tests** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=print-hello-world,b9f0acba4aca4baa) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *print,hello-world,hello world,hello,world,native-script,native,script* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "print hello-world hello world hello world native-script native script" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=print,hello-world,hello world,hello,world,native-script,native,script` + +`cm run script --tags=print,hello-world,hello world,hello,world,native-script,native,script [--input_flags]` + +*or* + +`cmr "print hello-world hello world hello world native-script native script"` + +`cmr "print hello-world hello world hello world native-script native script " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'print,hello-world,hello world,hello,world,native-script,native,script', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
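+ +This recipe maps the `--test1` flag to `CM_ENV_TEST1`, and its native scripts simply echo `CM_ENV_TEST1`, `CM_ENV_TEST2` and `CM_ENV_TEST3` (see "Customization" below). A minimal sketch overriding the default `CM_ENV_TEST1` via the Python API: + +```python +import cmind + +# 'test1' is the documented input flag; the value is an illustrative override. +r = cmind.access({'action': 'run', + 'automation': 'script', + 'tags': 'print,hello-world,hello world,hello,world,native-script,native,script', + 'out': 'con', + 'test1': 'TEST1-OVERRIDE'}) + +if r['return'] > 0: + print(r['error']) +```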
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="print,hello-world,hello world,hello,world,native-script,native,script"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=print,hello-world,hello world,hello,world,native-script,native,script) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "print hello-world hello world hello world native-script native script" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--test1=value` → `CM_ENV_TEST1=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "test1":...}) +``` + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_ENV_TEST1: `TEST1` + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world/_cm.json) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world/_cm.json) + 1. Run "postprocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-hello-world/_cm.json) + +___ +### Script output +`cmr "print hello-world hello world hello world native-script native script " [--input_flags] -j` +#### New environment keys (filter) + +* `CM_ENV_TEST*` +#### New environment keys auto-detected from customize diff --git a/script/print-hello-world/_cm.json b/script/print-hello-world/_cm.json new file mode 100644 index 0000000000..c8ccffe533 --- /dev/null +++ b/script/print-hello-world/_cm.json @@ -0,0 +1,32 @@ +{ + "alias": "print-hello-world", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Tests", + "default_env": { + "CM_ENV_TEST1": "TEST1" + }, + "env": { + "CM_ENV_TEST2": "TEST2" + }, + "input_mapping": { + "test1": "CM_ENV_TEST1" + }, + "new_env_keys": [ + "CM_ENV_TEST*" + ], + "new_state_keys": [ + "hello_world*" + ], + "tags": [ + "print", + "hello-world", + "hello world", + "hello", + "world", + "native-script", + "native", + "script" + ], + "uid": "b9f0acba4aca4baa" +} diff --git a/script/print-hello-world/run.bat b/script/print-hello-world/run.bat new file mode 100644 index 0000000000..03810bf9c1 --- /dev/null +++ b/script/print-hello-world/run.bat @@ -0,0 +1,7 @@ +echo. +echo CM_ENV_TEST1 = %CM_ENV_TEST1% +echo CM_ENV_TEST2 = %CM_ENV_TEST2% +echo CM_ENV_TEST3 = %CM_ENV_TEST3% + +echo. +echo HELLO WORLD! diff --git a/script/print-hello-world/run.sh b/script/print-hello-world/run.sh new file mode 100644 index 0000000000..32824d982d --- /dev/null +++ b/script/print-hello-world/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +echo "" +echo "CM_ENV_TEST1 = ${CM_ENV_TEST1}" +echo "CM_ENV_TEST2 = ${CM_ENV_TEST2}" +echo "CM_ENV_TEST3 = ${CM_ENV_TEST3}" + +echo "" +echo "HELLO WORLD!"
diff --git a/script/print-python-version/README.md b/script/print-python-version/README.md new file mode 100644 index 0000000000..a18eb31f44 --- /dev/null +++ b/script/print-python-version/README.md @@ -0,0 +1,123 @@ +Automatically generated README for this automation recipe: **print-python-version** + +Category: **Tests** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=print-python-version,d3a538fa4abb464b) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-python-version)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *print,python,version,python-version* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "print python version python-version" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=print,python,version,python-version` + +`cm run script --tags=print,python,version,python-version ` + +*or* + +`cmr "print python version python-version"` + +`cmr "print python version python-version " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'print,python,version,python-version', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="print,python,version,python-version"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=print,python,version,python-version) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "print python version python-version" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-python-version/_cm.json)*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-python-version/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-python-version/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-python-version/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-python-version/_cm.json) + 1. Run "postprocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/print-python-version/_cm.json) + +___ +### Script output +`cmr "print python version python-version " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/print-python-version/_cm.json b/script/print-python-version/_cm.json new file mode 100644 index 0000000000..6778ff7b3d --- /dev/null +++ b/script/print-python-version/_cm.json @@ -0,0 +1,22 @@ +{ + "alias": "print-python-version", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Tests", + "deps": [ + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + } + ], + "tags": [ + "print", + "python", + "version", + "python-version" + ], + "uid": "d3a538fa4abb464b" +} diff --git a/script/print-python-version/run.bat b/script/print-python-version/run.bat new file mode 100644 index 0000000000..e790303431 --- /dev/null +++ b/script/print-python-version/run.bat @@ -0,0 +1,8 @@ +echo. + +echo CM_PYTHON_BIN = %CM_PYTHON_BIN% +echo CM_PYTHON_BIN_WITH_PATH = %CM_PYTHON_BIN_WITH_PATH% + +echo.
+ +%CM_PYTHON_BIN_WITH_PATH% --version diff --git a/script/print-python-version/run.sh b/script/print-python-version/run.sh new file mode 100644 index 0000000000..3c54cd68e1 --- /dev/null +++ b/script/print-python-version/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +echo "" + +echo "CM_PYTHON_BIN = ${CM_PYTHON_BIN}" +echo "CM_PYTHON_BIN_WITH_PATH = ${CM_PYTHON_BIN_WITH_PATH}" + +echo "" + +${CM_PYTHON_BIN_WITH_PATH} --version + diff --git a/script/process-ae-users/README.md b/script/process-ae-users/README.md new file mode 100644 index 0000000000..326236b746 --- /dev/null +++ b/script/process-ae-users/README.md @@ -0,0 +1,138 @@ +Automatically generated README for this automation recipe: **process-ae-users** + +Category: **Reproducibility and artifact evaluation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=process-ae-users,5800f1ed677e4efb) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-ae-users)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *process,ae,users* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "process ae users" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=process,ae,users` + +`cm run script --tags=process,ae,users [--input_flags]` + +*or* + +`cmr "process ae users"` + +`cmr "process ae users " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'process,ae,users', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
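+ +The `--file` flag maps to `CM_PROCESS_AE_USERS_INPUT_FILE` (see "Customization" below), and `code.py` reads it as a CSV with `first`, `last` and `affiliation` columns. A minimal sketch passing a hypothetical CSV path through the Python API: + +```python +import cmind + +# 'file' is the documented input flag; 'ae-users.csv' is a hypothetical input. +r = cmind.access({'action': 'run', + 'automation': 'script', + 'tags': 'process,ae,users', + 'out': 'con', + 'file': 'ae-users.csv'}) + +if r['return'] > 0: + print(r['error']) +```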
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="process,ae,users"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=process,ae,users) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "process ae users" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--file=value` → `CM_PROCESS_AE_USERS_INPUT_FILE=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "file":...}) +``` + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-ae-users/_cm.json)*** + * get,python3 + * CM names: `--adr.['python3', 'python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-ae-users/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-ae-users/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-ae-users/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-ae-users/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-ae-users/_cm.json) + 1. Run "postprocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-ae-users/_cm.json) + +___ +### Script output +`cmr "process ae users " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/process-ae-users/_cm.json b/script/process-ae-users/_cm.json new file mode 100644 index 0000000000..5b73955a92 --- /dev/null +++ b/script/process-ae-users/_cm.json @@ -0,0 +1,25 @@ +{ + "alias": "process-ae-users", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "category": "Reproducibility and artifact evaluation", + "input_mapping": { + "file": "CM_PROCESS_AE_USERS_INPUT_FILE" + }, + "deps": [ + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3" + } + ], + "tags": [ + "process", + "ae", + "users" + ], + "uid": "5800f1ed677e4efb" +} diff --git a/script/process-ae-users/code.py b/script/process-ae-users/code.py new file mode 100644 index 0000000000..6437eaa5e4 --- /dev/null +++ b/script/process-ae-users/code.py @@ -0,0 +1,78 @@ +import os +import csv +import json +import cmind + +def main(): + f = os.environ.get('CM_PROCESS_AE_USERS_INPUT_FILE','') + + print ('Input CSV file: {}'.format(f)) + + users = [] + with open(f, 'r') as ff: + csvreader = csv.DictReader(ff) + for row in csvreader: + if len(row)>0: + users.append(row) + + print ('') + html = '
    \n' + for user in sorted(users, key = lambda u: (u['last'].lower(), u['first'].lower())): + + full_name = user['first']+' '+user['last'] + + name = full_name + ' ('+user['affiliation']+')' + + print (name) + + html += '
  • '+name+'\n' + + # Checking contributor + r = cmind.access({'action':'find', + 'automation':'contributor', + 'artifact':full_name}) + if r['return']>0: return r + + lst = r['list'] + + if len(lst)==0: + print (' CM contributor not found!') + + meta = { + 'challenges': [ + 'ae-micro2023' + ], + 'last_participation_date': '202309', + 'name': full_name, + 'organization': user['affiliation'] + } + + print (' Adding to mlcommons@ck ...') + r = cmind.access({'out':'con', + 'action':'add', + 'automation':'contributor,68eae17b590d4f8f', # Need UID since using common function + 'artifact':'mlcommons@ck:'+full_name, + 'meta':meta, + 'common':True + }) + if r['return']>0: return r + + + html += '
</ul>\n' + + fo = f+'.html' + + print ('') + print ('Saved HTML to {}'.format(fo)) + + cmind.utils.save_txt(fo, html) + + + + return {'return':0} + + +if __name__ == '__main__': + r=main() + if r['return']>0: + cmind.error(r) diff --git a/script/process-ae-users/customize.py b/script/process-ae-users/customize.py new file mode 100644 index 0000000000..fef0a3ddfd --- /dev/null +++ b/script/process-ae-users/customize.py @@ -0,0 +1,10 @@ +from cmind import utils +import cmind as cm +import os + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + return {'return':0} diff --git a/script/process-ae-users/run.bat b/script/process-ae-users/run.bat new file mode 100644 index 0000000000..37f249b0fe --- /dev/null +++ b/script/process-ae-users/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/process-ae-users/run.sh b/script/process-ae-users/run.sh new file mode 100644 index 0000000000..9b94917d9e --- /dev/null +++ b/script/process-ae-users/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +test $? -eq 0 || exit $? diff --git a/script/process-mlperf-accuracy/README.md b/script/process-mlperf-accuracy/README.md new file mode 100644 index 0000000000..ff6fe6e805 --- /dev/null +++ b/script/process-mlperf-accuracy/README.md @@ -0,0 +1,332 @@ +Automatically generated README for this automation recipe: **process-mlperf-accuracy** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=process-mlperf-accuracy,6e809013816b42ea) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-mlperf-accuracy)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy* +* Output cached?
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "run mlperf mlcommons accuracy mlc process process-accuracy" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy` + +`cm run script --tags=run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy[,variations] [--input_flags]` + +*or* + +`cmr "run mlperf mlcommons accuracy mlc process process-accuracy"` + +`cmr "run mlperf mlcommons accuracy mlc process process-accuracy [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run mlperf mlcommons accuracy mlc process process-accuracy[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_default-pycocotools,openimages` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_pycocotools + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,mlcommons,mlperf,inference,src,-_openimages-nvidia-pycocotools + * CM names: `--adr.['for-pycocotools', 'accuracy-check-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + * `_nvidia-pycocotools,openimages` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_nvidia-pycocotools + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,mlcommons,mlperf,inference,src,_openimages-nvidia-pycocotools + * CM names: `--adr.['for-pycocotools', 'accuracy-check-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + +
+ + + * Group "**coco-evaluation-tool**" +
+ Click here to expand this section. + + * **`_default-pycocotools`** (default) + - Workflow: + * `_nvidia-pycocotools` + - Workflow: + +
+ + + * Group "**dataset**" +
+ Click here to expand this section. + + * `_cnndm` + - Environment variables: + - *CM_DATASET*: `cnndm` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,dataset,cnndm,_validation + - CM script: [get-dataset-cnndm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-cnndm) + * get,generic-python-lib,_package.rouge_score + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.nltk + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.evaluate + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.absl-py + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.rouge_score + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_coco2014` + - Environment variables: + - *CM_DATASET*: `coco2014` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,dataset,coco2014,original + * CM names: `--adr.['coco2014-dataset', 'coco2014-original']...` + - CM script: [get-dataset-coco2014](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-coco2014) + * **`_imagenet`** (default) + - Environment variables: + - *CM_DATASET*: `imagenet` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,dataset-aux,image-classification,imagenet-aux + - CM script: [get-dataset-imagenet-aux](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-imagenet-aux) + * get,generic-python-lib,_numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_kits19` + - Environment variables: + - *CM_DATASET*: `kits19` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,dataset,preprocessed,medical-imaging,kits19 + - CM script: [get-preprocessed-dataset-kits19](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-kits19) + * `_librispeech` + - Environment variables: + - *CM_DATASET*: `librispeech` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,dataset,preprocessed,speech-recognition,librispeech + - CM script: [get-preprocessed-dataset-librispeech](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-librispeech) + * `_open-orca` + - Environment variables: + - *CM_DATASET*: `openorca` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,dataset,openorca,preprocessed + * CM names: `--adr.['openorca-dataset']...` + - CM script: [get-preprocessed-dataset-openorca](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-openorca) + * get,ml-model,llama2 + * CM names: `--adr.['llama2-model']...` + - CM script: [get-ml-model-llama2](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-llama2) + * `_openimages` + - Environment variables: + - *CM_DATASET*: `openimages` + - Workflow: + 1. 
***Read "deps" on other CM scripts*** + * get,dataset-aux,openimages,annotations + * `if (CM_MLPERF_RUN_STYLE == valid)` + - CM script: [get-dataset-openimages-annotations](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openimages-annotations) + * get,dataset,openimages,original + * `if (CM_MLPERF_RUN_STYLE != valid)` + * CM names: `--adr.['openimages-original']...` + - CM script: [get-dataset-openimages](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openimages) + * get,generic-python-lib,_package.kiwisolver + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_squad` + - Environment variables: + - *CM_DATASET*: `squad` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_boto3 + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_package.transformers + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,dataset,squad,language-processing + * `if (CM_DATASET_SQUAD_VAL_PATH not in [])` + - CM script: [get-dataset-squad](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-squad) + * get,dataset-aux,squad-vocab + * `if (CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH != on)` + - CM script: [get-dataset-squad-vocab](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-squad-vocab) + * get,generic-python-lib,_torch + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_tokenization + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_terabyte` + - Environment variables: + - *CM_DATASET*: `squad` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_ujson + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_scikit-learn + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + +
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * `_float16` + - Environment variables: + - *CM_ACCURACY_DTYPE*: `float16` + - Workflow: + * **`_float32`** (default) + - Environment variables: + - *CM_ACCURACY_DTYPE*: `float32` + - Workflow: + * `_float64` + - Environment variables: + - *CM_ACCURACY_DTYPE*: `float64` + - Workflow: + * `_int16` + - Environment variables: + - *CM_ACCURACY_DTYPE*: `int16` + - Workflow: + * `_int32` + - Environment variables: + - *CM_ACCURACY_DTYPE*: `int32` + - Workflow: + * `_int64` + - Environment variables: + - *CM_ACCURACY_DTYPE*: `int64` + - Workflow: + * `_int8` + - Environment variables: + - *CM_ACCURACY_DTYPE*: `int8` + - Workflow: + +
+ + +#### Default variations + +`_default-pycocotools,_float32,_imagenet` + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--result_dir=value` → `CM_MLPERF_ACCURACY_RESULTS_DIR=value` + +**The above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "result_dir":...}) +``` + +
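+For example, a minimal, self-contained sketch of passing this flag through the Python CM API (the results path below is hypothetical): + +```python +import cmind + +# 'result_dir' is mapped to CM_MLPERF_ACCURACY_RESULTS_DIR via this +# script's input_mapping; the path below is a hypothetical example. +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy', + 'out':'con', + 'result_dir':'/path/to/mlperf-results'}) + +if r['return']>0: + print (r['error']) +``` +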
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags. + + +
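+For instance, a hedged sketch of overriding one such key from Python (CM_MLPERF_REGENERATE_ACCURACY_FILE is read by this script's customize.py; the flag value and results path below are illustrative): + +```python +import cmind + +# Force regeneration of accuracy.txt even if it already exists; +# the results path is a hypothetical example. +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy', + 'out':'con', + 'env':{'CM_MLPERF_REGENERATE_ACCURACY_FILE':'yes'}, + 'result_dir':'/path/to/mlperf-results'}) + +if r['return']>0: + print (r['error']) +``` +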
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-mlperf-accuracy/_cm.json)*** + * get,python3 + * CM names: `--adr.['python3', 'python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,mlcommons,inference,src + * CM names: `--adr.['inference-src', 'accuracy-check-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-mlperf-accuracy/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-mlperf-accuracy/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-mlperf-accuracy/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-mlperf-accuracy/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-mlperf-accuracy/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-mlperf-accuracy/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/process-mlperf-accuracy/_cm.json) + +___ +### Script output +`cmr "run mlperf mlcommons accuracy mlc process process-accuracy [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/process-mlperf-accuracy/_cm.json b/script/process-mlperf-accuracy/_cm.json new file mode 100644 index 0000000000..d8c1f5d373 --- /dev/null +++ b/script/process-mlperf-accuracy/_cm.json @@ -0,0 +1,304 @@ +{ + "alias": "process-mlperf-accuracy", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "category": "MLPerf benchmark support", + "clean_files": [], + "deps": [ + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3" + }, + { + "names": [ + "inference-src", + "accuracy-check-src" + ], + "tags": "get,mlcommons,inference,src" + } + ], + "input_mapping": { + "result_dir": "CM_MLPERF_ACCURACY_RESULTS_DIR" + }, + "new_state_keys": [ + "app_mlperf_inference_accuracy*" + ], + "tags": [ + "run", + "mlperf", + "mlcommons", + "accuracy", + "mlc", + "process", + "process-accuracy" + ], + "uid": "6e809013816b42ea", + "variations": { + "cnndm": { + "deps": [ + { + "tags": "get,dataset,cnndm,_validation" + }, + { + "tags": "get,generic-python-lib,_package.rouge_score" + }, + { + "tags": "get,generic-python-lib,_package.nltk" + }, + { + "tags": "get,generic-python-lib,_package.evaluate" + }, + { + "tags": "get,generic-python-lib,_package.absl-py" + }, + { + "tags": "get,generic-python-lib,_package.rouge_score" + } + ], + "env": { + "CM_DATASET": "cnndm" + }, + "group": "dataset" + }, + "float16": { + "env": { + "CM_ACCURACY_DTYPE": "float16" + }, + "group": "precision" + }, + "float32": { + "default": "true", + "env": { + "CM_ACCURACY_DTYPE": "float32" + }, + "group": "precision" + }, + "float64": { + "env": { + "CM_ACCURACY_DTYPE": "float64" + }, + "group": "precision" + }, + "imagenet": { + "default": "true", + "deps": [ + { + "tags":
"get,dataset-aux,image-classification,imagenet-aux" + }, + { + "tags": "get,generic-python-lib,_numpy" + } + ], + "env": { + "CM_DATASET": "imagenet" + }, + "group": "dataset" + }, + "int16": { + "env": { + "CM_ACCURACY_DTYPE": "int16" + }, + "group": "precision" + }, + "int32": { + "env": { + "CM_ACCURACY_DTYPE": "int32" + }, + "group": "precision" + }, + "int64": { + "env": { + "CM_ACCURACY_DTYPE": "int64" + }, + "group": "precision" + }, + "int8": { + "env": { + "CM_ACCURACY_DTYPE": "int8" + }, + "group": "precision" + }, + "kits19": { + "deps": [ + { + "tags": "get,dataset,preprocessed,medical-imaging,kits19" + } + ], + "env": { + "CM_DATASET": "kits19" + }, + "group": "dataset" + }, + "librispeech": { + "deps": [ + { + "tags": "get,dataset,preprocessed,speech-recognition,librispeech" + } + ], + "env": { + "CM_DATASET": "librispeech" + }, + "group": "dataset" + }, + "openimages": { + "deps": [ + { + "tags": "get,dataset-aux,openimages,annotations", + "enable_if_env": { + "CM_MLPERF_RUN_STYLE": [ + "valid" + ] + } + }, + { + "tags": "get,dataset,openimages,original", + "names": [ + "openimages-original" + ], + "skip_if_env": { + "CM_MLPERF_RUN_STYLE": [ + "valid" + ] + } + }, + { + "tags": "get,generic-python-lib,_package.kiwisolver" + } + ], + "env": { + "CM_DATASET": "openimages" + }, + "group": "dataset" + }, + "squad": { + "add_deps_recursive": { + "inference-src": { + "tags": "_deeplearningexamples" + } + }, + "deps": [ + { + "tags": "get,generic-python-lib,_boto3" + }, + { + "tags": "get,generic-python-lib,_package.transformers" + }, + { + "skip_if_env": { + "CM_DATASET_SQUAD_VAL_PATH": [] + }, + "tags": "get,dataset,squad,language-processing" + }, + { + "skip_if_env": { + "CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH": [ + "on" + ] + }, + "tags": "get,dataset-aux,squad-vocab" + }, + { + "tags": "get,generic-python-lib,_torch" + }, + { + "tags": "get,generic-python-lib,_tokenization" + } + ], + "env": { + "CM_DATASET": "squad" + }, + "group": "dataset" + }, + "terabyte": { + "deps": [ + { + "tags": "get,generic-python-lib,_ujson" + }, + { + "tags": "get,generic-python-lib,_scikit-learn" + }, + { + "tags": "get,generic-python-lib,_numpy" + } + ], + "env": { + "CM_DATASET": "squad" + }, + "group": "dataset" + }, + "open-orca": { + "deps": [ + { + "names": [ + "openorca-dataset" + ], + "tags": "get,dataset,openorca,preprocessed" + }, + { + "names": [ + "llama2-model" + ], + "tags": "get,ml-model,llama2" + } + ], + "env": { + "CM_DATASET": "openorca" + }, + "group": "dataset" + }, + "coco2014": { + "deps": [ + { + "names": [ + "coco2014-dataset", + "coco2014-original" + ], + "tags": "get,dataset,coco2014,original" + } + ], + "env": { + "CM_DATASET": "coco2014" + }, + "group": "dataset" + }, + "nvidia-pycocotools": { + "group": "coco-evaluation-tool" + }, + "default-pycocotools": { + "group": "coco-evaluation-tool", + "default": true + }, + "nvidia-pycocotools,openimages": { + "deps": [ + { + "tags": "get,generic-python-lib,_nvidia-pycocotools" + }, + { + "names": [ + "for-pycocotools", + "accuracy-check-src" + ], + "tags": "get,mlcommons,mlperf,inference,src,_openimages-nvidia-pycocotools" + } + ] + }, + "default-pycocotools,openimages": { + "deps": [ + { + "tags": "get,generic-python-lib,_pycocotools" + }, + { + "names": [ + "for-pycocotools", + "accuracy-check-src" + ], + "tags": "get,mlcommons,mlperf,inference,src,-_openimages-nvidia-pycocotools" + } + ] + } + } +} diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py new file mode 100644 
index 0000000000..f7b13c16d9 --- /dev/null +++ b/script/process-mlperf-accuracy/customize.py @@ -0,0 +1,163 @@ +from cmind import utils +import cmind as cm +import os + +def preprocess(i): + + os_info = i['os_info'] + + xsep = ';' if os_info['platform'] == 'windows' else ':' + + env = i['env'] + results_dir = env.get("CM_MLPERF_ACCURACY_RESULTS_DIR", "") + + if results_dir == "": + print("Please set CM_MLPERF_ACCURACY_RESULTS_DIR") + return {'return':1, 'error':'CM_MLPERF_ACCURACY_RESULTS_DIR is not set'} + + # In practice, we expect only one command line here + run_cmds = [] + + if env.get('CM_MAX_EXAMPLES', '') != '' and env.get('CM_MLPERF_RUN_STYLE', '') != 'valid': + max_examples_string = " --max_examples " + env['CM_MAX_EXAMPLES'] + else: + max_examples_string = "" + + results_dir_split = results_dir.split(xsep) + dataset = env['CM_DATASET'] + regenerate_accuracy_file = env.get('CM_MLPERF_REGENERATE_ACCURACY_FILE', False) + + for result_dir in results_dir_split: + + out_file = os.path.join(result_dir, 'accuracy.txt') + + if os.path.exists(out_file) and (os.stat(out_file).st_size != 0) and not regenerate_accuracy_file: + continue + + if dataset == "openimages": + if env.get('CM_DATASET_PATH_ROOT', '') != '': + dataset_dir = env['CM_DATASET_PATH_ROOT'] + if 'DATASET_ANNOTATIONS_FILE_PATH' in env: + del(env['DATASET_ANNOTATIONS_FILE_PATH']) + else: + env['DATASET_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + dataset_dir = os.getcwd() # not used, just to keep the script happy + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " "+"'" + os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", \ + "accuracy-openimages.py") + "'"+" --mlperf-accuracy-file "+"'" + os.path.join(result_dir, \ + "mlperf_log_accuracy.json") + "'"+" --openimages-dir "+"'" + dataset_dir + "'"+" --verbose > "+"'" + \ + out_file + "'" + + elif dataset == "imagenet": + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + "accuracy-imagenet.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, + "mlperf_log_accuracy.json") + "' --imagenet-val-file '" + os.path.join(env['CM_DATASET_AUX_PATH'], + "val.txt") + "' --dtype " + env.get('CM_ACCURACY_DTYPE', "float32") + " > '" + out_file + "'" + + elif dataset == "squad": + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_BERT_PATH'], + "accuracy-squad.py") + "' --val_data '" + env['CM_DATASET_SQUAD_VAL_PATH'] + \ + "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --vocab_file '" + env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] + \ + "' --out_file '" + os.path.join(result_dir, 'predictions.json') + \ + "' --features_cache_file '" + os.path.join(env['CM_MLPERF_INFERENCE_BERT_PATH'], 'eval_features.pickle') + \ + "' --output_dtype " + env['CM_ACCURACY_DTYPE'] + env.get('CM_OUTPUT_TRANSPOSED','') + max_examples_string + " > '" + out_file + "'" + + elif dataset == "cnndm": + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j", + "evaluation.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --dataset-file '" + env['CM_DATASET_EVAL_PATH'] + "'"+ " --dtype " + env.get('CM_ACCURACY_DTYPE', "float32") +" > '" + out_file + "'" + + elif dataset == "openorca": + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "llama2-70b", + "evaluate-accuracy.py") + "' --checkpoint-path '" + 
env['CM_ML_MODEL_LLAMA2_FILE_WITH_PATH'] + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --dataset-file '" + env['CM_DATASET_PREPROCESSED_PATH'] + "'"+ " --dtype " + env.get('CM_ACCURACY_DTYPE', "int32") +" > '" + out_file + "'" + + + elif dataset == "coco2014": + env['+PYTHONPATH'] = [ os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools") ] + #env['DATASET_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", + "accuracy_coco.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --caption-path '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "coco2014", "captions", "captions_source.tsv") + "' > '" + out_file + "'" + + elif dataset == "kits19": + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_3DUNET_PATH'], + "accuracy_kits.py") + \ + "' --preprocessed_data_dir '" + env['CM_DATASET_PREPROCESSED_PATH'] +\ + "' --postprocessed_data_dir '" + result_dir +\ + "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --output_dtype " + env['CM_ACCURACY_DTYPE'] +" > '" + out_file + "'" + + elif dataset == "librispeech": + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_RNNT_PATH'], + "accuracy_eval.py") + \ + "' --dataset_dir '" + os.path.join(env['CM_DATASET_PREPROCESSED_PATH'], "..") +\ + "' --manifest '" + env['CM_DATASET_PREPROCESSED_JSON'] +\ + "' --log_dir '" + result_dir + \ + "' --output_dtype " + env['CM_ACCURACY_DTYPE'] +" > '" + out_file + "'" + + elif dataset == "terabyte": + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_DLRM_PATH'], "tools", + "accuracy-dlrm.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, + "mlperf_log_accuracy.json") + \ + "' --dtype " + env.get('CM_ACCURACY_DTYPE', "float32") + " > '" + out_file + "'" + + else: + return {'return': 1, 'error': 'Unsupported dataset'} + + outfile = os.path.join(result_dir, "accuracy.txt") + if not os.path.exists(outfile) or (os.stat(outfile).st_size == 0) or env.get("CM_REGENERATE_MEASURE_FILES", False): + run_cmds.append(CMD) + + + if os_info['platform'] == 'windows': + env['CM_RUN_CMDS'] = ('\n'.join(run_cmds)).replace("'", '"').replace('>','^>') + else: + env['CM_RUN_CMDS'] = "??".join(run_cmds) + + return {'return':0} + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + + xsep = ';' if os_info['platform'] == 'windows' else ':' + + results_dir = env.get("CM_MLPERF_ACCURACY_RESULTS_DIR", "") + + results_dir_split = results_dir.split(xsep) + + for result_dir in results_dir_split: + accuracy_file = os.path.join(result_dir, "accuracy.txt") + + if os.path.exists(accuracy_file): + print ('') + print ('Accuracy file: {}'.format(accuracy_file)) + print ('') + + x = '' + with open(accuracy_file, "r") as fp: + x=fp.read() + + if x!='': + print(x) + + # Trying to extract accuracy dict + for y in x.split('\n'): + if y.startswith('{') and y.endswith('}'): + + import json + + try: + z=json.loads(y) + state['app_mlperf_inference_accuracy']=z + + break + except ValueError as e: + pass + + print ('') + return {'return':0} + diff --git a/script/process-mlperf-accuracy/run.bat b/script/process-mlperf-accuracy/run.bat new file mode 100644 index 0000000000..82705126d1 --- /dev/null +++ 
b/script/process-mlperf-accuracy/run.bat @@ -0,0 +1,8 @@ +echo Running command: +echo. +echo %CM_RUN_CMDS% +echo. + +%CM_RUN_CMDS% + +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/process-mlperf-accuracy/run.sh b/script/process-mlperf-accuracy/run.sh new file mode 100644 index 0000000000..6268860cbd --- /dev/null +++ b/script/process-mlperf-accuracy/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +IFS="??" read -r -a cmd_array <<< "$CM_RUN_CMDS"  # commands are joined with "??" by customize.py +for cmd in "${cmd_array[@]}" +do + echo "${cmd}" + eval ${cmd} + test $? -eq 0 || exit 1 +done diff --git a/script/prune-bert-models/README-extra.md b/script/prune-bert-models/README-extra.md new file mode 100644 index 0000000000..e98cb63328 --- /dev/null +++ b/script/prune-bert-models/README-extra.md @@ -0,0 +1 @@ +Moved [here](https://github.com/ctuning/cm4research/blob/main/script/reproduce-neurips-paper-2022-arxiv-2204.09656/README-extra.md). diff --git a/script/prune-bert-models/README.md b/script/prune-bert-models/README.md new file mode 100644 index 0000000000..51fb8572af --- /dev/null +++ b/script/prune-bert-models/README.md @@ -0,0 +1,187 @@ +Automatically generated README for this automation recipe: **prune-bert-models** + +Category: **AI/ML optimization** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=prune-bert-models,76182d4896414216) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-bert-models)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *prune,bert-models,bert-prune,prune-bert-models* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "prune bert-models bert-prune prune-bert-models" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=prune,bert-models,bert-prune,prune-bert-models` + +`cm run script --tags=prune,bert-models,bert-prune,prune-bert-models[,variations] [--input_flags]` + +*or* + +`cmr "prune bert-models bert-prune prune-bert-models"` + +`cmr "prune bert-models bert-prune prune-bert-models [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'prune,bert-models,bert-prune,prune-bert-models', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="prune,bert-models,bert-prune,prune-bert-models"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=prune,bert-models,bert-prune,prune-bert-models) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "prune bert-models bert-prune prune-bert-models[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_model.#` + - Environment variables: + - *CM_BERT_PRUNE_MODEL_NAME*: `#` + - *CM_MODEL_ZOO_STUB*: `#` + - Workflow: + * `_path.#` + - Environment variables: + - *CM_BERT_PRUNE_CKPT_PATH*: `#` + - Workflow: + * `_task.#` + - Environment variables: + - *CM_BERT_PRUNE_TASK*: `#` + - Workflow: + +
+ + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--constraint=value` → `CM_BERT_PRUNE_CONSTRAINT=value` +* `--output_dir=value` → `CM_BERT_PRUNE_OUTPUT_DIR=value` + +**The above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "constraint":...}) +``` + +
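+For example, a minimal sketch of setting both flags from Python (the constraint value matches the documented default; the output directory below is hypothetical): + +```python +import cmind + +# '--constraint' maps to CM_BERT_PRUNE_CONSTRAINT and '--output_dir' +# to CM_BERT_PRUNE_OUTPUT_DIR; the output path is a hypothetical example. +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'prune,bert-models,bert-prune,prune-bert-models', + 'out':'con', + 'constraint':'0.5', + 'output_dir':'/tmp/pruned-bert-output'}) + +if r['return']>0: + print (r['error']) +``` +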
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags. + +* CM_BERT_PRUNE_TASK: `squad` +* CM_BERT_PRUNE_MODEL_NAME: `bert-large-uncased` +* CM_MODEL_ZOO_STUB: `bert-large-uncased` +* CM_BERT_PRUNE_CONSTRAINT: `0.5` + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-bert-models/_cm.json)*** + * get,python3 + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_numpy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_scipy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_cupy + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_tqdm + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torch_cuda + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_datasets + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_transformers + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_scikit-learn + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,git,repo,_repo.https://github.com/cknowledge/retraining-free-pruning + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + * get,ml-model,model,zoo,model-zoo,huggingface,_prune + * CM names: `--adr.['get-model']...` + - CM script: [get-ml-model-huggingface-zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-huggingface-zoo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-bert-models/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-bert-models/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-bert-models/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-bert-models/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-bert-models/customize.py)*** + 1.
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-bert-models/_cm.json) + +___ +### Script output +`cmr "prune bert-models bert-prune prune-bert-models [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/prune-bert-models/_cm.json b/script/prune-bert-models/_cm.json new file mode 100644 index 0000000000..a6ded2443c --- /dev/null +++ b/script/prune-bert-models/_cm.json @@ -0,0 +1,87 @@ +{ + "alias": "prune-bert-models", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "AI/ML optimization", + "default_env": { + "CM_BERT_PRUNE_TASK":"squad", + "CM_BERT_PRUNE_MODEL_NAME":"bert-large-uncased", + "CM_MODEL_ZOO_STUB":"bert-large-uncased", + "CM_BERT_PRUNE_CONSTRAINT": "0.5" + }, + "input_mapping": { + "constraint": "CM_BERT_PRUNE_CONSTRAINT", + "output_dir": "CM_BERT_PRUNE_OUTPUT_DIR" + }, + "deps": [ + { + "tags": "get,python3" + }, + { + "tags": "get,generic-python-lib,_numpy" + }, + { + "tags": "get,generic-python-lib,_scipy" + }, + { + "tags": "get,generic-python-lib,_cupy" + }, + { + "tags": "get,generic-python-lib,_tqdm" + }, + { + "tags": "get,generic-python-lib,_torch_cuda" + }, + { + "tags": "get,generic-python-lib,_datasets" + }, + { + "tags": "get,generic-python-lib,_transformers" + }, + { + "tags": "get,generic-python-lib,_scikit-learn" + }, + { + "tags": "get,git,repo,_repo.https://github.com/cknowledge/retraining-free-pruning", + "env": { + "CM_GIT_ENV_KEY":"BERT_PRUNER_NEURIPS_2022" + } + }, + { + "names": [ + "get-model" + ], + "tags": "get,ml-model,model,zoo,model-zoo,huggingface,_prune" + } + ], + "tags": [ + "prune", + "bert-models", + "bert-prune", + "prune-bert-models" + ], + "uid": "76182d4896414216", + "variations":{ + "path.#":{ + "env":{ + "CM_BERT_PRUNE_CKPT_PATH":"#" + } + }, + "task.#":{ + "env":{ + "CM_BERT_PRUNE_TASK":"#" + } + }, + "model.#":{ + "adr":{ + "get-model":{ + "tags":"_model-stub.#" + } + }, + "env":{ + "CM_BERT_PRUNE_MODEL_NAME":"#", + "CM_MODEL_ZOO_STUB":"#" + } + } + } +} diff --git a/script/prune-bert-models/customize.py b/script/prune-bert-models/customize.py new file mode 100644 index 0000000000..34e0810231 --- /dev/null +++ b/script/prune-bert-models/customize.py @@ -0,0 +1,48 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + ckpt_path = env.get('CM_BERT_PRUNE_CKPT_PATH','') + if ckpt_path == '': + p = env['CM_ML_MODEL_FILE_WITH_PATH'] + x = os.listdir(p) + for y in x: + if y.startswith('models--'): + z = os.path.join(p,y) + if os.path.isdir(z): + z1 = os.path.join(z, 'snapshots') + if os.path.isdir(z1): + z2 = os.listdir(z1) + if len(z2)>0: + ckpt_path=os.path.join(z1, z2[0]) + + env['CM_BERT_PRUNE_CKPT_PATH'] = ckpt_path + + out_dir=env.get('CM_BERT_PRUNE_OUTPUT_DIR','') + if out_dir == '': + out_dir = os.path.join(os.getcwd(), 'pruned-model-output') + env['CM_BERT_PRUNE_OUTPUT_DIR'] = out_dir + + print ('') + print ('Local CM cache path to the updated BERT pruner src from NeurIPS 2022: ' + env['CM_GIT_REPO_BERT_PRUNER_NEURIPS_2022_CHECKOUT_PATH']) + + print ('') + for k in ["CM_ML_MODEL_FILE_WITH_PATH", "CM_BERT_PRUNE_CKPT_PATH", "CM_BERT_PRUNE_OUTPUT_DIR"]: + print ('ENV["{}"]: {}'.format(k, env[k])) + + print ('') + + return {'return': 0} + +def postprocess(i): + + env = i['env'] + + print("Entered postprocess") + + return {'return': 0} diff --git a/script/prune-bert-models/run.sh 
b/script/prune-bert-models/run.sh new file mode 100644 index 0000000000..68c0779688 --- /dev/null +++ b/script/prune-bert-models/run.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +echo "====================================================================" +echo "Start pruning ..." +echo "" + +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} + +time ${CM_PYTHON_BIN_WITH_PATH} \ + ${CM_GIT_REPO_BERT_PRUNER_NEURIPS_2022_CHECKOUT_PATH}/main.py \ + --model_name ${CM_BERT_PRUNE_MODEL_NAME} \ + --task_name ${CM_BERT_PRUNE_TASK} \ + --ckpt_dir ${CM_BERT_PRUNE_CKPT_PATH} \ + --constraint ${CM_BERT_PRUNE_CONSTRAINT} \ + --output_dir ${CM_BERT_PRUNE_OUTPUT_DIR} + +test $? -eq 0 || exit $? + +echo "====================================================================" diff --git a/script/prune-docker/README.md b/script/prune-docker/README.md new file mode 100644 index 0000000000..496e0b87d3 --- /dev/null +++ b/script/prune-docker/README.md @@ -0,0 +1,120 @@ +Automatically generated README for this automation recipe: **prune-docker** + +Category: **Docker automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=prune-docker,27ead88809bb4d4e) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-docker)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *prune,docker* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "prune docker" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=prune,docker` + +`cm run script --tags=prune,docker ` + +*or* + +`cmr "prune docker"` + +`cmr "prune docker " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'prune,docker', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="prune,docker"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=prune,docker) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "prune docker" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-docker/_cm.json) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-docker/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-docker/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-docker/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-docker/_cm.json) + 1. Run "postprocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-docker/_cm.json) + +___ +### Script output +`cmr "prune docker " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/prune-docker/_cm.json b/script/prune-docker/_cm.json new file mode 100644 index 0000000000..c7f9cfcb31 --- /dev/null +++ b/script/prune-docker/_cm.json @@ -0,0 +1,11 @@ +{ + "alias": "prune-docker", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Docker automation", + "tags": [ + "prune", + "docker" + ], + "uid": "27ead88809bb4d4e" +} diff --git a/script/prune-docker/run.bat b/script/prune-docker/run.bat new file mode 100644 index 0000000000..980baad8ec --- /dev/null +++ b/script/prune-docker/run.bat @@ -0,0 +1 @@ +docker system prune -a --volumes diff --git a/script/prune-docker/run.sh b/script/prune-docker/run.sh new file mode 100644 index 0000000000..eb849e376a --- /dev/null +++ b/script/prune-docker/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +docker system prune -a --volumes diff --git a/script/publish-results-to-dashboard/README.md b/script/publish-results-to-dashboard/README.md new file mode 100644 index 0000000000..d291faab3e --- /dev/null +++ b/script/publish-results-to-dashboard/README.md @@ -0,0 +1,125 @@ +Automatically generated README for this automation recipe: **publish-results-to-dashboard** + +Category: **Dashboard automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=publish-results-to-dashboard,4af3a2d09f14412b) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/publish-results-to-dashboard)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *publish-results,dashboard* +* Output cached?
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "publish-results dashboard" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=publish-results,dashboard` + +`cm run script --tags=publish-results,dashboard ` + +*or* + +`cmr "publish-results dashboard"` + +`cmr "publish-results dashboard " ` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'publish-results,dashboard', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="publish-results,dashboard"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=publish-results,dashboard) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "publish-results dashboard" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags. + + +
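+As an illustration, a hedged sketch of redirecting the dashboard from Python (CM_MLPERF_DASHBOARD_WANDB_USER and CM_MLPERF_DASHBOARD_WANDB_PROJECT are read by this script's code.py; the entity and project names below are hypothetical): + +```python +import cmind + +# Publish to your own W&B entity/project instead of the defaults; +# 'my-entity' and 'my-project' are hypothetical names. +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'publish-results,dashboard', + 'out':'con', + 'env':{'CM_MLPERF_DASHBOARD_WANDB_USER':'my-entity', + 'CM_MLPERF_DASHBOARD_WANDB_PROJECT':'my-project'}}) + +if r['return']>0: + print (r['error']) +``` +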
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/publish-results-to-dashboard/_cm.json)*** + * get,python3 + * CM names: `--adr.['python3', 'python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_wandb + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/publish-results-to-dashboard/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/publish-results-to-dashboard/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/publish-results-to-dashboard/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/publish-results-to-dashboard/_cm.json) + 1. Run "postprocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/publish-results-to-dashboard/_cm.json) + +___ +### Script output +`cmr "publish-results dashboard " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/publish-results-to-dashboard/_cm.json b/script/publish-results-to-dashboard/_cm.json new file mode 100644 index 0000000000..df18b22f88 --- /dev/null +++ b/script/publish-results-to-dashboard/_cm.json @@ -0,0 +1,23 @@ +{ + "alias": "publish-results-to-dashboard", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Dashboard automation", + "deps": [ + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3" + }, + { + "tags": "get,generic-python-lib,_wandb" + } + ], + "tags": [ + "publish-results", + "dashboard" + ], + "uid": "4af3a2d09f14412b" +} diff --git a/script/publish-results-to-dashboard/code.py b/script/publish-results-to-dashboard/code.py new file mode 100644 index 0000000000..1c9732fa06 --- /dev/null +++ b/script/publish-results-to-dashboard/code.py @@ -0,0 +1,92 @@ +# Developer: Grigori Fursin + +import os + +def main(): + # For now, a quick prototype hardwired to "summary.json" from MLPerf + # Later we need to clean it up and make it universal + + print ('') + print ('Reading summary.json ...') + print ('') + + import json + filename = os.environ.get('MLPERF_INFERENCE_SUBMISSION_SUMMARY','') + if filename=='': + filename = 'summary' + filename+='.json' + + f = open(filename) + + results = json.load(f) + + f.close() + + print ('=========================================================') + print ('Sending results to W&B dashboard ...') + print ('') + + import wandb + + env = os.environ + + dashboard_user = env.get('CM_MLPERF_DASHBOARD_WANDB_USER', '') + if dashboard_user == '': dashboard_user = 'cmind' + + dashboard_project = env.get('CM_MLPERF_DASHBOARD_WANDB_PROJECT', '') + if dashboard_project == '': dashboard_project = 'cm-mlperf-dse-testing' + + for k in results: + + result=results[k] + + organization = str(result.get('Organization', '')) + if organization == '': organization = 'anonymous' + + label = organization + + system_name = str(result.get('SystemName','')) + if system_name != '': label += '(' + system_name + ')' + + qps = result.get('Result', 0.0) + 
accuracy = result.get('Accuracy', 0.0) / 100 + + result['performance'] = qps + result['qps'] = qps + result['accuracy'] = accuracy + + # Check extra env variables + x = { + "lang": "CM_MLPERF_LANG", + "device": "CM_MLPERF_DEVICE", + "submitter": "CM_MLPERF_SUBMITTER", + "backend": "CM_MLPERF_BACKEND", + "model": "CM_MLPERF_MODEL", + "run_style": "CM_MLPERF_RUN_STYLE", + "rerun": "CM_RERUN", + "hw_name": "CM_HW_NAME", + "max_batchsize": "CM_MLPERF_LOADGEN_MAX_BATCHSIZE", + "num_threads": "CM_NUM_THREADS", + "scenario": "CM_MLPERF_LOADGEN_SCENARIO", + "test_query_count": "CM_TEST_QUERY_COUNT", + "run_checker": "CM_RUN_SUBMISSION_CHECKER", + "skip_truncation": "CM_SKIP_TRUNCATE_ACCURACY" + } + + for k in x: + env_key = x[k] + if os.environ.get(env_key,'')!='': + result['cm_misc_input_'+k]=os.environ[env_key] + + wandb.init(entity = dashboard_user, + project = dashboard_project, + name = label) + + wandb.log(result) + + wandb.finish() + + print ('=========================================================') + +if __name__ == '__main__': + main() diff --git a/script/publish-results-to-dashboard/run.bat b/script/publish-results-to-dashboard/run.bat new file mode 100644 index 0000000000..37f249b0fe --- /dev/null +++ b/script/publish-results-to-dashboard/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/publish-results-to-dashboard/run.sh b/script/publish-results-to-dashboard/run.sh new file mode 100644 index 0000000000..288833adb6 --- /dev/null +++ b/script/publish-results-to-dashboard/run.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# For now login to WANDB anonymously +wandb login --anonymously --relogin + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +test $? -eq 0 || exit $? diff --git a/script/pull-git-repo/README.md b/script/pull-git-repo/README.md new file mode 100644 index 0000000000..a073a8c35e --- /dev/null +++ b/script/pull-git-repo/README.md @@ -0,0 +1,136 @@ +Automatically generated README for this automation recipe: **pull-git-repo** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=pull-git-repo,c23132ed65c4421d) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/pull-git-repo)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *pull,git,repo,repository* +* Output cached? 
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "pull git repo repository" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=pull,git,repo,repository` + +`cm run script --tags=pull,git,repo,repository [--input_flags]` + +*or* + +`cmr "pull git repo repository"` + +`cmr "pull git repo repository " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'pull,git,repo,repository', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="pull,git,repo,repository"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=pull,git,repo,repository) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "pull git repo repository" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--path=value` → `CM_GIT_CHECKOUT_PATH=value` + +**The above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "path":...}) +``` + +
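+For example, a minimal sketch of pulling an existing checkout from Python (the repository path below is hypothetical): + +```python +import cmind + +# '--path' maps to CM_GIT_CHECKOUT_PATH; customize.py fails if it is not set, +# so a valid local clone path must be supplied (hypothetical here). +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'pull,git,repo,repository', + 'out':'con', + 'path':'/path/to/local/clone'}) + +if r['return']>0: + print (r['error']) +``` +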
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/pull-git-repo/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/pull-git-repo/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/pull-git-repo/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/pull-git-repo/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/pull-git-repo/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/pull-git-repo/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/pull-git-repo/_cm.json) + +___ +### Script output +`cmr "pull git repo repository " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/pull-git-repo/_cm.json b/script/pull-git-repo/_cm.json new file mode 100644 index 0000000000..d531c013a6 --- /dev/null +++ b/script/pull-git-repo/_cm.json @@ -0,0 +1,27 @@ +{ + "alias": "pull-git-repo", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "DevOps automation", + "default_env": { + }, + "deps": [ + { + "tags": "detect,os" + } + ], + "input_mapping": { + "path": "CM_GIT_CHECKOUT_PATH" + }, + "new_env_keys": [ + ], + "tags": [ + "pull", + "git", + "repo", + "repository" + ], + "uid": "c23132ed65c4421d", + "variations": { + } +} diff --git a/script/pull-git-repo/customize.py b/script/pull-git-repo/customize.py new file mode 100644 index 0000000000..021d42465e --- /dev/null +++ b/script/pull-git-repo/customize.py @@ -0,0 +1,28 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + meta = i['meta'] + + if 'CM_GIT_CHECKOUT_PATH' not in env: + return {'return':1, 'error': 'CM_GIT_CHECKOUT_PATH is not set'} + + env['CM_GIT_PULL_CMD'] = "git pull --rebase" + + return {'return':0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return':0} diff --git a/script/pull-git-repo/run.sh b/script/pull-git-repo/run.sh new file mode 100644 index 0000000000..66cf8406f0 --- /dev/null +++ b/script/pull-git-repo/run.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +CUR_DIR=$PWD +SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} + +path=${CM_GIT_CHECKOUT_PATH} +echo "cd $path" + +cd $path +test $? -eq 0 || exit 1 + +echo ${CM_GIT_PULL_CMD} +eval ${CM_GIT_PULL_CMD} +test $?
-eq 0 || exit 1 + +cd $CUR_DIR diff --git a/script/push-csv-to-spreadsheet/README.md b/script/push-csv-to-spreadsheet/README.md new file mode 100644 index 0000000000..6f8cfcfa12 --- /dev/null +++ b/script/push-csv-to-spreadsheet/README.md @@ -0,0 +1,144 @@ +Automatically generated README for this automation recipe: **push-csv-to-spreadsheet** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=push-csv-to-spreadsheet,5ec9e5fa7feb4fff) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-csv-to-spreadsheet)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *push,google-spreadsheet,spreadsheet,push-to-google-spreadsheet* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=push,google-spreadsheet,spreadsheet,push-to-google-spreadsheet` + +`cm run script --tags=push,google-spreadsheet,spreadsheet,push-to-google-spreadsheet [--input_flags]` + +*or* + +`cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet"` + +`cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'push,google-spreadsheet,spreadsheet,push-to-google-spreadsheet', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="push,google-spreadsheet,spreadsheet,push-to-google-spreadsheet"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=push,google-spreadsheet,spreadsheet,push-to-google-spreadsheet) to generate the CM command. + +#### Run this script via Docker (beta) + +`cm docker script "push google-spreadsheet spreadsheet push-to-google-spreadsheet" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +<details>
+Click here to expand this section. + +* `--csv_file=value` → `CM_CSV_FILE_PATH=value` +* `--sheet_name=value` → `CM_GOOGLE_SHEET_NAME=value` +* `--spreadsheet_id=value` → `CM_GOOGLE_SPREADSHEET_ID=value` + +**The above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "csv_file":...}) +``` + +</details>
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags. + +* CM_GOOGLE_SPREADSHEET_ID: `1gMHjXmFmwZR4-waPPyxy5Pc3VARqX3kKUWxkP97Xa6Y` + +</details>
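+A hedged usage sketch (the CSV file name, sheet name, and `SPREADSHEET_ID` variable below are placeholders): the underlying `google_api.py` expects OAuth client secrets in `credentials.json` in the working directory and caches the user token in `token.json`.
+
+```bash
+# Hypothetical example: push a local CSV file to a spreadsheet you own
+export SPREADSHEET_ID=your-own-spreadsheet-id   # placeholder
+cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet" --csv_file=$PWD/results.csv --sheet_name=Sheet1 --spreadsheet_id=$SPREADSHEET_ID
+```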
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-csv-to-spreadsheet/_cm.json)*** + * get,python3 + * CM names: `--adr.['python3', 'python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-python-lib,_google-api-python-client + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_google-auth-oauthlib + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-csv-to-spreadsheet/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-csv-to-spreadsheet/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-csv-to-spreadsheet/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-csv-to-spreadsheet/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-csv-to-spreadsheet/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-csv-to-spreadsheet/_cm.json) + +___ +### Script output +`cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/push-csv-to-spreadsheet/_cm.json b/script/push-csv-to-spreadsheet/_cm.json new file mode 100644 index 0000000000..79fd45ea19 --- /dev/null +++ b/script/push-csv-to-spreadsheet/_cm.json @@ -0,0 +1,36 @@ +{ + "alias": "push-csv-to-spreadsheet", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "DevOps automation", + "default_env": { + "CM_GOOGLE_SPREADSHEET_ID": "1gMHjXmFmwZR4-waPPyxy5Pc3VARqX3kKUWxkP97Xa6Y" + }, + "deps": [ + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3" + }, + { + "tags": "get,generic-python-lib,_google-api-python-client" + }, + { + "tags": "get,generic-python-lib,_google-auth-oauthlib" + } + ], + "input_mapping": { + "spreadsheet_id": "CM_GOOGLE_SPREADSHEET_ID", + "sheet_name": "CM_GOOGLE_SHEET_NAME", + "csv_file": "CM_CSV_FILE_PATH" + }, + "tags": [ + "push", + "google-spreadsheet", + "spreadsheet", + "push-to-google-spreadsheet" + ], + "uid": "5ec9e5fa7feb4fff" +} diff --git a/script/push-csv-to-spreadsheet/customize.py b/script/push-csv-to-spreadsheet/customize.py new file mode 100644 index 0000000000..e80f262666 --- /dev/null +++ b/script/push-csv-to-spreadsheet/customize.py @@ -0,0 +1,15 @@ +from cmind import utils +import cmind as cm +import os + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + meta = i['meta'] + automation = i['automation'] + + return {'return':0} + +def postprocess(i): + return {'return':0} diff --git a/script/push-csv-to-spreadsheet/google_api.py b/script/push-csv-to-spreadsheet/google_api.py new file mode 100644 index 0000000000..d1e7643aa4 --- /dev/null +++ b/script/push-csv-to-spreadsheet/google_api.py @@ -0,0 +1,55 @@ +from __future__ import print_function + +import os.path
+import os +import csv +from google.auth.transport.requests import Request +from google.oauth2.credentials import Credentials +from google_auth_oauthlib.flow import InstalledAppFlow +from googleapiclient.discovery import build +from googleapiclient.errors import HttpError + +# If modifying these scopes, delete the file token.json. +SCOPES = ['https://www.googleapis.com/auth/spreadsheets'] + +# The ID of the target Google spreadsheet. +DOCUMENT_ID = os.environ['CM_GOOGLE_SPREADSHEET_ID'] + + +def main(): + """Pushes the CSV file from CM_CSV_FILE_PATH + to the Google spreadsheet given by CM_GOOGLE_SPREADSHEET_ID. + """ + creds = None + # The file token.json stores the user's access and refresh tokens, and is + # created automatically when the authorization flow completes for the first + # time. + if os.path.exists('token.json'): + creds = Credentials.from_authorized_user_file('token.json', SCOPES) + # If there are no (valid) credentials available, let the user log in. + if not creds or not creds.valid: + if creds and creds.expired and creds.refresh_token: + creds.refresh(Request()) + else: + flow = InstalledAppFlow.from_client_secrets_file( + 'credentials.json', SCOPES) + creds = flow.run_local_server(port=0) + # Save the credentials for the next run + with open('token.json', 'w') as token: + token.write(creds.to_json()) + + try: + service = build("sheets", "v4", credentials=creds) + sheet_name = os.environ.get('CM_GOOGLE_SHEET_NAME', 'Sheet1') + csv_file = os.environ['CM_CSV_FILE_PATH'] + + with open(csv_file, "r") as f: + values = [r for r in csv.reader(f)] + service.spreadsheets().values().update(spreadsheetId=DOCUMENT_ID, range=sheet_name, valueInputOption="USER_ENTERED", body={"values": values}).execute() + + except HttpError as err: + print(err) + + +if __name__ == '__main__': + main() diff --git a/script/push-csv-to-spreadsheet/run.sh b/script/push-csv-to-spreadsheet/run.sh new file mode 100644 index 0000000000..5ba4257d54 --- /dev/null +++ b/script/push-csv-to-spreadsheet/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/google_api.py diff --git a/script/push-mlperf-inference-results-to-github/README.md b/script/push-mlperf-inference-results-to-github/README.md new file mode 100644 index 0000000000..a7b565db92 --- /dev/null +++ b/script/push-mlperf-inference-results-to-github/README.md @@ -0,0 +1,151 @@ +Automatically generated README for this automation recipe: **push-mlperf-inference-results-to-github** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=push-mlperf-inference-results-to-github,36c2ffd5df5d453a) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-mlperf-inference-results-to-github)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *push,mlperf,mlperf-inference-results,publish-results,inference,submission,github* +* Output cached?
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "push mlperf mlperf-inference-results publish-results inference submission github" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=push,mlperf,mlperf-inference-results,publish-results,inference,submission,github` + +`cm run script --tags=push,mlperf,mlperf-inference-results,publish-results,inference,submission,github [--input_flags]` + +*or* + +`cmr "push mlperf mlperf-inference-results publish-results inference submission github"` + +`cmr "push mlperf mlperf-inference-results publish-results inference submission github " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'push,mlperf,mlperf-inference-results,publish-results,inference,submission,github', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="push,mlperf,mlperf-inference-results,publish-results,inference,submission,github"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=push,mlperf,mlperf-inference-results,publish-results,inference,submission,github) to generate the CM command. + +#### Run this script via Docker (beta) + +`cm docker script "push mlperf mlperf-inference-results publish-results inference submission github" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +<details>
+Click here to expand this section. + +* `--branch=value` → `CM_GIT_BRANCH=value` +* `--commit_message=value` → `CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE=value` +* `--repo_branch=value` → `CM_GIT_BRANCH=value` +* `--repo_url=value` → `CM_MLPERF_RESULTS_GIT_REPO_URL=value` +* `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` + +**The above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "branch":...}) +``` + +</details>
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags. + +* CM_MLPERF_RESULTS_GIT_REPO_URL: `https://github.com/ctuning/mlperf_inference_submissions_v4.0` + +</details>
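+A hedged usage sketch (the repository URL, branch, and submission directory below are placeholders): if `--commit_message` is not set, `customize.py` defaults it to `Added new results`.
+
+```bash
+# Hypothetical example: publish a local submission tree to your own results repo
+cmr "push mlperf mlperf-inference-results publish-results inference submission github" --repo_url=https://github.com/your-org/your-results-repo --branch=main --submission_dir=$HOME/mlperf_submission --commit_message="Added new results"
+```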
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-mlperf-inference-results-to-github/_cm.json)*** + * get,python3 + * CM names: `--adr.['python3', 'python']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,generic-sys-util,_rsync + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,mlperf,submission,dir + * `if (CM_MLPERF_INFERENCE_SUBMISSION_DIR != on)` + * CM names: `--adr.['get-mlperf-submission-dir']...` + - CM script: [get-mlperf-inference-submission-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-submission-dir) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-mlperf-inference-results-to-github/customize.py)*** + 1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-mlperf-inference-results-to-github/_cm.json)*** + * get,git,repo + * CM names: `--adr.['get-git-repo']...` + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-mlperf-inference-results-to-github/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-mlperf-inference-results-to-github/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-mlperf-inference-results-to-github/customize.py)*** + 1.
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/push-mlperf-inference-results-to-github/_cm.json) + +___ +### Script output +`cmr "push mlperf mlperf-inference-results publish-results inference submission github " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/push-mlperf-inference-results-to-github/_cm.json b/script/push-mlperf-inference-results-to-github/_cm.json new file mode 100644 index 0000000000..132b590b69 --- /dev/null +++ b/script/push-mlperf-inference-results-to-github/_cm.json @@ -0,0 +1,53 @@ +{ + "alias": "push-mlperf-inference-results-to-github", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "MLPerf benchmark support", + "deps": [ + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3" + }, + { + "tags": "get,generic-sys-util,_rsync" + }, + { + "tags": "get,mlperf,submission,dir", + "names": [ + "get-mlperf-submission-dir" + ], + "skip_if_env": { + "CM_MLPERF_INFERENCE_SUBMISSION_DIR": [ "on" ] + } + } + ], + "prehook_deps": [ + { + "names": [ "get-git-repo" ], + "tags": "get,git,repo" + } + ], + "default_env": { + "CM_MLPERF_RESULTS_GIT_REPO_URL": "https://github.com/ctuning/mlperf_inference_submissions_v4.0" + }, + "input_mapping": { + "repo_url": "CM_MLPERF_RESULTS_GIT_REPO_URL", + "submission_dir": "CM_MLPERF_INFERENCE_SUBMISSION_DIR", + "repo_branch": "CM_GIT_BRANCH", + "branch": "CM_GIT_BRANCH", + "commit_message": "CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE" + }, + "tags": [ + "push", + "mlperf", + "mlperf-inference-results", + "publish-results", + "inference", + "submission", + "github" + ], + "uid": "36c2ffd5df5d453a" +} diff --git a/script/push-mlperf-inference-results-to-github/customize.py b/script/push-mlperf-inference-results-to-github/customize.py new file mode 100644 index 0000000000..dcdc3c0bfe --- /dev/null +++ b/script/push-mlperf-inference-results-to-github/customize.py @@ -0,0 +1,35 @@ +from cmind import utils +import cmind as cm +import os + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + meta = i['meta'] + automation = i['automation'] + + repo = env.get('CM_MLPERF_RESULTS_GIT_REPO_URL', '') + if repo.strip() == '': + return {'return': 1, 'error': 'Invalid GIT_REPO_URL for MLPERF results'} + + branch = env.get('CM_GIT_BRANCH', '') + if branch: + extra_tags_string = f",_branch.{branch}" + else: + extra_tags_string = "" + + r = automation.update_deps({'deps':meta['prehook_deps'], + 'update_deps':{ + 'get-git-repo':{ + 'tags':"_repo."+repo+extra_tags_string + } + } + }) + if r['return']>0: return r + env['CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE'] = env.get('CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE', 'Added new results') + + return {'return':0} + +def postprocess(i): + return {'return':0} diff --git a/script/push-mlperf-inference-results-to-github/run.sh b/script/push-mlperf-inference-results-to-github/run.sh new file mode 100644 index 0000000000..ac3a50d9f4 --- /dev/null +++ b/script/push-mlperf-inference-results-to-github/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +cd "${CM_GIT_CHECKOUT_PATH}" +git pull +git add * +if [[ -n ${CM_MLPERF_INFERENCE_SUBMISSION_DIR} ]]; then + rsync -avz "${CM_MLPERF_INFERENCE_SUBMISSION_DIR}/" "${CM_GIT_CHECKOUT_PATH}/" + git add * +fi +test $? -eq 0 || exit $? + +git commit -a -m "${CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE}" +git push +test $? -eq 0 || exit $? 
diff --git a/script/remote-run-commands/README-extra.md b/script/remote-run-commands/README-extra.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/script/remote-run-commands/README.md b/script/remote-run-commands/README.md new file mode 100644 index 0000000000..95bd9c4e04 --- /dev/null +++ b/script/remote-run-commands/README.md @@ -0,0 +1,147 @@ +Automatically generated README for this automation recipe: **remote-run-commands** + +Category: **Remote automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=remote-run-commands,b71e24b03c9d49cd) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/remote-run-commands)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *remote,run,cmds,remote-run,remote-run-cmds,ssh-run,ssh* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "remote run cmds remote-run remote-run-cmds ssh-run ssh" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=remote,run,cmds,remote-run,remote-run-cmds,ssh-run,ssh` + +`cm run script --tags=remote,run,cmds,remote-run,remote-run-cmds,ssh-run,ssh [--input_flags]` + +*or* + +`cmr "remote run cmds remote-run remote-run-cmds ssh-run ssh"` + +`cmr "remote run cmds remote-run remote-run-cmds ssh-run ssh " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'remote,run,cmds,remote-run,remote-run-cmds,ssh-run,ssh', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="remote,run,cmds,remote-run,remote-run-cmds,ssh-run,ssh"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=remote,run,cmds,remote-run,remote-run-cmds,ssh-run,ssh) to generate the CM command. + +#### Run this script via Docker (beta) + +`cm docker script "remote run cmds remote-run remote-run-cmds ssh-run ssh" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +<details>
+Click here to expand this section. + +* `--client_refresh=value` → `CM_SSH_CLIENT_REFRESH=value` +* `--host=value` → `CM_SSH_HOST=value` +* `--password=value` → `CM_SSH_PASSWORD=value` +* `--port=value` → `CM_SSH_PORT=value` +* `--run_cmds=value` → `CM_SSH_RUN_COMMANDS=value` +* `--skip_host_verify=value` → `CM_SSH_SKIP_HOST_VERIFY=value` +* `--ssh_key_file=value` → `CM_SSH_KEY_FILE=value` +* `--user=value` → `CM_SSH_USER=value` + +**The above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "client_refresh":...}) +``` + +</details>
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags. + +* CM_SSH_PORT: `22` +* CM_SSH_HOST: `localhost` +* CM_SSH_USER: `$USER` +* CM_SSH_CLIENT_REFRESH: `10` +* CM_SSH_KEY_FILE: `$HOME/.ssh/id_rsa` + +</details>
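+For orientation, here is a hedged sketch of the SSH command that `customize.py` assembles from these inputs (host, user, and commands below are placeholders): the generated string is stored in `CM_SSH_CMD`, and `run.sh` simply evaluates it.
+
+```bash
+# Hedged sketch of the generated command for CM_SSH_RUN_COMMANDS=['uname -a','free -m']
+# with skip_host_verify set; commands are joined with " ; " into one SSH invocation.
+ssh ubuntu@192.168.1.10 -o StrictHostKeyChecking=no -i $HOME/.ssh/id_rsa 'uname -a ; free -m'
+```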
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/remote-run-commands/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/remote-run-commands/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/remote-run-commands/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/remote-run-commands/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/remote-run-commands/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/remote-run-commands/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/remote-run-commands/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/remote-run-commands/_cm.json) + +___ +### Script output +`cmr "remote run cmds remote-run remote-run-cmds ssh-run ssh " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/remote-run-commands/_cm.json b/script/remote-run-commands/_cm.json new file mode 100644 index 0000000000..e4d8f6754f --- /dev/null +++ b/script/remote-run-commands/_cm.json @@ -0,0 +1,33 @@ +{ + "alias": "remote-run-commands", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Remote automation", + "tags": [ + "remote", + "run", + "cmds", + "remote-run", + "remote-run-cmds", + "ssh-run", + "ssh" + ], + "uid": "b71e24b03c9d49cd", + "input_mapping": { + "host": "CM_SSH_HOST", + "port": "CM_SSH_PORT", + "skip_host_verify": "CM_SSH_SKIP_HOST_VERIFY", + "client_refresh": "CM_SSH_CLIENT_REFRESH", + "run_cmds": "CM_SSH_RUN_COMMANDS", + "user": "CM_SSH_USER", + "password": "CM_SSH_PASSWORD", + "ssh_key_file": "CM_SSH_KEY_FILE" + }, + "default_env": { + "CM_SSH_PORT": "22", + "CM_SSH_HOST": "localhost", + "CM_SSH_USER": "$USER", + "CM_SSH_CLIENT_REFRESH": "10", + "CM_SSH_KEY_FILE": "$HOME/.ssh/id_rsa" + } +} diff --git a/script/remote-run-commands/customize.py b/script/remote-run-commands/customize.py new file mode 100644 index 0000000000..78676a2d0f --- /dev/null +++ b/script/remote-run-commands/customize.py @@ -0,0 +1,49 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + cmd_string='' + + #pre_run_cmds = env.get('CM_SSH_PRE_RUN_CMDS', ['source $HOME/cm/bin/activate']) + pre_run_cmds = env.get('CM_SSH_PRE_RUN_CMDS', []) + + run_cmds = env.get('CM_SSH_RUN_COMMANDS', []) + + run_cmds = pre_run_cmds + run_cmds + + for idx, cmd in enumerate(run_cmds): + if 'cm ' in cmd: + #cmd=cmd.replace(":", "=") + cmd=cmd.replace(";;", ",") + run_cmds[idx] = cmd + + cmd_string += " ; ".join(run_cmds) + user = env.get('CM_SSH_USER') + password = env.get('CM_SSH_PASSWORD', None) + host = env.get('CM_SSH_HOST') + if password: + password_string = " -p "+password + else: + password_string = "" + cmd_extra = '' + + if env.get("CM_SSH_SKIP_HOST_VERIFY"): + cmd_extra += " -o StrictHostKeyChecking=no" + if env.get("CM_SSH_KEY_FILE"): + cmd_extra += " -i "+env.get("CM_SSH_KEY_FILE") + + ssh_command = "ssh "+user+"@"+host+password_string+ cmd_extra + " '"+cmd_string + "'" + env['CM_SSH_CMD']
= ssh_command + + return {'return':0} + +def postprocess(i): + + return {'return':0} + + diff --git a/script/remote-run-commands/run.bat b/script/remote-run-commands/run.bat new file mode 100644 index 0000000000..648302ca71 --- /dev/null +++ b/script/remote-run-commands/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/script/remote-run-commands/run.sh b/script/remote-run-commands/run.sh new file mode 100644 index 0000000000..f9fac760b5 --- /dev/null +++ b/script/remote-run-commands/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash +cmd=$CM_SSH_CMD +echo $cmd +eval $cmd diff --git a/script/reproduce-ipol-paper-2022-439/README-extra.md b/script/reproduce-ipol-paper-2022-439/README-extra.md new file mode 100644 index 0000000000..fa37618881 --- /dev/null +++ b/script/reproduce-ipol-paper-2022-439/README-extra.md @@ -0,0 +1,75 @@ +# CM-based reproducibility demo for IPOL journal + +This is a part of the [open challenge](https://access.cknowledge.org/playground/?action=challenges&name=f284c08891c44058) +to make it easier to reproduce experimental results from research papers +using the [MLCommons CM scripting language](https://github.com/mlcommons/ck). + +Code and sample images are taken from https://ipolcore.ipol.im/demo/clientApp/demo.html?id=439 . + +The demo illustrates the method proposed by Daudt et al. (2019) for change detection on satellite images. It takes as input two color images in PNG format. Both images should be satellites images of the same area, and co-registered. +The output image is a change map. For each pixel in the input images, the value of the change map is 1 if a change is detected and 0 otherwise. + +Pair of images from the OSCD test set are already provided with the demo. For those images, +the ground truth is available in the original dataset: https://ieee-dataport.org/open-access/oscd-onera-satellite-change-detection. + +## Authors + +* [Jose Hernandez](https://www.linkedin.com/in/jose-hernandez-a261182b) +* [Grigori Fursin](https://cKnowledge.org/gfursin) + +## Initial discussion and materials + +* https://github.com/mlcommons/ck/issues/617 +* http://www.ipol.im/pub/art/2022/439/ +* https://access.cknowledge.org/playground/?action=challenges&name=reproduce-and-automate-ipol-paper + +## Implementation + +We implemented 2 CM scripts for this challenge: + +* [Download IPOL paper sources and cache them in CM](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ipol-src) +* [Run IPOL 2022 439 paper demo using above script and PyTorch](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-ipol-paper-2022-439) + +## Reproducibility + +CM scripts are implemented for a demo on Ubuntu and must be tested across different systems: + +1. Install MLCommons CM(CK2) automation framework as described [here](https://github.com/mlcommons/ck/blob/master/docs/installation.md). + +2. Install MLCommons repository with CM automation scripts: + +```bash +cm pull repo mlcommons@ck +``` + +3. Install src from IPOL 2022 439 paper: +```bash +cm run script "get ipol src" --year=2022 --number=439 + +cm show cache --tags=ipol,src +``` + +4. 
Download the sample images and run the demo (CM will detect or install missing dependencies) +```bash +cm run script "download file _wget" --url=https://cKnowledge.org/ai/data/ipol-paper-2024-439-sample-image-1.png --verify=no --env.CM_DOWNLOAD_CHECKSUM=850639287ad23194576582680c2ecfc3 +cm run script "download file _wget" --url=https://cKnowledge.org/ai/data/ipol-paper-2024-439-sample-image-2.png --verify=no --env.CM_DOWNLOAD_CHECKSUM=31364c03d91873ed2d244cce6d664dd0 +cm run script "reproduce ipol 2022-439" +cm run script "reproduce ipol 2022-439" --adr.torch.version=1.13.1 --adr.torchvision.version=0.14.1 +``` + +This script will use the two sample images from this paper +and should produce *diff.png* in the current directory. + +## Usage with different images + +You can use two other images by specifying their full paths as follows: +```bash +cm run script "reproduce ipol 2022-439" \ + --image1={full path to png image 1} \ + --image2={full path to png image 2} +``` + +## Collaborative development + +Join the public [MLCommons Task Force on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) +to participate in further collaborative developments. diff --git a/script/reproduce-ipol-paper-2022-439/README.md b/script/reproduce-ipol-paper-2022-439/README.md new file mode 100644 index 0000000000..ab93456d97 --- /dev/null +++ b/script/reproduce-ipol-paper-2022-439/README.md @@ -0,0 +1,150 @@ +Automatically generated README for this automation recipe: **reproduce-ipol-paper-2022-439** + +Category: **Reproducibility and artifact evaluation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=reproduce-ipol-paper-2022-439,f9b9e5bd65e34e4f) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-ipol-paper-2022-439)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *app,python,reproduce,project,paper,ipol,journal,repro,reproducibility,pytorch,2022-439* +* Output cached?
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=app,python,reproduce,project,paper,ipol,journal,repro,reproducibility,pytorch,2022-439` + +`cm run script --tags=app,python,reproduce,project,paper,ipol,journal,repro,reproducibility,pytorch,2022-439 [--input_flags]` + +*or* + +`cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439"` + +`cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439 " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'app,python,reproduce,project,paper,ipol,journal,repro,reproducibility,pytorch,2022-439', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="app,python,reproduce,project,paper,ipol,journal,repro,reproducibility,pytorch,2022-439"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=app,python,reproduce,project,paper,ipol,journal,repro,reproducibility,pytorch,2022-439) to generate the CM command. + +#### Run this script via Docker (beta) + +`cm docker script "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +<details>
+Click here to expand this section. + +* `--image1=value` → `CM_IMAGE_1=value` +* `--image2=value` → `CM_IMAGE_2=value` + +**The above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "image1":...}) +``` + +</details>
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags. + + +</details>
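+A hedged usage sketch based on the authors' notes in [README-extra.md](README-extra.md) (the image paths below are placeholders): both inputs must be co-registered PNG satellite images, and the result should appear as *diff.png* in the current directory.
+
+```bash
+# Hypothetical example: run the change-detection demo on your own image pair
+cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439" --image1=/tmp/area-before.png --image2=/tmp/area-after.png
+```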
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-ipol-paper-2022-439/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,ipol,src + * CM names: `--adr.['ipol-src']...` + - CM script: [get-ipol-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ipol-src) + * get,generic-python-lib,_torch + * CM names: `--adr.['torch']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * get,generic-python-lib,_torchvision + * CM names: `--adr.['torchvision']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-ipol-paper-2022-439/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-ipol-paper-2022-439/_cm.yaml) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-ipol-paper-2022-439/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-ipol-paper-2022-439/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-ipol-paper-2022-439/_cm.yaml) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-ipol-paper-2022-439/customize.py)*** + 1.
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-ipol-paper-2022-439/_cm.yaml) + +___ +### Script output +`cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439 " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/reproduce-ipol-paper-2022-439/_cm.yaml b/script/reproduce-ipol-paper-2022-439/_cm.yaml new file mode 100644 index 0000000000..bd7c9e1401 --- /dev/null +++ b/script/reproduce-ipol-paper-2022-439/_cm.yaml @@ -0,0 +1,40 @@ +alias: reproduce-ipol-paper-2022-439 +uid: f9b9e5bd65e34e4f + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: Reproducibility and artifact evaluation + +input_mapping: + image1: CM_IMAGE_1 + image2: CM_IMAGE_2 + +deps: +- tags: detect,os +- tags: get,python3 + names: + - python + - python3 +- tags: get,ipol,src + names: + - ipol-src +- tags: get,generic-python-lib,_torch + names: + - torch +- tags: get,generic-python-lib,_torchvision + names: + - torchvision + +tags: +- app +- python +- reproduce +- project +- paper +- ipol +- journal +- repro +- reproducibility +- pytorch +- 2022-439 diff --git a/script/reproduce-ipol-paper-2022-439/customize.py b/script/reproduce-ipol-paper-2022-439/customize.py new file mode 100644 index 0000000000..6b57ab932f --- /dev/null +++ b/script/reproduce-ipol-paper-2022-439/customize.py @@ -0,0 +1,34 @@ +from cmind import utils +import os + +def preprocess(i): + os_info = i['os_info'] + + env = i['env'] + + # Check if input files are empty and add files + input_file_1 = env.get('CM_INPUT_1','') + if input_file_1 == '': input_file_1 = 'ipol-paper-2024-439-sample-image-1.png' + + if not os.path.isfile(input_file_1): + return {'return':1, 'error':'input file 1 "{}" not found'.format(input_file_1)} + + env['CM_INPUT_1']=os.path.abspath(input_file_1) + + input_file_2 = env.get('CM_INPUT_2','') + if input_file_2 == '': input_file_2 = 'ipol-paper-2024-439-sample-image-2.png' + + if not os.path.isfile(input_file_2): + return {'return':1, 'error':'input file 2 "{}" not found'.format(input_file_2)} + + env['CM_INPUT_2']=os.path.abspath(input_file_2) + + return {'return':0} + +def postprocess(i): + + print ('') + print ('Please check "diff.png"') + print ('') + + return {'return':0} diff --git a/script/reproduce-ipol-paper-2022-439/requirements.txt b/script/reproduce-ipol-paper-2022-439/requirements.txt new file mode 100644 index 0000000000..82a4d6034b --- /dev/null +++ b/script/reproduce-ipol-paper-2022-439/requirements.txt @@ -0,0 +1,5 @@ +jupyter +numpy +imageio +IPython +scikit-image diff --git a/script/reproduce-ipol-paper-2022-439/run.bat b/script/reproduce-ipol-paper-2022-439/run.bat new file mode 100644 index 0000000000..7aafa4a34a --- /dev/null +++ b/script/reproduce-ipol-paper-2022-439/run.bat @@ -0,0 +1,33 @@ +@echo off + +echo ======================================================= + +set CUR_DIR=%cd% +echo Current path in CM script: %CUR_DIR% + +echo. +echo Installing extra requirements (latest versions) ... + +echo. +%CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + +echo ======================================================= + +cd %CM_IPOL_PATH% + +echo Current path in CM cache: %cd% + +echo Running author's code ... + +del /F /Q cm.png +del /F /Q %CUR_DIR%\diff.png + +echo. 
+%CM_PYTHON_BIN_WITH_PATH% main.py --input_0=%CM_INPUT_1% --input_1=%CM_INPUT_2% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +rem Copy diff png to current path +copy /B cm.png %CUR_DIR%\diff.png +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo ======================================================= diff --git a/script/reproduce-ipol-paper-2022-439/run.sh b/script/reproduce-ipol-paper-2022-439/run.sh new file mode 100644 index 0000000000..99a4746270 --- /dev/null +++ b/script/reproduce-ipol-paper-2022-439/run.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +echo "=======================================================" + +CUR_DIR=${PWD} +echo "Current path in CM script: ${CUR_DIR}" + +echo "" +echo "Installing extra requirements (latest versions) ..." + +echo "" +${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + +echo "=======================================================" + +cd ${CM_IPOL_PATH} + +echo "Current path in CM cache: ${PWD}" + +# Check default images +if [ "${CM_INPUT_1}" == "" ]; then + CM_INPUT_1=${CM_TMP_CURRENT_SCRIPT_PATH}/sample-images/1.png +fi + +if [ "${CM_INPUT_2}" == "" ]; then + CM_INPUT_2=${CM_TMP_CURRENT_SCRIPT_PATH}/sample-images/2.png +fi + +echo "Running author's code ..." + +rm -f cm.png +rm -f ${CUR_DIR}/diff.png + +echo "" +${CM_PYTHON_BIN_WITH_PATH} main.py --input_0=${CM_INPUT_1} --input_1=${CM_INPUT_2} +test $? -eq 0 || exit 1 + +# Copy diff png to current path +cp cm.png ${CUR_DIR}/diff.png +test $? -eq 0 || exit 1 + +echo "=======================================================" diff --git a/script/reproduce-micro-paper-2023-victima/README-extra.md b/script/reproduce-micro-paper-2023-victima/README-extra.md new file mode 100644 index 0000000000..2aca8543f1 --- /dev/null +++ b/script/reproduce-micro-paper-2023-victima/README-extra.md @@ -0,0 +1,44 @@ +# CM script to run and reproduce experiments + +Original repository: https://github.com/CMU-SAFARI/Victima + + +### Reusability using MLCommons CM automation language + +Install MLCommons CM using [this guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md). + +Install reusable MLCommons automations: + +```bash +cm pull repo mlcommons@ck +``` + +### Run Victima via CM interface + +The core CM script for Victima will be available under ```/CM/repos/mlcommons@ck/script/reproduce-micro-paper-2023-victima``` + +It is described by `_cm.yaml` and several native scripts. + +Perform the following steps to evaluate Victima with MLCommons CM automation language: + +1) This command will install system dependencies for Docker and require sudo (skip it if you have Docker installed): +```bash +cmr "reproduce paper micro 2023 victima _install_deps" +``` + +2) This command will prepare and run all experiments via Docker: + +```bash +cmr "reproduce paper micro 2023 victima _run" +``` + +You can specify --job_manager and --container if needed: +```bash +cmr "reproduce paper micro 2023 victima _run" --job_manager=native|slurm --container=docker|podman +``` + +3) If the previous command succeeded, this command will generate plots to help you validate the results from the article: + +```bash +cmr "reproduce paper micro 2023 victima _plot" +``` diff --git a/script/reproduce-micro-paper-2023-victima/README.md b/script/reproduce-micro-paper-2023-victima/README.md new file mode 100644 index 0000000000..99d1876d30 --- /dev/null +++ b/script/reproduce-micro-paper-2023-victima/README.md @@ -0,0 +1,179 @@ +<details>
+Click here to see the table of contents. + +* [About](#about) +* [Summary](#summary) +* [Reuse this script in your project](#reuse-this-script-in-your-project) + * [ Install CM automation language](#install-cm-automation-language) + * [ Check CM script flags](#check-cm-script-flags) + * [ Run this script from command line](#run-this-script-from-command-line) + * [ Run this script from Python](#run-this-script-from-python) + * [ Run this script via GUI](#run-this-script-via-gui) + * [ Run this script via Docker (beta)](#run-this-script-via-docker-(beta)) +* [Customization](#customization) + * [ Variations](#variations) + * [ Script flags mapped to environment](#script-flags-mapped-to-environment) + * [ Default environment](#default-environment) +* [Script workflow, dependencies and native scripts](#script-workflow-dependencies-and-native-scripts) +* [Script output](#script-output) +* [New environment keys (filter)](#new-environment-keys-(filter)) +* [New environment keys auto-detected from customize](#new-environment-keys-auto-detected-from-customize) +* [Maintainers](#maintainers) + +
+ +*Note that this README is automatically generated - don't edit!* + +### About + + +See extra [notes](README-extra.md) from the authors and contributors. + +#### Summary + +* Category: *Reproducibility and artifact evaluation.* +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/master/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-victima)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* CM "database" tags to find this script: *reproduce,project,paper,micro,micro-2023,victima* +* Output cached? *False* +___ +### Reuse this script in your project + +#### Install CM automation language + +* [Installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) +* [CM intro](https://doi.org/10.5281/zenodo.8105339) + +#### Pull CM repository with this automation + +```cm pull repo mlcommons@ck``` + + +#### Run this script from command line + +1. `cm run script --tags=reproduce,project,paper,micro,micro-2023,victima[,variations] [--input_flags]` + +2. `cmr "reproduce project paper micro micro-2023 victima[ variations]" [--input_flags]` + +* `variations` can be seen [here](#variations) + +* `input_flags` can be seen [here](#script-flags-mapped-to-environment) + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'reproduce,project,paper,micro,micro-2023,victima', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="reproduce,project,paper,micro,micro-2023,victima"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=reproduce,project,paper,micro,micro-2023,victima) to generate the CM command. + +#### Run this script via Docker (beta) + +`cm docker script "reproduce project paper micro micro-2023 victima[ variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +<details>
+ Click here to expand this section. + + * `_install_deps` + - Workflow: + * `_plot` + - Workflow: + * `_run` + - Workflow: + +
+ + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--container=value` → `CM_VICTIMA_CONTAINER=value` +* `--job_manager=value` → `CM_VICTIMA_JOB_MANAGER=value` + +**The above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "container":...}) +``` + +</details>
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags. + +* CM_VICTIMA_JOB_MANAGER: `native` +* CM_VICTIMA_CONTAINER: `docker` + +</details>
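+A hedged usage sketch based on the authors' notes in [README-extra.md](README-extra.md): the `_run` variation executes the experiments, and the two flags above override the defaults (`native` job manager, `docker` container).
+
+```bash
+# Hypothetical example: run the Victima experiments under Slurm with Podman
+cmr "reproduce project paper micro micro-2023 victima _run" --job_manager=slurm --container=podman
+```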
+ +___ +### Script workflow, dependencies and native scripts + +
+Click here to expand this section. + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-victima/_cm.yaml)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,python + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,git,repo,_repo.https://github.com/CMU-SAFARI/Victima + - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-victima/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-victima/_cm.yaml) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-victima/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-victima/_cm.yaml) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-victima/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-victima/_cm.yaml)
+ +___ +### Script output +`cmr "reproduce project paper micro micro-2023 victima[,variations]" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize + +___ +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) \ No newline at end of file diff --git a/script/reproduce-micro-paper-2023-victima/_cm.yaml b/script/reproduce-micro-paper-2023-victima/_cm.yaml new file mode 100644 index 0000000000..d20e5436a4 --- /dev/null +++ b/script/reproduce-micro-paper-2023-victima/_cm.yaml @@ -0,0 +1,37 @@ +alias: reproduce-micro-paper-2023-victima +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: Reproducibility and artifact evaluation +default_env: + CM_VICTIMA_JOB_MANAGER: native + CM_VICTIMA_CONTAINER: docker +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python +- tags: get,git,repo,_repo.https://github.com/CMU-SAFARI/Victima + env: + CM_GIT_ENV_KEY: 'CMU_SAFARI_VICTIMA' + extra_cache_tags: micro23,artifact,ae,cmu,safari,victima +input_mapping: + job_manager: CM_VICTIMA_JOB_MANAGER + container: CM_VICTIMA_CONTAINER +script_name: run +tags: +- reproduce +- project +- paper +- micro +- micro-2023 +- victima +uid: fc5bee3426174e7b +variations: + install_deps: + script_name: install_deps + plot: + script_name: plot + run: + script_name: run diff --git a/script/reproduce-micro-paper-2023-victima/customize.py b/script/reproduce-micro-paper-2023-victima/customize.py new file mode 100644 index 0000000000..d12f9b3e1d --- /dev/null +++ b/script/reproduce-micro-paper-2023-victima/customize.py @@ -0,0 +1,22 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/reproduce-micro-paper-2023-victima/install_deps.sh b/script/reproduce-micro-paper-2023-victima/install_deps.sh new file mode 100644 index 0000000000..3458dd15bb --- /dev/null +++ b/script/reproduce-micro-paper-2023-victima/install_deps.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to Victima repo: ${CM_GIT_REPO_CMU_SAFARI_VICTIMA_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_CMU_SAFARI_VICTIMA_CHECKOUT_PATH} + +if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then + echo "" + echo "Installing requirements.txt ..." + echo "" + + ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + test $? -eq 0 || exit 1 +fi + +echo "" + +sh install_docker.sh +test $? 
-eq 0 || exit 1 diff --git a/script/reproduce-micro-paper-2023-victima/main.py b/script/reproduce-micro-paper-2023-victima/main.py new file mode 100644 index 0000000000..d851f1450f --- /dev/null +++ b/script/reproduce-micro-paper-2023-victima/main.py @@ -0,0 +1,10 @@ +import os + +if __name__ == "__main__": + + print ('') + print ('Main script:') + print ('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT',''))) + print ('') + + exit(0) diff --git a/script/reproduce-micro-paper-2023-victima/plot.sh b/script/reproduce-micro-paper-2023-victima/plot.sh new file mode 100644 index 0000000000..50723da505 --- /dev/null +++ b/script/reproduce-micro-paper-2023-victima/plot.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to Victima repo: ${CM_GIT_REPO_CMU_SAFARI_VICTIMA_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_CMU_SAFARI_VICTIMA_CHECKOUT_PATH} + +echo "" +sh ./scripts/produce_plots.sh ${CM_VICTIMA_CONTAINER} +test $? -eq 0 || exit 1 diff --git a/script/reproduce-micro-paper-2023-victima/run.sh b/script/reproduce-micro-paper-2023-victima/run.sh new file mode 100644 index 0000000000..541c728391 --- /dev/null +++ b/script/reproduce-micro-paper-2023-victima/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to Victima repo: ${CM_GIT_REPO_CMU_SAFARI_VICTIMA_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_CMU_SAFARI_VICTIMA_CHECKOUT_PATH} + +echo "" + +sh artifact.sh --${CM_VICTIMA_JOB_MANAGER} ${CM_VICTIMA_CONTAINER} +test $? -eq 0 || exit 1 diff --git a/script/reproduce-micro-paper-2023-xyz/README.md b/script/reproduce-micro-paper-2023-xyz/README.md new file mode 100644 index 0000000000..e3beeea2ae --- /dev/null +++ b/script/reproduce-micro-paper-2023-xyz/README.md @@ -0,0 +1,178 @@ +
+Click here to see the table of contents. + +* [About](#about) +* [Summary](#summary) +* [Reuse this script in your project](#reuse-this-script-in-your-project) + * [ Install CM automation language](#install-cm-automation-language) + * [ Check CM script flags](#check-cm-script-flags) + * [ Run this script from command line](#run-this-script-from-command-line) + * [ Run this script from Python](#run-this-script-from-python) + * [ Run this script via GUI](#run-this-script-via-gui) + * [ Run this script via Docker (beta)](#run-this-script-via-docker-(beta)) +* [Customization](#customization) + * [ Variations](#variations) + * [ Script flags mapped to environment](#script-flags-mapped-to-environment) + * [ Default environment](#default-environment) +* [Script workflow, dependencies and native scripts](#script-workflow-dependencies-and-native-scripts) +* [Script output](#script-output) +* [New environment keys (filter)](#new-environment-keys-(filter)) +* [New environment keys auto-detected from customize](#new-environment-keys-auto-detected-from-customize) +* [Maintainers](#maintainers) + +
+ +*Note that this README is automatically generated - don't edit!* + +### About + + +See extra [notes](README-extra.md) from the authors and contributors. + +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/master/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-xyz)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* CM "database" tags to find this script: *reproduce,paper,micro,micro-2023,victima* +* Output cached? *False* +___ +### Reuse this script in your project + +#### Install CM automation language + +* [Installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) +* [CM intro](https://doi.org/10.5281/zenodo.8105339) + +#### Pull CM repository with this automation + +```cm pull repo mlcommons@ck``` + + +#### Run this script from command line + +1. `cm run script --tags=reproduce,paper,micro,micro-2023,victima[,variations] [--input_flags]` + +2. `cmr "reproduce paper micro micro-2023 victima[ variations]" [--input_flags]` + +* `variations` can be seen [here](#variations) + +* `input_flags` can be seen [here](#script-flags-mapped-to-environment) + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'reproduce,paper,micro,micro-2023,victima', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="reproduce,paper,micro,micro-2023,victima"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=reproduce,paper,micro,micro-2023,victima) to generate the CM command. + +#### Run this script via Docker (beta) + +`cm docker script "reproduce paper micro micro-2023 victima[ variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +<details>
+ Click here to expand this section. + + * `_install_deps` + - Workflow: + * `_plot` + - Workflow: + * `_run` + - Workflow: + +
+ + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--container=value` → `CM_VICTIMA_CONTAINER=value`
+* `--job_manager=value` → `CM_VICTIMA_JOB_MANAGER=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "container":...})
+```
+
</details>
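+
+For completeness, a minimal end-to-end sketch (assuming the flag-to-input mapping above; the values shown are simply the documented defaults):
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'reproduce,paper,micro,micro-2023,victima',
+                  'out':'con',
+                  'container':'docker',     # maps to CM_VICTIMA_CONTAINER
+                  'job_manager':'native'})  # maps to CM_VICTIMA_JOB_MANAGER
+
+if r['return']>0:
+   print (r['error'])
+```
+
+Flags that are not set fall back to the defaults listed under "Default environment" below.
+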
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_VICTIMA_JOB_MANAGER: `native` +* CM_VICTIMA_CONTAINER: `docker` + +
+ +___ +### Script workflow, dependencies and native scripts + +
+Click here to expand this section.
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-xyz/_cm.yaml)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * get,python
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,git,repo,_repo.https://github.com/CMU-SAFARI/Victima
+       - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-xyz/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-xyz/_cm.yaml)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-xyz/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-xyz/_cm.yaml)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-xyz/customize.py)***
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-xyz/_cm.yaml)
+
</details>
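+
+The "preprocess" and "postprocess" hooks above follow the customize.py convention used throughout this repository (see, for example, the customize.py files added elsewhere in this patch); a minimal sketch of that contract:
+
+```python
+from cmind import utils   # imports mirror the convention used by scripts in this repo
+import os
+
+def preprocess(i):
+    # Called before the native run script; 'env' holds the CM_* variables
+    env = i['env']
+    # Adjust env here, e.g. set defaults for missing keys
+    return {'return':0}
+
+def postprocess(i):
+    # Called after the native run script; can inspect results or export new env keys
+    env = i['env']
+    return {'return':0}
+```
+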
+ +___ +### Script output +`cmr "reproduce paper micro micro-2023 victima[,variations]" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize + +___ +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) \ No newline at end of file diff --git a/script/reproduce-mlperf-inference-dummy/README.md b/script/reproduce-mlperf-inference-dummy/README.md new file mode 100644 index 0000000000..44bb5f7e5a --- /dev/null +++ b/script/reproduce-mlperf-inference-dummy/README.md @@ -0,0 +1,381 @@ +
+Click here to see the table of contents. + +* [About](#about) +* [Summary](#summary) +* [Reuse this script in your project](#reuse-this-script-in-your-project) + * [ Install CM automation language](#install-cm-automation-language) + * [ Check CM script flags](#check-cm-script-flags) + * [ Run this script from command line](#run-this-script-from-command-line) + * [ Run this script from Python](#run-this-script-from-python) + * [ Run this script via GUI](#run-this-script-via-gui) + * [ Run this script via Docker (beta)](#run-this-script-via-docker-(beta)) +* [Customization](#customization) + * [ Variations](#variations) + * [ Script flags mapped to environment](#script-flags-mapped-to-environment) + * [ Default environment](#default-environment) +* [Script workflow, dependencies and native scripts](#script-workflow-dependencies-and-native-scripts) +* [Script output](#script-output) +* [New environment keys (filter)](#new-environment-keys-(filter)) +* [New environment keys auto-detected from customize](#new-environment-keys-auto-detected-from-customize) +* [Maintainers](#maintainers) + +
+ +*Note that this README is automatically generated - don't edit!* + +### About + +#### Summary + +* Category: *Modular MLPerf benchmarks.* +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/master/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-mlperf-inference-dummy)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* CM "database" tags to find this script: *reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy* +* Output cached? *False* +___ +### Reuse this script in your project + +#### Install CM automation language + +* [Installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) +* [CM intro](https://doi.org/10.5281/zenodo.8105339) + +#### Pull CM repository with this automation + +```cm pull repo mlcommons@ck``` + + +#### Run this script from command line + +1. `cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy[,variations] [--input_flags]` + +2. `cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy dummy-harness dummy[ variations]" [--input_flags]` + +* `variations` can be seen [here](#variations) + +* `input_flags` can be seen [here](#script-flags-mapped-to-environment) + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+   print (r['error'])
+
+```
+
</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "reproduce mlcommons mlperf inference harness dummy-harness dummy dummy-harness dummy[ variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *Internal group (variations should not be selected manually)* +
+ Click here to expand this section. + + * `_bert_` + - Workflow: + * `_gptj_` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,ml-model,gptj + * CM names: `--adr.['gptj-model']...` + - CM script: [get-ml-model-gptj](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-gptj) + * get,dataset,cnndm,_validation + - CM script: [get-dataset-cnndm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-cnndm) + * `_llama2-70b_` + - Workflow: + +
+ + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_pytorch,cpu` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_torch + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_pytorch,cuda` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_torch_cuda + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + * `_singlestream,resnet50` + - Workflow: + * `_singlestream,retinanet` + - Workflow: + +
+ + + * Group "**backend**" +
+ Click here to expand this section. + + * **`_pytorch`** (default) + - Environment variables: + - *CM_MLPERF_BACKEND*: `pytorch` + - Workflow: + +
+ + + * Group "**batch-size**" +
+ Click here to expand this section. + + * `_bs.#` + - Workflow: + +
+ + + * Group "**device**" +
+ Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `cpu` + - Workflow: + * `_cuda` + - Environment variables: + - *CM_MLPERF_DEVICE*: `gpu` + - *CM_MLPERF_DEVICE_LIB_NAMESPEC*: `cudart` + - Workflow: + +
+ + + * Group "**loadgen-scenario**" +
+ Click here to expand this section. + + * `_multistream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `MultiStream` + - Workflow: + * `_offline` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Offline` + - Workflow: + * `_server` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Server` + - Workflow: + * `_singlestream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `SingleStream` + - Workflow: + +
+ + + * Group "**model**" +
+ Click here to expand this section. + + * `_bert-99` + - Environment variables: + - *CM_MODEL*: `bert-99` + - *CM_SQUAD_ACCURACY_DTYPE*: `float32` + - Workflow: + * `_bert-99.9` + - Environment variables: + - *CM_MODEL*: `bert-99.9` + - Workflow: + * `_gptj-99` + - Environment variables: + - *CM_MODEL*: `gptj-99` + - *CM_SQUAD_ACCURACY_DTYPE*: `float32` + - Workflow: + * `_gptj-99.9` + - Environment variables: + - *CM_MODEL*: `gptj-99.9` + - Workflow: + * `_llama2-70b-99` + - Environment variables: + - *CM_MODEL*: `llama2-70b-99` + - Workflow: + * `_llama2-70b-99.9` + - Environment variables: + - *CM_MODEL*: `llama2-70b-99.9` + - Workflow: + * **`_resnet50`** (default) + - Environment variables: + - *CM_MODEL*: `resnet50` + - Workflow: + * `_retinanet` + - Environment variables: + - *CM_MODEL*: `retinanet` + - Workflow: + +
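+
+ Variations from different groups can be combined by appending them to the tags string (a sketch following the CLI pattern above; `_gptj-99` comes from the model group and `_cuda` from the device group):
+
+ ```python
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy,_gptj-99,_cuda',
+                   'out':'con'})
+ if r['return']>0:
+    print (r['error'])
+ ```
+
+ Group defaults (`_resnet50`, `_cpu`, `_pytorch`, `_fp32`) apply for any group left unspecified.
+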
+ + + * Group "**precision**" +
+ Click here to expand this section. + + * `_fp16` + - Environment variables: + - *CM_MLPERF_MODEL_PRECISION*: `float16` + - Workflow: + * **`_fp32`** (default) + - Environment variables: + - *CM_MLPERF_MODEL_PRECISION*: `float32` + - Workflow: + * `_uint8` + - Environment variables: + - *CM_MLPERF_MODEL_PRECISION*: `uint8` + - Workflow: + +
+ + +#### Default variations + +`_cpu,_fp32,_pytorch,_resnet50` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+* `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+* `--mlperf_conf=value` → `CM_MLPERF_CONF=value`
+* `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+* `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+* `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+* `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value`
+* `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value`
+* `--rerun=value` → `CM_RERUN=value`
+* `--results_repo=value` → `CM_MLPERF_INFERENCE_RESULTS_REPO=value`
+* `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+* `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+* `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+* `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value`
+* `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value`
+* `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+* `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+* `--user_conf=value` → `CM_MLPERF_USER_CONF=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "count":...})
+```
+
</details>
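+
+For example, a minimal sketch that sets the LoadGen scenario, mode and target QPS through these flags (illustrative values):
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy',
+                  'out':'con',
+                  'scenario':'Offline',    # -> CM_MLPERF_LOADGEN_SCENARIO
+                  'mode':'performance',    # -> CM_MLPERF_LOADGEN_MODE
+                  'target_qps':'100'})     # -> CM_MLPERF_LOADGEN_TARGET_QPS
+
+if r['return']>0:
+   print (r['error'])
+```
+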
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MLPERF_LOADGEN_SCENARIO: `Offline` +* CM_MLPERF_LOADGEN_MODE: `performance` +* CM_SKIP_PREPROCESS_DATASET: `no` +* CM_SKIP_MODEL_DOWNLOAD: `no` +* CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `dummy` +* CM_MLPERF_SKIP_RUN: `no` + +
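+
+For instance, assuming the `env` dictionary is passed through as described above, the default LoadGen mode could be overridden from Python like this (a sketch):
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy',
+                  'out':'con',
+                  'env':{'CM_MLPERF_LOADGEN_MODE':'accuracy'}})  # overrides the `performance` default above
+
+if r['return']>0:
+   print (r['error'])
+```
+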
+ +___ +### Script workflow, dependencies and native scripts + +
+Click here to expand this section.
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-mlperf-inference-dummy/_cm.yaml)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * detect,cpu
+       - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu)
+     * get,sys-utils-cm
+       - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm)
+     * get,mlcommons,inference,src
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,mlcommons,inference,loadgen
+       * CM names: `--adr.['inference-loadgen']...`
+       - CM script: [get-mlperf-inference-loadgen](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-loadgen)
+     * generate,user-conf,mlperf,inference
+       * CM names: `--adr.['user-conf-generator']...`
+       - CM script: [generate-mlperf-inference-user-conf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/generate-mlperf-inference-user-conf)
+     * get,generic-python-lib,_mlperf_logging
+       * CM names: `--adr.['mlperf-logging']...`
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,git,repo
+       * CM names: `--adr.inference-results...`
+       - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-mlperf-inference-dummy/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-mlperf-inference-dummy/_cm.yaml)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-mlperf-inference-dummy/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-mlperf-inference-dummy/_cm.yaml)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-mlperf-inference-dummy/customize.py)***
+  1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-mlperf-inference-dummy/_cm.yaml)***
+     * benchmark-mlperf
+       * `if (CM_MLPERF_SKIP_RUN not in ['yes', True])`
+       * CM names: `--adr.['runner', 'mlperf-runner']...`
+       - CM script: [benchmark-program-mlperf](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-program-mlperf)
+     * save,mlperf,inference,state
+       * CM names: `--adr.['save-mlperf-inference-state']...`
+       - CM script: [save-mlperf-inference-implementation-state](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/save-mlperf-inference-implementation-state)
</details>
+ +___ +### Script output +`cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy dummy-harness dummy[,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_DATASET_*` +* `CM_HW_NAME` +* `CM_IMAGENET_ACCURACY_DTYPE` +* `CM_MAX_EXAMPLES` +* `CM_MLPERF_*` +* `CM_ML_MODEL_*` +* `CM_SQUAD_ACCURACY_DTYPE` +#### New environment keys auto-detected from customize + +___ +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) \ No newline at end of file diff --git a/script/reproduce-mlperf-octoml-tinyml-results/README-extra.md b/script/reproduce-mlperf-octoml-tinyml-results/README-extra.md new file mode 100644 index 0000000000..ab78e4a31f --- /dev/null +++ b/script/reproduce-mlperf-octoml-tinyml-results/README-extra.md @@ -0,0 +1,13 @@ +This script reproduces OctoML MLPerf TinyML Submission from v1.0. +## Install +```bash +cm run script --tags=reproduce,tiny,mlperf,octoml,_[VARIANT],_[MODEL] +``` +where, +* `[VARIANT]` is one of `cmsis_nn`,`native` +* `[MODEL]` is one of `ad`, `ic`, `kws`, `vww` + +The generated binary can be located inside +```bash +find `cm find cache --tags=reproduce,tiny,mlperf,octoml,_[VARIANT],_[MODEL] +``` diff --git a/script/reproduce-mlperf-octoml-tinyml-results/README.md b/script/reproduce-mlperf-octoml-tinyml-results/README.md new file mode 100644 index 0000000000..cafdc5affe --- /dev/null +++ b/script/reproduce-mlperf-octoml-tinyml-results/README.md @@ -0,0 +1,215 @@ +Automatically generated README for this automation recipe: **reproduce-mlperf-octoml-tinyml-results** + +Category: **Reproduce MLPerf benchmarks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=reproduce-mlperf-octoml-tinyml-results,a63803a707d04332) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-octoml-tinyml-results)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *reproduce,tiny,results,mlperf,octoml,mlcommons* +* Output cached? 
*True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "reproduce tiny results mlperf octoml mlcommons" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=reproduce,tiny,results,mlperf,octoml,mlcommons`
+
+`cm run script --tags=reproduce,tiny,results,mlperf,octoml,mlcommons[,variations] [--input_flags]`
+
+*or*
+
+`cmr "reproduce tiny results mlperf octoml mlcommons"`
+
+`cmr "reproduce tiny results mlperf octoml mlcommons [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'reproduce,tiny,results,mlperf,octoml,mlcommons',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+   print (r['error'])
+
+```
+
</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="reproduce,tiny,results,mlperf,octoml,mlcommons"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=reproduce,tiny,results,mlperf,octoml,mlcommons) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "reproduce tiny results mlperf octoml mlcommons[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_NRF` + - Environment variables: + - *CM_TINY_BOARD*: `NRF5340DK` + - Workflow: + * `_NUCLEO` + - Environment variables: + - *CM_TINY_BOARD*: `NUCLEO_L4R5ZI` + - Workflow: + * `_ad` + - Environment variables: + - *CM_TINY_MODEL*: `ad` + - Workflow: + * `_cmsis_nn` + - Environment variables: + - *CM_MICROTVM_VARIANT*: `microtvm_cmsis_nn` + - Workflow: + * `_ic` + - Environment variables: + - *CM_TINY_MODEL*: `ic` + - Workflow: + * `_kws` + - Environment variables: + - *CM_TINY_MODEL*: `kws` + - Workflow: + * `_native` + - Environment variables: + - *CM_MICROTVM_VARIANT*: `microtvm_native` + - Workflow: + * `_vww` + - Environment variables: + - *CM_TINY_MODEL*: `vww` + - Workflow: + +
+ + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--flash=value` → `CM_FLASH_BOARD=value`
+* `--recreate_binary=value` → `CM_RECREATE_BINARY=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "flash":...})
+```
+
</details>
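+
+For example, a minimal sketch that builds the `ic` model with the `cmsis_nn` variant and then flashes it (as shown in the dependency pipeline below, the `flash,tiny,mlperf` post-dependency is gated on `CM_FLASH_BOARD == True`, so the string `'True'` is passed):
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'reproduce,tiny,results,mlperf,octoml,mlcommons,_cmsis_nn,_ic',
+                  'out':'con',
+                  'flash':'True',             # -> CM_FLASH_BOARD
+                  'recreate_binary':'True'})  # -> CM_RECREATE_BINARY
+
+if r['return']>0:
+   print (r['error'])
+```
+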
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+
+#### Versions
+Default version: `r1.0`
+
+* `r1.0`
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-octoml-tinyml-results/_cm.json)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * detect,cpu
+       - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu)
+     * get,sys-utils-cm
+       - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm)
+     * get,python3
+       * CM names: `--adr.['python3', 'python']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,zephyr
+       * CM names: `--adr.['zephyr']...`
+       - CM script: [get-zephyr](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-zephyr)
+     * get,zephyr-sdk
+       * CM names: `--adr.['zephyr-sdk']...`
+       - CM script: [get-zephyr-sdk](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-zephyr-sdk)
+     * get,cmsis
+       * CM names: `--adr.['cmsis']...`
+       - CM script: [get-cmsis_5](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmsis_5)
+     * get,microtvm
+       * CM names: `--adr.['microtvm']...`
+       - CM script: [get-microtvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-microtvm)
+     * get,cmake
+       * CM names: `--adr.['cmake']...`
+       - CM script: [get-cmake](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cmake)
+     * get,gcc
+       - CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-octoml-tinyml-results/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-octoml-tinyml-results/_cm.json)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-octoml-tinyml-results/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-octoml-tinyml-results/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-octoml-tinyml-results/customize.py)***
+  1. 
***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-octoml-tinyml-results/_cm.json)*** + * flash,tiny,mlperf + * `if (CM_FLASH_BOARD == True)` + - CM script: [flash-tinyml-binary](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/flash-tinyml-binary) + +___ +### Script output +`cmr "reproduce tiny results mlperf octoml mlcommons [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_TINY_*` +#### New environment keys auto-detected from customize + +* `CM_TINY_MODEL` \ No newline at end of file diff --git a/script/reproduce-mlperf-octoml-tinyml-results/_cm.json b/script/reproduce-mlperf-octoml-tinyml-results/_cm.json new file mode 100644 index 0000000000..3e9281dfb5 --- /dev/null +++ b/script/reproduce-mlperf-octoml-tinyml-results/_cm.json @@ -0,0 +1,140 @@ +{ + "alias": "reproduce-mlperf-octoml-tinyml-results", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Reproduce MLPerf benchmarks", + "cache": true, + "default_version": "r1.0", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "tags": "get,sys-utils-cm" + }, + { + "names": [ + "python3", + "python" + ], + "tags": "get,python3" + }, + { + "names": [ + "zephyr" + ], + "tags": "get,zephyr" + }, + { + "names": [ + "zephyr-sdk" + ], + "tags": "get,zephyr-sdk" + }, + { + "names": [ + "cmsis" + ], + "tags": "get,cmsis" + }, + { + "names": [ + "microtvm" + ], + "tags": "get,microtvm" + }, + { + "names": [ + "cmake" + ], + "tags": "get,cmake", + "version_min": "3.20.0" + }, + { + "tags": "get,gcc" + } + ], + "input_mapping": { + "flash": "CM_FLASH_BOARD", + "recreate_binary": "CM_RECREATE_BINARY" + }, + "local_env_keys": [ + "CM_*" + ], + "new_env_keys": [ + "CM_TINY_*" + ], + "post_deps": [ + { + "enable_if_env": { + "CM_FLASH_BOARD": [ + "True" + ] + }, + "tags": "flash,tiny,mlperf" + } + ], + "tags": [ + "reproduce", + "tiny", + "results", + "mlperf", + "octoml", + "mlcommons" + ], + "uid": "a63803a707d04332", + "variations": { + "NRF": { + "env": { + "CM_TINY_BOARD": "NRF5340DK" + } + }, + "NUCLEO": { + "env": { + "CM_TINY_BOARD": "NUCLEO_L4R5ZI" + } + }, + "ad": { + "env": { + "CM_TINY_MODEL": "ad" + } + }, + "cmsis_nn": { + "env": { + "CM_MICROTVM_VARIANT": "microtvm_cmsis_nn" + } + }, + "ic": { + "env": { + "CM_TINY_MODEL": "ic" + } + }, + "kws": { + "env": { + "CM_TINY_MODEL": "kws" + } + }, + "native": { + "env": { + "CM_MICROTVM_VARIANT": "microtvm_native" + } + }, + "vww": { + "env": { + "CM_TINY_MODEL": "vww" + } + } + }, + "versions": { + "r1.0": { + "add_deps_recursive": { + "microtvm": { + "version": "main" + } + } + } + } +} diff --git a/script/reproduce-mlperf-octoml-tinyml-results/customize.py b/script/reproduce-mlperf-octoml-tinyml-results/customize.py new file mode 100644 index 0000000000..f63d95abaa --- /dev/null +++ b/script/reproduce-mlperf-octoml-tinyml-results/customize.py @@ -0,0 +1,23 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if 'CM_MICROTVM_VARIANT' not in env: + env['CM_MICROTVM_VARIANT'] = 'microtvm_cmsis_nn' + if 'CM_TINY_MODEL' not in env: + env['CM_TINY_MODEL'] = 'ic' + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + env['+C_INCLUDE_PATH'] = [] + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git 
a/script/reproduce-mlperf-octoml-tinyml-results/dockerfiles/ubuntu_20.04.Dockerfile b/script/reproduce-mlperf-octoml-tinyml-results/dockerfiles/ubuntu_20.04.Dockerfile new file mode 100644 index 0000000000..5a1672f1d8 --- /dev/null +++ b/script/reproduce-mlperf-octoml-tinyml-results/dockerfiles/ubuntu_20.04.Dockerfile @@ -0,0 +1,17 @@ +FROM ubuntu:20.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget +RUN python3 -m pip install cmind requests +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser +RUN cm pull repo ctuning@mlcommons-ck +RUN cm run script --quiet --tags=get,sys-utils-cm +RUN cm run script --quiet --tags=reproduce,tiny,octoml --env.CM_GH_TOKEN=$CM_GH_TOKEN diff --git a/script/reproduce-mlperf-octoml-tinyml-results/run.sh b/script/reproduce-mlperf-octoml-tinyml-results/run.sh new file mode 100644 index 0000000000..c8d2f077f0 --- /dev/null +++ b/script/reproduce-mlperf-octoml-tinyml-results/run.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +CUR_DIR=$PWD + +code=${CM_MICROTVM_SOURCE}/closed/OctoML/code +model=${CM_TINY_MODEL:-ad} +microtvm_variant=${CM_MICROTVM_VARIANT} +board=${CM_TINY_BOARD:-NUCLEO_L4R5ZI} +source=${code}/${microtvm_variant} + +path_suffix="${board}/${model}" +cmake_src=${source}/${path_suffix} +build_path=${CUR_DIR}/${path_suffix} +echo "CM_TINY_BUILD_DIR=${build_path}/build" > tmp-run-env.out +mkdir -p ${build_path} +cd ${build_path} +binary_path=${build_path}/build/zephyr/zephyr.elf +if [ -f "${binary_path}" ] && [ "${CM_RECREATE_BINARY}" != "True" ]; then + echo "ELF binary existing at ${binary_path}. Skipping regeneration." + cd build +else + rm -rf build + mkdir -p build + cd build + CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES:-2}} + cmake ${cmake_src} + test $? -eq 0 || exit 1 + make -j${CM_MAKE_CORES} + test $? -eq 0 || exit 1 + cd ../ + echo "ELF binary created at ${build_path}/build/zephyr/zephyr.elf" +fi diff --git a/script/reproduce-mlperf-training-nvidia/README.md b/script/reproduce-mlperf-training-nvidia/README.md new file mode 100644 index 0000000000..0364e7f6b5 --- /dev/null +++ b/script/reproduce-mlperf-training-nvidia/README.md @@ -0,0 +1,171 @@ +Automatically generated README for this automation recipe: **reproduce-mlperf-training-nvidia** + +Category: **Reproduce MLPerf benchmarks** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=reproduce-mlperf-training-nvidia,f183628f292341e2) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-training-nvidia)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *reproduce,mlcommons,mlperf,train,training,nvidia-training,nvidia* +* Output cached? 
*False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "reproduce mlcommons mlperf train training nvidia-training nvidia" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=reproduce,mlcommons,mlperf,train,training,nvidia-training,nvidia`
+
+`cm run script --tags=reproduce,mlcommons,mlperf,train,training,nvidia-training,nvidia[,variations] [--input_flags]`
+
+*or*
+
+`cmr "reproduce mlcommons mlperf train training nvidia-training nvidia"`
+
+`cmr "reproduce mlcommons mlperf train training nvidia-training nvidia [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'reproduce,mlcommons,mlperf,train,training,nvidia-training,nvidia',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+   print (r['error'])
+
+```
+
</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="reproduce,mlcommons,mlperf,train,training,nvidia-training,nvidia"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=reproduce,mlcommons,mlperf,train,training,nvidia-training,nvidia) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "reproduce mlcommons mlperf train training nvidia-training nvidia[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * Group "**benchmark**" +
+ Click here to expand this section. + + * `_resnet` + - Environment variables: + - *CM_MLPERF_TRAINING_BENCHMARK*: `resnet` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * prepare,mlperf,training,resnet,_nvidia + * CM names: `--adr.['prepare-training-data', 'nvidia-training-data']...` + - CM script: [prepare-training-data-resnet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/prepare-training-data-resnet) + * get,nvidia,training,code + * CM names: `--adr.['nvidia-training-code']...` + - CM script: [get-mlperf-training-nvidia-code](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-training-nvidia-code) + +
+ + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--results_dir=value` → `CM_MLPERF_RESULTS_DIR=value`
+* `--system_conf_name=value` → `CM_MLPERF_NVIDIA_TRAINING_SYSTEM_CONF_NAME=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "results_dir":...})
+```
+
</details>
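+
+For example, a minimal sketch (the configuration name below is hypothetical; the script's customize.py, added later in this patch, rejects runs without `--system_conf_name` and appends `.sh` to the name automatically):
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'reproduce,mlcommons,mlperf,train,training,nvidia-training,nvidia,_resnet',
+                  'out':'con',
+                  'system_conf_name':'config_DGXA100_001x08x0256',  # hypothetical conf name; '.sh' is appended
+                  'results_dir':'/home/user/results_dir'})          # illustrative path
+
+if r['return']>0:
+   print (r['error'])
+```
+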
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+
+#### Versions
+* `r2.1`
+* `r3.0`
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-training-nvidia/_cm.yaml)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * detect,cpu
+       - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu)
+     * get,nvidia-docker
+       - CM script: [get-nvidia-docker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-nvidia-docker)
+     * get,cuda
+       * CM names: `--adr.['cuda']...`
+       - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-training-nvidia/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-training-nvidia/_cm.yaml)
+  1. ***Run native script if exists***
+     * [run-resnet.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-training-nvidia/run-resnet.sh)
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-training-nvidia/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-training-nvidia/_cm.yaml)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-training-nvidia/customize.py)***
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/reproduce-mlperf-training-nvidia/_cm.yaml)
+
+___
+### Script output
+`cmr "reproduce mlcommons mlperf train training nvidia-training nvidia [,variations]" [--input_flags] -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
diff --git a/script/reproduce-mlperf-training-nvidia/_cm.yaml b/script/reproduce-mlperf-training-nvidia/_cm.yaml
new file mode 100644
index 0000000000..b61b49555d
--- /dev/null
+++ b/script/reproduce-mlperf-training-nvidia/_cm.yaml
@@ -0,0 +1,77 @@
+# Identification of this CM script
+alias: reproduce-mlperf-training-nvidia
+uid: f183628f292341e2
+cache: false
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "Reproduce MLPerf benchmarks"
+
+
+# User-friendly tags to find this CM script
+tags:
+- reproduce
+- mlcommons
+- mlperf
+- train
+- training
+- nvidia-training
+- nvidia
+
+
+# Map script inputs to environment variables
+input_mapping:
+  system_conf_name: CM_MLPERF_NVIDIA_TRAINING_SYSTEM_CONF_NAME
+  results_dir: CM_MLPERF_RESULTS_DIR
+
+new_state_keys:
+- mlperf-training-implementation
+- CM_SUT_*
+
+# Dependencies on other CM scripts
+
+deps:
+
+  # Detect host OS features
+  - tags: detect,os
+
+  # Detect host CPU features
+  - tags: detect,cpu
+
+  # Install system dependencies on a given host
+  - tags: get,nvidia-docker
+
+  # Detect CUDA
+  - names:
+    - cuda
+    tags: get,cuda
+    version: 11.7.0
+
+variations:
+  resnet:
+    group: benchmark
+    env:
+      CM_MLPERF_TRAINING_BENCHMARK: resnet
+    deps:
+    - tags: prepare,mlperf,training,resnet,_nvidia
+      names:
+      - prepare-training-data
+      - nvidia-training-data
+    - tags: get,nvidia,training,code
+      names:
+      - nvidia-training-code
+
+versions:
+  r2.1:
+    adr:
+      nvidia-training-code:
+        version: r2.1
+    env:
+      
resnet_benchmark_implementation: mxnet-22.04 + r3.0: + adr: + nvidia-training-code: + version: r3.0 + env: + resnet_benchmark_implementation: mxnet diff --git a/script/reproduce-mlperf-training-nvidia/customize.py b/script/reproduce-mlperf-training-nvidia/customize.py new file mode 100644 index 0000000000..25e9929d82 --- /dev/null +++ b/script/reproduce-mlperf-training-nvidia/customize.py @@ -0,0 +1,32 @@ +from cmind import utils +import os +import shutil + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + env = i['env'] + + conf = env.get('CM_MLPERF_NVIDIA_TRAINING_SYSTEM_CONF_NAME', '') + if conf == "": + return {'return':1, 'error': 'Please provide --system_conf_name='} + + if not conf.endswith(".sh"): + conf = conf + ".sh" + + if env.get('CM_MLPERF_TRAINING_BENCHMARK', '') == "resnet": + i['run_script_input']['script_name'] = "run-resnet" + + env['CONFIG_FILE'] = conf +# print(env) + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/reproduce-mlperf-training-nvidia/run-resnet.sh b/script/reproduce-mlperf-training-nvidia/run-resnet.sh new file mode 100644 index 0000000000..d64cf068c3 --- /dev/null +++ b/script/reproduce-mlperf-training-nvidia/run-resnet.sh @@ -0,0 +1,16 @@ +#!/bin/bash +benchmark_implementation=${benchmark_implementation:-"mxnet-22.04"} +echo "cd ${CM_MLPERF_TRAINING_NVIDIA_CODE_PATH}/benchmarks/resnet/implementations/${benchmark_implementation}" +cd ${CM_MLPERF_TRAINING_NVIDIA_CODE_PATH}/benchmarks/resnet/implementations/${benchmark_implementation} +docker build --pull -t mlperf-nvidia:image_classification . +test $? -eq 0 || exit $? +echo "source ${CONFIG_FILE}" +source ${CONFIG_FILE} +test $? -eq 0 || exit $? + +DATADIR=${CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH} +echo "DATADIR=${CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH}" + +echo "CONT=mlperf-nvidia:image_classification DATADIR=${DATADIR} LOGDIR=${RESULTS_DIR} ./run_with_docker.sh" +CONT=mlperf-nvidia:image_classification DATADIR=${DATADIR} LOGDIR=${RESULTS_DIR} ./run_with_docker.sh +test $? -eq 0 || exit $? diff --git a/script/reproduce-mlperf-training-nvidia/run.sh b/script/reproduce-mlperf-training-nvidia/run.sh new file mode 100644 index 0000000000..ddcd0b5504 --- /dev/null +++ b/script/reproduce-mlperf-training-nvidia/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${CM_RUN_DIR} + cmd=${CM_RUN_CMD} + echo "${cmd}" + eval "${cmd}" + test $? -eq 0 || exit $? +fi diff --git a/script/run-all-mlperf-models/README.md b/script/run-all-mlperf-models/README.md new file mode 100644 index 0000000000..d464d23c95 --- /dev/null +++ b/script/run-all-mlperf-models/README.md @@ -0,0 +1,237 @@ +
+Click here to see the table of contents. + +* [About](#about) +* [Summary](#summary) +* [Reuse this script in your project](#reuse-this-script-in-your-project) + * [ Install CM automation language](#install-cm-automation-language) + * [ Check CM script flags](#check-cm-script-flags) + * [ Run this script from command line](#run-this-script-from-command-line) + * [ Run this script from Python](#run-this-script-from-python) + * [ Run this script via GUI](#run-this-script-via-gui) + * [ Run this script via Docker (beta)](#run-this-script-via-docker-(beta)) +* [Customization](#customization) + * [ Variations](#variations) + * [ Default environment](#default-environment) +* [Script workflow, dependencies and native scripts](#script-workflow-dependencies-and-native-scripts) +* [Script output](#script-output) +* [New environment keys (filter)](#new-environment-keys-(filter)) +* [New environment keys auto-detected from customize](#new-environment-keys-auto-detected-from-customize) +* [Maintainers](#maintainers) + +
+ +*Note that this README is automatically generated - don't edit!* + +### About + +#### Summary + +* Category: *MLPerf benchmark support.* +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/master/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* CM "database" tags to find this script: *run,natively,all,mlperf-models* +* Output cached? *False* +___ +### Reuse this script in your project + +#### Install CM automation language + +* [Installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) +* [CM intro](https://doi.org/10.5281/zenodo.8105339) + +#### Pull CM repository with this automation + +```cm pull repo mlcommons@ck``` + + +#### Run this script from command line + +1. `cm run script --tags=run,natively,all,mlperf-models[,variations] ` + +2. `cmr "run natively all mlperf-models[ variations]" ` + +* `variations` can be seen [here](#variations) + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'run,natively,all,mlperf-models',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+   print (r['error'])
+
+```
+
</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="run,natively,all,mlperf-models"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,natively,all,mlperf-models) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run natively all mlperf-models[ variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_phoenix,reference` + - Workflow: + +
+ + + * Group "**implementation**" +
+ Click here to expand this section. + + * `_deepsparse` + - Environment variables: + - *DIVISION*: `open` + - *IMPLEMENTATION*: `deepsparse` + - Workflow: + * `_intel` + - Environment variables: + - *IMPLEMENTATION*: `intel` + - Workflow: + * `_mil` + - Environment variables: + - *IMPLEMENTATION*: `mil` + - Workflow: + * `_nvidia` + - Environment variables: + - *IMPLEMENTATION*: `nvidia` + - Workflow: + * `_qualcomm` + - Environment variables: + - *IMPLEMENTATION*: `qualcomm` + - Workflow: + * `_reference` + - Environment variables: + - *IMPLEMENTATION*: `reference` + - Workflow: + * `_tflite-cpp` + - Environment variables: + - *IMPLEMENTATION*: `tflite_cpp` + - Workflow: + +
+ + + * Group "**power**" +
+ Click here to expand this section. + + * **`_performance-only`** (default) + - Workflow: + * `_power` + - Environment variables: + - *POWER*: `True` + - Workflow: + +
+ + + * Group "**sut**" +
+ Click here to expand this section. + + * `_macbookpro-m1` + - Environment variables: + - *CATEGORY*: `edge` + - *DIVISION*: `closed` + - Workflow: + * `_orin.32g` + - Environment variables: + - *CATEGORY*: `edge` + - *DIVISION*: `closed` + - Workflow: + * `_phoenix` + - Environment variables: + - *CATEGORY*: `edge,datacenter` + - *DIVISION*: `closed` + - Workflow: + * `_sapphire-rapids.24c` + - Environment variables: + - *CATEGORY*: `edge,datacenter` + - *DIVISION*: `closed` + - Workflow: + +
+ + +#### Default variations + +`_performance-only` +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
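+
+No keys are predefined here, but the script's customize.py (added later in this patch) reads `MODELS`, `BACKENDS` and `DEVICES` from the environment and fails if no device is given. A minimal sketch with illustrative values, assuming the `env` dictionary is passed through as described above:
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'run,natively,all,mlperf-models,_reference,_phoenix',
+                  'out':'con',
+                  'env':{'MODELS':'resnet50,bert-99',     # comma-separated list of models
+                         'BACKENDS':'onnxruntime',        # optional; inferred per model if omitted
+                         'DEVICES':'cpu'}})               # required by customize.py
+
+if r['return']>0:
+   print (r['error'])
+```
+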
+ +___ +### Script workflow, dependencies and native scripts + +
+Click here to expand this section.
+
+  1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/_cm.yaml)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/_cm.yaml)
+  1. ***Run native script if exists***
+     * [run-bert-macos.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/run-bert-macos.sh)
+     * [run-bert.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/run-bert.sh)
+     * [run-cpp-implementation.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/run-cpp-implementation.sh)
+     * [run-mobilenet-models.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/run-mobilenet-models.sh)
+     * [run-nvidia-4090.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/run-nvidia-4090.sh)
+     * [run-nvidia-a100.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/run-nvidia-a100.sh)
+     * [run-nvidia-t4.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/run-nvidia-t4.sh)
+     * [run-pruned-bert.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/run-pruned-bert.sh)
+     * [run-reference-models.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/run-reference-models.sh)
+     * [run-resnet50-macos.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/run-resnet50-macos.sh)
+     * [run-resnet50.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/run-resnet50.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/_cm.yaml)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/customize.py)***
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-all-mlperf-models/_cm.yaml)
</details>
+ +___ +### Script output +`cmr "run natively all mlperf-models[,variations]" -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize + +___ +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) \ No newline at end of file diff --git a/script/run-all-mlperf-models/_cm.yaml b/script/run-all-mlperf-models/_cm.yaml new file mode 100644 index 0000000000..a53b73a193 --- /dev/null +++ b/script/run-all-mlperf-models/_cm.yaml @@ -0,0 +1,130 @@ +uid: 8d3cd46f54464810 +alias: run-all-mlperf-models + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: false + +category: MLPerf benchmark support + +tags: +- run +- natively +- all +- mlperf-models + +variations: + + mil: + group: implementation + env: + IMPLEMENTATION: mil + default_env: + MODELS: resnet50,retinanet + BACKENDS: onnxruntime + DEVICES: cpu,cuda + + reference: + group: implementation + env: + IMPLEMENTATION: reference + default_env: + MODELS: resnet50,retinanet,bert-99,3d-unet-99,rnnt + CATEGORY: edge + + nvidia: + group: implementation + env: + IMPLEMENTATION: nvidia + default_env: + MODELS: resnet50,retinanet,bert-99,bert-99.9,3d-unet-99,rnnt,gptj-99,gptj-99.9,dlrmv2-99,dlrmv2-99.9 + BACKENDS: tensorrt + DEVICES: cuda + + qualcomm: + group: implementation + env: + IMPLEMENTATION: qualcomm + default_env: + MODELS: resnet50,retinanet,bert-99,bert-99.9 + DIVISION: closed + BACKENDS: glow + DEVICES: qaic + + intel: + group: implementation + env: + IMPLEMENTATION: intel + default_env: + MODELS: resnet50,retinanet,bert-99,3d-unet-99,rnnt + DIVISION: closed + BACKENDS: pytorch + DEVICES: cpu + + deepsparse: + group: implementation + env: + DIVISION: open + IMPLEMENTATION: deepsparse + default_env: + MODELS: bert-99 + BACKENDS: deepsparse + DEVICES: cpu + + tflite-cpp: + group: implementation + env: + IMPLEMENTATION: tflite_cpp + default_env: + MODELS: mobilenet,efficientnet + CATEGORY: edge + DIVISION: open + BACKENDS: tflite + DEVICES: cpu + + performance-only: + group: power + default: true + + power: + group: power + env: + POWER: yes + default_env: + POWER_SERVER_IP: 192.168.0.15 + POWER_SERVER_PORT: 4950 + + + phoenix: + group: sut + env: + CATEGORY: edge,datacenter + DIVISION: closed + state: + resnet50: + cpu: + onnxruntime: + offline_target_qps: 250 + + phoenix,reference: + default_env: + DEVICES: cpu,cuda + + orin.32g: + group: sut + env: + CATEGORY: edge + DIVISION: closed + + sapphire-rapids.24c: + group: sut + env: + CATEGORY: edge,datacenter + DIVISION: closed + + macbookpro-m1: + group: sut + env: + CATEGORY: edge + DIVISION: closed diff --git a/script/run-all-mlperf-models/customize.py b/script/run-all-mlperf-models/customize.py new file mode 100644 index 0000000000..40f0fced40 --- /dev/null +++ b/script/run-all-mlperf-models/customize.py @@ -0,0 +1,103 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + state = i['state'] + meta = i['meta'] + script_path = i['run_script_input']['path'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + models = env['MODELS'].split(",") + + backends = env.get('BACKENDS') + if backends: + backends = backends.split(",") + + devices = env.get('DEVICES') + if devices: + devices = devices.split(",") + + print(backends) + implementation = env['IMPLEMENTATION'] + + power = env.get('POWER', '') + + if str(power).lower() in [ "yes", "true" ]: + POWER_STRING = " 
--power yes --adr.mlperf-power-client.power_server=" + env.get('POWER_SERVER', '192.168.0.15') + " --adr.mlperf-power-client.port=" + env.get('POWER_SERVER_PORT', '4950') + " " + else: + POWER_STRING = "" + + if not devices: + return {'return': 1, 'error': 'No device specified. Please set one or more (comma separated) of {cpu, qaic, cuda, rocm} for --env.DEVICES=<>'} + + for model in models: + env['MODEL'] = model + cmds = [] + run_script_content = '#!/bin/bash\nsource '+ os.path.join(script_path, "run-template.sh") + + if not backends: + if implementation == "reference": + if model == "resnet50": + backends = "tf,onnxruntime" + elif model == "retinanet": + backends = "onnxruntime,pytorch" + elif "bert" in model: + backends = "tf,onnxruntime,pytorch" + elif "3d-unet" in model: + backends = "tf,onnxruntime,pytorch" + elif model == "rnnt": + backends = "pytorch" + elif "gptj" in model: + backends = "pytorch" + elif "stable-diffusion-xl" in model: + backends = "pytorch" + elif "llama2-70b" in model: + backends = "pytorch" + backends = backends.split(",") + + for backend in backends: + + for device in devices: + offline_target_qps = (((state.get(model, {})).get(device, {})).get(backend, {})).get('offline_target_qps') + if offline_target_qps: + pass + else: #try to do a test run with reasonable number of samples to get and record the actual system performance + if device == "cpu": + if model == "resnet50": + test_query_count = 1000 + else: + test_query_count = 100 + else: + if model == "resnet50": + test_query_count = 10000 + else: + test_query_count = 1000 + cmd = f'run_test "{backend}" "{test_query_count}" "{implementation}" "{device}" "$find_performance_cmd"' + cmds.append(cmd) + #second argument is unused for submission_cmd + cmd = f'run_test "{backend}" "100" "{implementation}" "{device}" "$submission_cmd"' + cmds.append(cmd) + run_file_name = 'tmp-'+model+'-run' + run_script_content += "\n\n" +"\n\n".join(cmds) + with open(os.path.join(script_path, run_file_name+".sh"), 'w') as f: + f.write(run_script_content) + print(cmds) + + + + + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/run-all-mlperf-models/run-bert-macos.sh b/script/run-all-mlperf-models/run-bert-macos.sh new file mode 100644 index 0000000000..5d46fd1131 --- /dev/null +++ b/script/run-all-mlperf-models/run-bert-macos.sh @@ -0,0 +1,75 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division="closed" +model="bert-99" +device="cpu" +category="edge" +rerun="$rerun" +function run_test() { + backend=$1 + test_query_count=$2 + implementation=$3 + device=$4 + run "$5" +} +power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' +power="" +#Add your run commands here... 
+find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +# run "$CM_RUN_CMD" +run_test "onnxruntime" "100" "reference" "cpu" "$find_performance_cmd" +run_test "tf" "100" "reference" "cpu" "$find_performance_cmd" +run_test "pytorch" "200" "reference" "cpu" "$find_performance_cmd" + +scenario="SingleStream" +run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd_scenario" +run_test "tf" "100" "reference" "cpu" "$submission_cmd_scenario" +run_test "pytorch" "100" "reference" "cpu" "$submission_cmd_scenario" +scenario="Offline" +division="closed" +run_test "tf" "100" "reference" "cpu" "$submission_cmd_scenario" diff --git a/script/run-all-mlperf-models/run-bert.sh b/script/run-all-mlperf-models/run-bert.sh new file mode 100644 index 0000000000..08cddaddef --- /dev/null +++ b/script/run-all-mlperf-models/run-bert.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division="closed" +model="bert-99" +device="cpu" +category="edge" +rerun="$rerun" + +function run_test() { + backend=$1 + test_query_count=$2 + implementation=$3 + device=$4 + run "$5" +} +power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' + +#Add your run commands here... 
+find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +# run "$CM_RUN_CMD" +run_test "onnxruntime" "20" "reference" "cpu" "$find_performance_cmd" +run_test "tf" "20" "reference" "cpu" "$find_performance_cmd" +run_test "pytorch" "200" "reference" "cpu" "$find_performance_cmd" +run_test "onnxruntime" "10000" "reference" "cuda" "$find_performance_cmd" +run_test "tf" "10000" "reference" "cuda" "$find_performance_cmd" +run_test "pytorch" "10000" "reference" "cuda" "$find_performance_cmd" + +run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd" +#run_test "tf" "100" "reference" "cpu" "$submission_cmd" +run_test "pytorch" "100" "reference" "cpu" "$submission_cmd" +run_test "onnxruntime" "100" "reference" "cuda" "$submission_cmd " +run_test "tf" "100" "reference" "cuda" "$submission_cmd" +run_test "pytorch" "100" "reference" "cuda" "$submission_cmd" + diff --git a/script/run-all-mlperf-models/run-cpp-implementation.sh b/script/run-all-mlperf-models/run-cpp-implementation.sh new file mode 100644 index 0000000000..704abff2d7 --- /dev/null +++ b/script/run-all-mlperf-models/run-cpp-implementation.sh @@ -0,0 +1,163 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division="closed" +#Add your run commands here... 
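+# Note: POWER is set and then immediately cleared below; keep only the first
+# assignment to enable MLPerf power measurement through the power client.
+# All cpp runs pass --adr.compiler.tags=gcc so the C++ implementation and
+# loadgen are built with GCC.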
+# run "$CM_RUN_CMD"
+
+POWER=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 "
+POWER=""
+
+run "cm run script --tags=set,system,performance,mode"
+
+#cpp
+run "cm run script --tags=generate-run-cmds,inference,_find-performance \
+--model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \
+--adr.compiler.tags=gcc \
+--category=edge --division=open --scenario=Offline --quiet --test_query_count=2000"
+
+run "cm run script --tags=generate-run-cmds,inference,_find-performance \
+--model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \
+--adr.compiler.tags=gcc \
+--category=edge --division=open --scenario=Offline --quiet"
+
+
+run "cm run script --tags=generate-run-cmds,inference,_submission \
+--model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \
+--scenario=Offline \
+--category=edge --division=$division --quiet \
+--adr.compiler.tags=gcc \
+--execution-mode=valid \
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
+
+run "cm run script --tags=generate-run-cmds,inference,_submission \
+--model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \
+--scenario=Offline \
+--category=edge --division=$division --quiet \
+--adr.compiler.tags=gcc \
+--execution-mode=valid \
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
+
+run "cm run script --tags=generate-run-cmds,inference,_submission \
+--model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \
+--scenario=SingleStream \
+--category=edge --division=$division --quiet \
+--adr.compiler.tags=gcc \
+--execution-mode=valid \
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
+
+run "cm run script --tags=generate-run-cmds,inference,_submission \
+--model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \
+--scenario=SingleStream \
+--category=edge --division=$division --quiet \
+--adr.compiler.tags=gcc \
+--execution-mode=valid \
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
+
+# GPU
+
+run "cm run script --tags=generate-run-cmds,inference,_find-performance \
+--model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \
+--adr.compiler.tags=gcc \
+--test_query_count=20000 \
+--category=edge --division=open --scenario=Offline --quiet"
+
+run "cm run script --tags=generate-run-cmds,inference,_find-performance \
+--model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \
+--adr.compiler.tags=gcc \
+--test_query_count=2000 \
+--category=edge --division=open --scenario=Offline --quiet"
+
+
+run "cm run script --tags=generate-run-cmds,inference,_submission \
+--scenario=Offline \
+--model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \
+--category=edge --division=$division --quiet \
+--adr.compiler.tags=gcc \
+--execution-mode=valid \
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
+
+run "cm run script --tags=generate-run-cmds,inference,_submission \
+--model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \
+--scenario=Offline \
+--category=edge --division=$division --quiet \
+--adr.compiler.tags=gcc \
+--execution-mode=valid \
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
+
+
+run "cm run script --tags=generate-run-cmds,inference,_submission \
+--model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \
+--scenario=SingleStream \
+--category=edge --division=$division --quiet \
+--adr.compiler.tags=gcc \
+--execution-mode=valid \
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
+
+run "cm run script --tags=generate-run-cmds,inference,_submission \
+--model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \
+--scenario=SingleStream \
+--category=edge --division=$division --quiet \
+--adr.compiler.tags=gcc \
+--execution-mode=valid \
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
+
+#multistream
+run "cm run script --tags=generate-run-cmds,inference,_submission \
+--model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \
+--scenario=MultiStream \
+--category=edge --division=$division --quiet \
+--adr.compiler.tags=gcc \
+--execution-mode=valid \
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
+
+run "cm run script --tags=generate-run-cmds,inference,_submission \
+--model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \
+--scenario=MultiStream \
+--category=edge --division=$division --quiet \
+--adr.compiler.tags=gcc \
+--execution-mode=valid \
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
diff --git a/script/run-all-mlperf-models/run-mobilenet-models.sh b/script/run-all-mlperf-models/run-mobilenet-models.sh
new file mode 100644
index 0000000000..41497d56d2
--- /dev/null
+++ b/script/run-all-mlperf-models/run-mobilenet-models.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+
+
+function exit_if_error() {
+  test $? -eq 0 || exit $?
+}
+
+function run() {
+  echo "Running: "
+  echo "$1"
+  echo ""
+  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+    eval "$1"
+    exit_if_error
+  fi
+}
+POWER=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4940 "
+POWER=""
+extra_option=""
+extra_tags=""
+#extra_option=" --adr.mlperf-inference-implementation.compressed_dataset=on"
+#extra_tags=",_only-fp32"
+
+
+#Add your run commands here...
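+# Optional toggles kept above as comments: extra_option enables the
+# compressed dataset inside the implementation, and extra_tags=",_only-fp32"
+# limits the sweep to fp32 model variants.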
+# run "$CM_RUN_CMD" +run "cm run script --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ +--adr.compiler.tags=gcc \ +${extra_option} \ +--results_dir=$HOME/results_dir" + +run "cm run script --tags=run,mobilenet-models,_tflite,_performance-only$extra_tags \ +${POWER} \ +--adr.compiler.tags=gcc \ +${extra_option} \ +--results_dir=$HOME/results_dir" + +run "cm run script --tags=run,mobilenet-models,_tflite,_populate-readme$extra_tags \ +${POWER} \ +--adr.compiler.tags=gcc \ +${extra_option} \ +--results_dir=$HOME/results_dir" + +run "cm run script --tags=run,mobilenet-models,_tflite,_armnn,_neon,_accuracy-only$extra_tags \ +--adr.compiler.tags=gcc \ +${extra_option} \ +--results_dir=$HOME/results_dir" + +run "cm run script --tags=run,mobilenet-models,_tflite,_armnn,_neon,_performance-only$extra_tags \ +${POWER} \ +${extra_option} \ +--adr.compiler.tags=gcc \ +--results_dir=$HOME/results_dir" + +run "cm run script --tags=run,mobilenet-models,_tflite,_armnn,_neon,_populate-readme$extra_tags \ +${POWER} \ +${extra_option} \ +--adr.compiler.tags=gcc \ +--results_dir=$HOME/results_dir" diff --git a/script/run-all-mlperf-models/run-nvidia-4090.sh b/script/run-all-mlperf-models/run-nvidia-4090.sh new file mode 100644 index 0000000000..033fa9d9ed --- /dev/null +++ b/script/run-all-mlperf-models/run-nvidia-4090.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +function run_model() { + model="$1" + test_query_count="$2" + run "$3" +} +division="open" +division="closed" +device="cuda" +backend="tensorrt" +implementation="nvidia-original" +category="datacenter-edge" +category="edge" +power="" +power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15" +#Add your run commands here... 
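+# run_model wraps run(); the extra flags appended to ${submission_cmd} below
+# (offline/server target QPS and single/multi-stream target latencies)
+# override the recorded performance targets for this particular RTX 4090 system.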
+# run "$CM_RUN_CMD" +find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' + +#run "resnet50" "100000" "${find_performance_cmd}" +#run "retinanet" "10000" "${find_performance_cmd}" +#run "rnnt" "100000" "${find_performance_cmd}" +#run "bert-99" "20000" "${find_performance_cmd}" +#run "3d-unet" "30" "${find_performance_cmd}" + + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --execution-mode=valid --implementation=$implementation --device=$device --backend=$backend --results_dir=$HOME/results_dir \ +--category=$category --division=$division --skip_submission_generation=yes --quiet $power' + +#run_model "bert-99.9" "10" "${submission_cmd} --offline_target_qps=1680 --server_target_qps=1520" +run_model "resnet50" "10" "${submission_cmd} --offline_target_qps=45000 --server_target_qps=38000 --singlestream_target_latency=0.2 --multistream_target_latency=0.4" +run_model "rnnt" "10" "${submission_cmd} --offline_target_qps=15200 --server_target_qps=14150 --singlestream_target_latency=23" +run_model "retinanet" "10" "${submission_cmd} --offline_target_qps=620 --server_target_qps=590 --singlestream_target_latency=2 --multistream_target_latency=14" +run_model "bert-99" "10" "${submission_cmd} --offline_target_qps=4100 --server_target_qps=3950 --singlestream_target_latency=1" +run_model "3d-unet-99.9" "10" "${submission_cmd} --offline_target_qps=4 --singlestream_target_latency=433 --env.CM_MLPERF_USE_MAX_DURATION=no" diff --git a/script/run-all-mlperf-models/run-nvidia-a100.sh b/script/run-all-mlperf-models/run-nvidia-a100.sh new file mode 100644 index 0000000000..4b5fb40fcf --- /dev/null +++ b/script/run-all-mlperf-models/run-nvidia-a100.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +function run_model() { + model="$1" + test_query_count="$2" + run "$3" +} +division="closed" +device="cuda" +backend="tensorrt" +implementation="nvidia-original" +category="edge" +power="" +connection_type="sxm" + +#Add your run commands here... 
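+# connection_type is forwarded to the NVIDIA harness dependency tags
+# (here "_sxm") in the submission command below, so the same script can be
+# reused for differently connected A100 systems.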
+# run "$CM_RUN_CMD"
+find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \
+--model=$model --implementation=$implementation --device=$device --backend=$backend \
+--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count'
+
+run_model "resnet50" "100000" "${find_performance_cmd}"
+run_model "retinanet" "10000" "${find_performance_cmd}"
+run_model "rnnt" "100000" "${find_performance_cmd}"
+run_model "bert-99" "20000" "${find_performance_cmd}"
+run_model "3d-unet-99.9" "30" "${find_performance_cmd}"
+
+
+submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \
+--model=$model --implementation=$implementation --device=$device --backend=$backend \
+--category=$category --division=$division --quiet --adr.nvidia-harness.tags=_${connection_type} $power'
+
+run_model "resnet50" "10" "${submission_cmd}"
+run_model "retinanet" "10" "${submission_cmd}"
+run_model "rnnt" "10" "${submission_cmd}"
+run_model "bert-99" "10" "${submission_cmd}"
+run_model "3d-unet-99.9" "10" "${submission_cmd} --env.CM_MLPERF_USE_MAX_DURATION='no'"
diff --git a/script/run-all-mlperf-models/run-nvidia-t4.sh b/script/run-all-mlperf-models/run-nvidia-t4.sh
new file mode 100644
index 0000000000..835c1adadf
--- /dev/null
+++ b/script/run-all-mlperf-models/run-nvidia-t4.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+
+
+function exit_if_error() {
+  test $? -eq 0 || exit $?
+}
+
+function run() {
+  echo "Running: "
+  echo "$1"
+  echo ""
+  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+    eval "$1"
+    exit_if_error
+  fi
+}
+function run_model() {
+  model="$1"
+  test_query_count="$2"
+  run "$3"
+}
+division="closed"
+device="cuda"
+backend="tensorrt"
+implementation="nvidia-original"
+category="edge,datacenter"
+
+#Add your run commands here...
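+# category lists both MLPerf categories, so the generated results can be
+# used for edge as well as datacenter submissions.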
+# run "$CM_RUN_CMD"
+find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \
+--model=$model --implementation=$implementation --device=$device --backend=$backend \
+--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count'
+
+run_model "resnet50" "30000" "${find_performance_cmd}"
+run_model "retinanet" "2000" "${find_performance_cmd}"
+run_model "rnnt" "20000" "${find_performance_cmd}"
+run_model "bert-99" "10000" "${find_performance_cmd}"
+run_model "bert-99.9" "5000" "${find_performance_cmd}"
+run_model "3d-unet" "10" "${find_performance_cmd}"
+
+
+submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \
+--model=$model --implementation=$implementation --device=$device --backend=$backend \
+--category=$category --division=$division --quiet'
+
+run_model "resnet50" "10" "${submission_cmd}"
+run_model "retinanet" "10" "${submission_cmd}"
+run_model "rnnt" "10" "${submission_cmd}"
+run_model "bert-99" "10" "${submission_cmd}"
+run_model "bert-99.9" "10" "${submission_cmd}"
+run_model "3d-unet" "10" "${submission_cmd}"
diff --git a/script/run-all-mlperf-models/run-pruned-bert.sh b/script/run-all-mlperf-models/run-pruned-bert.sh
new file mode 100644
index 0000000000..8c6d8bd1db
--- /dev/null
+++ b/script/run-all-mlperf-models/run-pruned-bert.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+#not working
+#"zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none" \
+#"zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none" \
+#zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90-none \
+#zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/base_quant-none \
+#"zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85-none" \
+#"zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/base_quant-none" \
+#"zoo:nlp/question_answering/oberta-medium/pytorch/huggingface/squad/base-none" \
+#"zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/base-none" \
+#"zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/base-none" \
+#"zoo:nlp/question_answering/roberta-large/pytorch/huggingface/squad/base-none" \
+#"zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned95-none" \
+#"zoo:nlp/question_answering/distilbert-none/pytorch/huggingface/squad/pruned90-none" \
+#"zoo:nlp/question_answering/oberta-small/pytorch/huggingface/squad/base-none" \
+#"zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/base_quant-none" \
+#"zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none" \
+
+zoo_stub_list=( \
+"zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni" \
+"zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni" \
+"zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none" \
+"zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none" \
+"zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni" \
+"zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none" \
+"zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none" \
+"zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none" \
+"zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni" \
+"zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni" \ +"zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none" \ +"zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none" \ +"zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none" \ +"zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none" \ +) + +rerun="" +power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --env.CM_MLPERF_SKIP_POWER_CHECKS=yes" +power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15" +power="" +max_batchsize=128 +max_batchsize=1 +scenario="Offline" +scenario="SingleStream" + +if [[ $scenario == "Offline" ]]; then +for stub in ${zoo_stub_list[@]}; do +cmd="cm run script --tags=run,mlperf,inference,generate-run-cmds,_find-performance \ + --adr.python.version_min=3.8 \ + --implementation=reference \ + --model=bert-99 \ + --precision=int8 \ + --backend=deepsparse \ + --device=cpu \ + --scenario=Offline \ + --test_query_count=15000 \ + --adr.mlperf-inference-implementation.max_batchsize=$max_batchsize \ + --results_dir=$HOME/results_dir \ + --env.CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=$stub \ + ${rerun} \ + --quiet" + echo ${cmd} + eval ${cmd} +done +fi + +for stub in ${zoo_stub_list[@]}; do + cmd="cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission \ + --adr.python.version_min=3.8 \ + --adr.compiler.tags=gcc \ + --implementation=reference \ + --model=bert-99 \ + --precision=int8 \ + --backend=deepsparse \ + --device=cpu \ + --scenario=$scenario \ + --execution_mode=valid \ + --adr.mlperf-inference-implementation.max_batchsize=$max_batchsize \ + ${power} \ + --results_dir=$HOME/results_dir \ + --env.CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=$stub \ + --quiet" + echo ${cmd} + eval ${cmd} +done diff --git a/script/run-all-mlperf-models/run-reference-models.sh b/script/run-all-mlperf-models/run-reference-models.sh new file mode 100644 index 0000000000..41898f1450 --- /dev/null +++ b/script/run-all-mlperf-models/run-reference-models.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division="closed" +#Add your run commands here... 
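+# Sweep over the MLCommons reference implementations on CPU: one
+# find-performance pass per model below, followed by _all-scenarios
+# submission runs for the closed division.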
+# run "$CM_RUN_CMD" +run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=100" + +run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=rnnt --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=open --scenario=Offline --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=retinanet --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=open --scenario=Offline --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=bert-99 --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=open --scenario=Offline --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=3d-unet-99 --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=open --scenario=Offline --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \ +--category=edge --division=$division --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=rnnt --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=$division --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=retinanet --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=$division --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=bert-99 --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=$division --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=3d-unet-99 --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=$division --quiet" + diff --git a/script/run-all-mlperf-models/run-resnet50-macos.sh b/script/run-all-mlperf-models/run-resnet50-macos.sh new file mode 100644 index 0000000000..8d00ddc79c --- /dev/null +++ b/script/run-all-mlperf-models/run-resnet50-macos.sh @@ -0,0 +1,70 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division="closed" +model="resnet50" +device="cpu" +category="edge" +rerun="$rerun" +function run_test() { + backend=$1 + test_query_count=$2 + implementation=$3 + device=$4 + run "$5" +} +power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' +power="" +#Add your run commands here... 
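+# power is cleared above, so these macOS runs skip power measurement;
+# --rerun is appended to the find-performance commands below to force
+# re-execution even if previous results exist.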
+find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +# run "$CM_RUN_CMD" +run_test "onnxruntime" "6000" "reference" "cpu" "$find_performance_cmd --rerun" +run_test "tf" "6000" "reference" "cpu" "$find_performance_cmd --rerun" + +run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd" +run_test "tf" "100" "reference" "cpu" "$submission_cmd" + diff --git a/script/run-all-mlperf-models/run-resnet50.sh b/script/run-all-mlperf-models/run-resnet50.sh new file mode 100644 index 0000000000..df2789d8ce --- /dev/null +++ b/script/run-all-mlperf-models/run-resnet50.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division="closed" +model="resnet50" +device="cpu" +category="edge" +rerun="$rerun" +function run_test() { + backend=$1 + test_query_count=$2 + implementation=$3 + device=$4 + run "$5" +} +power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' + +#Add your run commands here... 
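+# Besides the reference implementation, this script also exercises the
+# tflite-cpp implementation below (built with gcc), optionally with the
+# compressed-dataset option of the implementation.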
+find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +# run "$CM_RUN_CMD" +run_test "onnxruntime" "200" "reference" "cpu" "$find_performance_cmd" +run_test "tf" "200" "reference" "cpu" "$find_performance_cmd" +run_test "onnxruntime" "10000" "reference" "cuda" "$find_performance_cmd" +run_test "tf" "20000" "reference" "cuda" "$find_performance_cmd" + +run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd" +run_test "tf" "100" "reference" "cpu" "$submission_cmd" +scenario="SingleStream" +run_test "tflite" "100" "tflite-cpp" "cpu" "$submission_cmd_scenario --adr.compiler.tags=gcc" +run_test "tflite" "100" "tflite-cpp" "cpu" "$submission_cmd_scenario --adr.compiler.tags=gcc --adr.mlperf-inference-implementation.compressed_dataset=on" +run_test "onnxruntime" "100" "reference" "cuda" "$submission_cmd " +scenario="Offline" +run_test "tf" "100" "reference" "cuda" "$submission_cmd_scenario" +scenario="SingleStream" +run_test "tf" "100" "reference" "cuda" "$submission_cmd_scenario" + +run_test "onnxruntime" "100" "reference" "cpu" "$readme_cmd" +run_test "tf" "100" "reference" "cpu" "$readme_cmd" +run_test "tflite" "100" "tflite-cpp" "cpu" "$readme_cmd_single --adr.compiler.tags=gcc --scenario=SingleStream" +run_test "tflite" "100" "tflite-cpp" "cpu" "$readme_cmd_single --adr.compiler.tags=gcc --scenario=SingleStream --adr.mlperf-inference-implementation.compressed_dataset=on" +run_test "onnxruntime" "100" "reference" "cuda" "$readme_cmd --scenario=SingleStream" +run_test "tf" "100" "reference" "cuda" "$readme_cmd_single --scenario=SingleStream" +run_test "tf" "100" "reference" "cuda" "$readme_cmd_single --scenario=Offline" diff --git a/script/run-all-mlperf-models/run-retinanet-sh b/script/run-all-mlperf-models/run-retinanet-sh new file mode 100644 index 0000000000..6f0bac9c5d --- /dev/null +++ b/script/run-all-mlperf-models/run-retinanet-sh @@ -0,0 +1,86 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any 
variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division="closed" +model="retinanet" +device="cpu" +category="edge" +rerun="$rerun" +function run_test() { + backend=$1 + test_query_count=$2 + implementation=$3 + device=$4 + run "$5" +} +power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' + +#Add your run commands here... +find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +# run "$CM_RUN_CMD" +run_test "onnxruntime" "50" "reference" "cpu" "$find_performance_cmd" +run_test "pytorch" "100" "reference" "cpu" "$find_performance_cmd" +run_test "onnxruntime" "1000" "reference" "cuda" "$find_performance_cmd" +run_test "pytorch" "1000" "reference" "cuda" "$find_performance_cmd" + +scenario=SingleStream +run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd_scenario" +scenario=Offline +run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd_scenario" +scenario=SingleStream +run_test "pytorch" "100" "reference" "cpu" "$submission_cmd_scenario" +scenario=Offline +run_test "pytorch" "100" "reference" "cpu" "$submission_cmd_scenario" +scenario=SingleStream +run_test "onnxruntime" "100" "reference" "cuda" "$submission_cmd_scenario" +scenario=Offline +run_test "onnxruntime" "100" "reference" "cuda" "$submission_cmd_scenario" +scenario=SingleStream +run_test "pytorch" "100" "reference" "cuda" "$submission_cmd_scenario" +scenario=Offline +run_test "pytorch" "100" "reference" "cuda" "$submission_cmd_scenario" + diff --git a/script/run-all-mlperf-models/template.sh b/script/run-all-mlperf-models/template.sh new file mode 100644 index 0000000000..42ecda5ad9 --- 
/dev/null
+++ b/script/run-all-mlperf-models/template.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+
+
+function exit_if_error() {
+  test $? -eq 0 || exit $?
+}
+
+function run() {
+  echo "Running: "
+  echo "$1"
+  echo ""
+  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+    eval "$1"
+    exit_if_error
+  fi
+}
+division=$DIVISION
+model=$MODEL
+device=$DEVICE
+category=$CATEGORY
+rerun=$RERUN
+
+function run_test() {
+  backend=$1
+  test_query_count=$2
+  implementation=$3
+  device=$4
+  run "$5"
+}
+
+#power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 '
+power=${POWER_STRING}
+
+#Add your run commands here...
+find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \
+--model=$model --implementation=$implementation --device=$device --backend=$backend \
+--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun'
+
+submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \
+--model=$model --implementation=$implementation --device=$device --backend=$backend \
+--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \
+--skip_submission_generation=yes --execution-mode=valid $power'
+
+submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \
+--model=$model --implementation=$implementation --device=$device --backend=$backend \
+--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \
+--skip_submission_generation=yes --execution-mode=valid $power'
+
+readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \
+--model=$model --implementation=$implementation --device=$device --backend=$backend \
+--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \
+--skip_submission_generation=yes --execution-mode=valid $power'
+
+readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=$model --implementation=$implementation --device=$device --backend=$backend \
+--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \
+--skip_submission_generation=yes --execution-mode=valid $power'
+
diff --git a/script/run-docker-container/README-extra.md b/script/run-docker-container/README-extra.md
new file mode 100644
index 0000000000..8fd2604726
--- /dev/null
+++ b/script/run-docker-container/README-extra.md
@@ -0,0 +1,15 @@
+This script runs a docker container and launches the given CM script inside it.
+If the container image does not exist, the corresponding build is initiated via CM dependencies.
+
+## How to Run
+```bash
+cm run script \
+--tags=run,docker,container
+```
+### Options
+1. `--script_tags="get,gcc"`: Script tags for the CM script to be run inside the docker container.
+   If this is not set, the cm command run inside the docker container is `cm version`.
+2. `--cm_repo=ctuning@mlcommons-ck`: Use a different repo for CM scripts, such as "ctuning@mlcommons-ck". Default: `mlcommons@ck`
+3. `--base="ubuntu:22.04"`: Specify the base image for the Dockerfile. Default: "ubuntu:20.04"
+4. `--recreate=yes`: Recreate the docker image even if it already exists. Default: "no"
+5. 
`--adr.build-docker-image.tags=_cache`: To use build cache for docker image build. Default: "" (`nocache`) diff --git a/script/run-docker-container/README.md b/script/run-docker-container/README.md new file mode 100644 index 0000000000..4c23be33eb --- /dev/null +++ b/script/run-docker-container/README.md @@ -0,0 +1,166 @@ +Automatically generated README for this automation recipe: **run-docker-container** + +Category: **Docker automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=run-docker-container,1e0c884107514b46) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-docker-container)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *run,docker,container* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "run docker container" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=run,docker,container` + +`cm run script --tags=run,docker,container [--input_flags]` + +*or* + +`cmr "run docker container"` + +`cmr "run docker container " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'run,docker,container',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="run,docker,container"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,docker,container) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run docker container" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--all_gpus=value` → `CM_DOCKER_ADD_ALL_GPUS=value`
+* `--base=value` → `CM_DOCKER_IMAGE_BASE=value`
+* `--cache=value` → `CM_DOCKER_CACHE=value`
+* `--cm_repo=value` → `CM_MLOPS_REPO=value`
+* `--detached=value` → `CM_DOCKER_DETACHED_MODE=value`
+* `--device=value` → `CM_DOCKER_ADD_DEVICE=value`
+* `--docker_image_base=value` → `CM_DOCKER_IMAGE_BASE=value`
+* `--docker_os=value` → `CM_DOCKER_OS=value`
+* `--docker_os_version=value` → `CM_DOCKER_OS_VERSION=value`
+* `--extra_run_args=value` → `CM_DOCKER_EXTRA_RUN_ARGS=value`
+* `--fake_run_option=value` → `CM_DOCKER_FAKE_RUN_OPTION=value`
+* `--gh_token=value` → `CM_GH_TOKEN=value`
+* `--image_name=value` → `CM_DOCKER_IMAGE_NAME=value`
+* `--image_repo=value` → `CM_DOCKER_IMAGE_REPO=value`
+* `--image_tag=value` → `CM_DOCKER_IMAGE_TAG=value`
+* `--interactive=value` → `CM_DOCKER_INTERACTIVE_MODE=value`
+* `--it=value` → `CM_DOCKER_INTERACTIVE=value`
+* `--mounts=value` → `CM_DOCKER_VOLUME_MOUNTS=value`
+* `--pass_user_group=value` → `CM_DOCKER_PASS_USER_GROUP=value`
+* `--port_maps=value` → `CM_DOCKER_PORT_MAPS=value`
+* `--post_run_cmds=value` → `CM_DOCKER_POST_RUN_COMMANDS=value`
+* `--pre_run_cmds=value` → `CM_DOCKER_PRE_RUN_COMMANDS=value`
+* `--real_run=value` → `CM_REAL_RUN=value`
+* `--recreate=value` → `CM_DOCKER_IMAGE_RECREATE=value`
+* `--run_cmd=value` → `CM_DOCKER_RUN_CMD=value`
+* `--run_cmd_extra=value` → `CM_DOCKER_RUN_CMD_EXTRA=value`
+* `--save_script=value` → `CM_DOCKER_SAVE_SCRIPT=value`
+* `--script_tags=value` → `CM_DOCKER_RUN_SCRIPT_TAGS=value`
+* `--shm_size=value` → `CM_DOCKER_SHM_SIZE=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "all_gpus":...})
+```
+
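+For example, a minimal sketch of running this script from Python (the
+script tags come from the flag table above; the remaining values are
+illustrative placeholders):
+
+```python
+import cmind
+
+# Launch the "get,gcc" CM script inside a container in attached mode
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'run,docker,container',
+                  'script_tags': 'get,gcc',
+                  'docker_os': 'ubuntu',
+                  'docker_os_version': '22.04',
+                  'detached': 'no',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```
+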
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_DOCKER_DETACHED_MODE: `yes` + +
+
+___
+### Dependencies on other CM scripts
+
+
+  1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-docker-container/_cm.json)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-docker-container/customize.py)***
+  1. ***Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-docker-container/_cm.json)***
+     * build,docker,image
+       * `if (CM_DOCKER_IMAGE_EXISTS != yes)`
+       * CM names: `--adr.['build-docker-image']...`
+       - CM script: [build-docker-image](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/build-docker-image)
+  1. ***Run native script if exists***
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-docker-container/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-docker-container/customize.py)***
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-docker-container/_cm.json)
+
+___
+### Script output
+`cmr "run docker container " [--input_flags] -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
diff --git a/script/run-docker-container/_cm.json b/script/run-docker-container/_cm.json
new file mode 100644
index 0000000000..30d490bf5b
--- /dev/null
+++ b/script/run-docker-container/_cm.json
@@ -0,0 +1,61 @@
+{
+  "alias": "run-docker-container",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "category": "Docker automation",
+  "cache": false,
+  "clean_files": [],
+  "default_env": {
+    "CM_DOCKER_DETACHED_MODE": "yes"
+  },
+  "input_mapping": {
+    "interactive": "CM_DOCKER_INTERACTIVE_MODE",
+    "base": "CM_DOCKER_IMAGE_BASE",
+    "cm_repo": "CM_MLOPS_REPO",
+    "recreate": "CM_DOCKER_IMAGE_RECREATE",
+    "gh_token": "CM_GH_TOKEN",
+    "it": "CM_DOCKER_INTERACTIVE",
+    "fake_run_option": "CM_DOCKER_FAKE_RUN_OPTION",
+    "detached": "CM_DOCKER_DETACHED_MODE",
+    "image_repo": "CM_DOCKER_IMAGE_REPO",
+    "image_name": "CM_DOCKER_IMAGE_NAME",
+    "image_tag": "CM_DOCKER_IMAGE_TAG",
+    "docker_os": "CM_DOCKER_OS",
+    "docker_os_version": "CM_DOCKER_OS_VERSION",
+    "docker_image_base": "CM_DOCKER_IMAGE_BASE",
+    "script_tags": "CM_DOCKER_RUN_SCRIPT_TAGS",
+    "run_cmd_extra": "CM_DOCKER_RUN_CMD_EXTRA",
+    "real_run": "CM_REAL_RUN",
+    "run_cmd": "CM_DOCKER_RUN_CMD",
+    "pre_run_cmds": "CM_DOCKER_PRE_RUN_COMMANDS",
+    "post_run_cmds": "CM_DOCKER_POST_RUN_COMMANDS",
+    "pass_user_group": "CM_DOCKER_PASS_USER_GROUP",
+    "mounts": "CM_DOCKER_VOLUME_MOUNTS",
+    "port_maps": "CM_DOCKER_PORT_MAPS",
+    "shm_size": "CM_DOCKER_SHM_SIZE",
+    "extra_run_args": "CM_DOCKER_EXTRA_RUN_ARGS",
+    "device": "CM_DOCKER_ADD_DEVICE",
+    "cache": "CM_DOCKER_CACHE",
+    "all_gpus": "CM_DOCKER_ADD_ALL_GPUS",
+    "save_script": "CM_DOCKER_SAVE_SCRIPT"
+  },
+  "prehook_deps": [
+    {
+      "names": [
+        "build-docker-image"
+      ],
+      "skip_if_env": {
+        "CM_DOCKER_IMAGE_EXISTS": [
+          "yes"
+        ]
+      },
+      "tags": "build,docker,image"
+    }
+  ],
+  "tags": [
+    "run",
+    "docker",
+    "container"
+  ],
+  "uid": "1e0c884107514b46"
+}
diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py
new file mode 100644
index 0000000000..f01a6e997b
--- /dev/null
+++ b/script/run-docker-container/customize.py
@@ -0,0 +1,256 @@
+from cmind import utils
+import cmind as cm
+import os
+import 
subprocess +from os.path import exists + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + + interactive = env.get('CM_DOCKER_INTERACTIVE_MODE','') + + if interactive: + env['CM_DOCKER_DETACHED_MODE']='no' + + if 'CM_DOCKER_RUN_SCRIPT_TAGS' not in env: + env['CM_DOCKER_RUN_SCRIPT_TAGS'] = "run,docker,container" + CM_RUN_CMD="cm version" + else: + CM_RUN_CMD="cm run script --tags=" + env['CM_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' + + # Updating Docker info + update_docker_info(env) + + docker_image_repo = env['CM_DOCKER_IMAGE_REPO'] + docker_image_base = env['CM_DOCKER_IMAGE_BASE'] + docker_image_name = env['CM_DOCKER_IMAGE_NAME'] + docker_image_tag = env['CM_DOCKER_IMAGE_TAG'] + + r = cm.access({'action':'search', + 'automation':'script', + 'tags': env['CM_DOCKER_RUN_SCRIPT_TAGS']}) + if len(r['list']) < 1: + raise Exception('CM script with tags '+ env['CM_DOCKER_RUN_SCRIPT_TAGS'] + ' not found!') + + PATH = r['list'][0].path + os.chdir(PATH) + + env['CM_DOCKER_RUN_CMD'] = CM_RUN_CMD + + DOCKER_CONTAINER = docker_image_repo + "/" + docker_image_name + ":" + docker_image_tag + + CMD = "docker images -q " + DOCKER_CONTAINER + + if os_info['platform'] == 'windows': + CMD += " 2> nul" + else: + CMD += " 2> /dev/null" + + print ('') + print ('Checking Docker images:') + print (CMD) + print ('') + + try: + docker_image = subprocess.check_output(CMD, shell=True).decode("utf-8") + except Exception as e: + return {'return':1, 'error':'Docker is either not installed or not started:\n{}'.format(e)} + + recreate_image = env.get('CM_DOCKER_IMAGE_RECREATE', '') + + if docker_image and recreate_image != "yes": + print("Docker image exists with ID: " + docker_image) + env['CM_DOCKER_IMAGE_EXISTS'] = "yes" + + elif recreate_image == "yes": + env['CM_DOCKER_IMAGE_RECREATE'] = "no" + + return {'return':0} + +def postprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + + # Updating Docker info + update_docker_info(env) + + docker_image_repo = env['CM_DOCKER_IMAGE_REPO'] + docker_image_base = env['CM_DOCKER_IMAGE_BASE'] + docker_image_name = env['CM_DOCKER_IMAGE_NAME'] + docker_image_tag = env['CM_DOCKER_IMAGE_TAG'] + + run_cmds = [] + mount_cmds = [] + port_map_cmds = [] + run_opts = '' + + if env.get('CM_DOCKER_PRE_RUN_COMMANDS', []): + for pre_run_cmd in env['CM_DOCKER_PRE_RUN_COMMANDS']: + run_cmds.append(pre_run_cmd) + + if env.get('CM_DOCKER_VOLUME_MOUNTS', []): + for mounts in env['CM_DOCKER_VOLUME_MOUNTS']: + mount_cmds.append(mounts) + + if env.get('CM_DOCKER_PASS_USER_GROUP', '') != '': + run_opts += " --group-add $(id -g $USER) " + + if env.get('CM_DOCKER_ADD_DEVICE', '') != '': + run_opts += " --device="+env['CM_DOCKER_ADD_DEVICE'] + + if env.get('CM_DOCKER_ADD_ALL_GPUS', '') != '': + run_opts += " --gpus=all" + + if env.get('CM_DOCKER_SHM_SIZE', '') != '': + run_opts += " --shm-size={}".format(env['CM_DOCKER_SHM_SIZE']) + + if env.get('CM_DOCKER_EXTRA_RUN_ARGS', '') != '': + run_opts += env['CM_DOCKER_EXTRA_RUN_ARGS'] + + if env.get('CM_DOCKER_PORT_MAPS', []): + for ports in env['CM_DOCKER_PORT_MAPS']: + port_map_cmds.append(ports) + + run_cmd = env['CM_DOCKER_RUN_CMD'] + " " +env.get('CM_DOCKER_RUN_CMD_EXTRA', '').replace(":","=") + run_cmds.append(run_cmd) + if 'CM_DOCKER_POST_RUN_COMMANDS' in env: + for post_run_cmd in env['CM_DOCKER_POST_RUN_COMMANDS']: + run_cmds.append(post_run_cmd) + + run_cmd = " && ".join(run_cmds) + run_cmd = run_cmd.replace("--docker_run_deps", "") + + if mount_cmds: + for mount_cmd in mount_cmds: + + # Since windows may have 2 :, we search 
from the right + j = mount_cmd.rfind(':') + if j>0: + mount_parts = [mount_cmd[:j], mount_cmd[j+1:]] + else: + return {'return':1, 'error': 'Can\'t find separator : in a mount string: {}'.format(mount_cmd)} + +# mount_parts = mount_cmd.split(":") +# if len(mount_parts) != 2: +# return {'return': 1, 'error': 'Invalid mount {} specified'.format(mount_parts)} + + host_mount = mount_parts[0] + if not os.path.exists(host_mount): + os.makedirs(host_mount) + + mount_cmd_string = " -v " + " -v ".join(mount_cmds) + else: + mount_cmd_string = '' + run_opts += mount_cmd_string + + if port_map_cmds: + port_map_cmd_string = " -p " + "-p ".join(port_map_cmds) + else: + port_map_cmd_string = '' + + run_opts += port_map_cmd_string + + # Currently have problem running Docker in detached mode on Windows: + detached = env.get('CM_DOCKER_DETACHED_MODE','') in ['yes', 'True', True] +# if detached and os_info['platform'] != 'windows': + if detached: + if os_info['platform'] == 'windows': + return {'return':1, 'error':'Currently we don\'t support running Docker containers in detached mode on Windows - TBD'} + + CONTAINER="docker run -dt "+ run_opts + " --rm " + docker_image_repo + "/" + docker_image_name + ":" + docker_image_tag + " bash" + CMD = "ID=`" + CONTAINER + "` && docker exec $ID bash -c '" + run_cmd + "' && docker kill $ID >/dev/null" + + print ('') + print ("Container launch command:") + print ('') + print (CMD) + print ('') + print ("Running "+run_cmd+" inside docker container") + + record_script({'cmd':CMD, 'env': env}) + + print ('') + docker_out = subprocess.check_output(CMD, shell=True).decode("utf-8") + + print(docker_out) + + else: + x = "'" + if os_info['platform'] == 'windows': + x = '"' + + x1 = '' + x2 = '' + if env.get('CM_DOCKER_INTERACTIVE_MODE', '') in ['yes', 'True', True]: + x1 = '-it' + x2 = " && bash " + + + CONTAINER="docker run " + x1 + " --entrypoint " + x + x + " " + run_opts + " " + docker_image_repo + "/" + docker_image_name + ":" + docker_image_tag + CMD = CONTAINER + " bash -c " + x + run_cmd + x2 + x + + print ('') + print ("Container launch command:") + print ('') + print (CMD) + + record_script({'cmd':CMD, 'env': env}) + + print ('') + docker_out = os.system(CMD) + + return {'return':0} + +def record_script(i): + + cmd = i['cmd'] + env = i['env'] + + files = [] + + dockerfile_path = env.get('CM_DOCKERFILE_WITH_PATH', '') + if dockerfile_path != '' and os.path.isfile(dockerfile_path): + files.append(dockerfile_path + '.run.bat') + files.append(dockerfile_path + '.run.sh') + + save_script = env.get('CM_DOCKER_SAVE_SCRIPT', '') + if save_script != '': + if save_script.endswith('.bat') or save_script.endswith('.sh'): + files.append(save_script) + else: + files.append(save_script+'.bat') + files.append(save_script+'.sh') + + for filename in files: + with open (filename, 'w') as f: + f.write(cmd + '\n') + + return {'return':0} + +def update_docker_info(env): + # Updating Docker info + docker_image_repo = env.get('CM_DOCKER_IMAGE_REPO', 'cknowledge') + env['CM_DOCKER_IMAGE_REPO'] = docker_image_repo + + docker_image_base = env.get('CM_DOCKER_IMAGE_BASE') + if not docker_image_base: + if env.get("CM_DOCKER_OS", '') != '': + docker_image_base = env["CM_DOCKER_OS"]+":"+env["CM_DOCKER_OS_VERSION"] + else: + docker_image_base = "ubuntu:22.04" + env['CM_DOCKER_IMAGE_BASE'] = docker_image_base + + docker_image_name = env.get('CM_DOCKER_IMAGE_NAME', 'cm-script-'+env['CM_DOCKER_RUN_SCRIPT_TAGS'].replace(',', '-').replace('_','')) + env['CM_DOCKER_IMAGE_NAME'] = docker_image_name + + 
docker_image_tag = env.get('CM_DOCKER_IMAGE_TAG', docker_image_base.replace(':','-').replace('_','') + "-latest") + env['CM_DOCKER_IMAGE_TAG'] = docker_image_tag + + return diff --git a/script/run-mlperf-inference-app/README-extra.md b/script/run-mlperf-inference-app/README-extra.md new file mode 100644 index 0000000000..b91bf8e31b --- /dev/null +++ b/script/run-mlperf-inference-app/README-extra.md @@ -0,0 +1,21 @@ +# About + +This is a universal CM interface to run and customize all MLPerf inference benchmarks. +It is composed from the [portable automation recipes (CM scripts)](https://access.cknowledge.org/playground/?action=scripts). + +Check [this documentation](https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference) +and [CM GUI](https://access.cknowledge.org/playground/?action=howtorun&bench_uid=39877bb63fb54725) +to learn how to run MLPerf benchmarks via CM. + + + +# Authors + +* [Grigori Fursin](https://cKnowledge.org/gfursin) +* [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh) + + +# Acknowledgments + +We thank [the community](../../../CONTRIBUTING.md) for their suggestions and contributions! + diff --git a/script/run-mlperf-inference-app/README.md b/script/run-mlperf-inference-app/README.md new file mode 100644 index 0000000000..0c5d2d83c1 --- /dev/null +++ b/script/run-mlperf-inference-app/README.md @@ -0,0 +1,400 @@ +Automatically generated README for this automation recipe: **run-mlperf-inference-app** + +Category: **Modular MLPerf inference benchmark pipeline** + +License: **Apache 2.0** + +Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=run-mlperf-inference-app,4a5d5b13fd7e4ac8) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-app)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *run-mlperf,inference* +* Output cached? 
*False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "run-mlperf,inference" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=run-mlperf,inference`
+
+`cm run script --tags=run-mlperf,inference[,variations] [--input_flags]`
+
+*or*
+
+`cmr "run-mlperf,inference"`
+
+`cmr "run-mlperf,inference [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+
+#### Input Flags
+
+* --**division**=MLPerf division {open,closed} (*open*)
+* --**category**=MLPerf category {edge,datacenter,network} (*edge*)
+* --**device**=MLPerf device {cpu,cuda,rocm,qaic} (*cpu*)
+* --**model**=MLPerf model {resnet50,retinanet,bert-99,bert-99.9,3d-unet-99,3d-unet-99.9,rnnt,dlrm-v2-99,dlrm-v2-99.9,gptj-99,gptj-99.9,sdxl,llama2-70b-99,llama2-70b-99.9,mobilenet,efficientnet} (*resnet50*)
+* --**precision**=MLPerf model precision {float32,float16,bfloat16,int8,uint8}
+* --**implementation**=MLPerf implementation {mlcommons-python,mlcommons-cpp,nvidia,intel,qualcomm,ctuning-cpp-tflite} (*mlcommons-python*)
+* --**backend**=MLPerf framework (backend) {onnxruntime,tf,pytorch,deepsparse,tensorrt,glow,tvm-onnx} (*onnxruntime*)
+* --**scenario**=MLPerf scenario {Offline,Server,SingleStream,MultiStream} (*Offline*)
+* --**mode**=MLPerf benchmark mode {,accuracy,performance}
+* --**execution_mode**=MLPerf execution mode {test,fast,valid} (*test*)
+* --**sut**=SUT configuration (if known)
+* --**submitter**=Submitter name (without space) (*CTuning*)
+* --**results_dir**=Folder path to store results (defaults to the current working directory)
+* --**submission_dir**=Folder path to store MLPerf submission tree
+* --**adr.compiler.tags**=Compiler for loadgen and any C/C++ part of implementation
+* --**adr.inference-src-loadgen.env.CM_GIT_URL**=Git URL for MLPerf inference sources to build LoadGen (to enable non-reference implementations)
+* --**adr.inference-src.env.CM_GIT_URL**=Git URL for MLPerf inference sources to run benchmarks (to enable non-reference implementations)
+* --**adr.mlperf-inference-implementation.max_batchsize**=Maximum batchsize to be used
+* --**adr.mlperf-inference-implementation.num_threads**=Number of threads (reference & C++ implementation only)
+* --**adr.python.name**=Python virtual environment name (optional)
+* --**adr.python.version**=Force Python version (must have all system deps)
+* --**adr.python.version_min**=Minimal Python version (*3.8*)
+* --**power**=Measure power {yes,no} (*no*)
+* --**adr.mlperf-power-client.power_server**=MLPerf Power server IP address (*192.168.0.15*)
+* --**adr.mlperf-power-client.port**=MLPerf Power server port (*4950*)
+* --**clean**=Clean run (*False*)
+* --**compliance**=Whether to run compliance tests (applicable only for closed division) {yes,no} (*no*)
+* --**dashboard_wb_project**=W&B dashboard project (*cm-mlperf-dse-testing*)
+* --**dashboard_wb_user**=W&B dashboard user (*cmind*)
+* --**hw_name**=MLPerf hardware name (for example "gcp.c3_standard_8", "nvidia_orin", "lenovo_p14s_gen_4_windows_11", "macbook_pro_m1_2", "thundercomm_rb6" ...)
+* --**multistream_target_latency**=Set MultiStream target latency
+* --**offline_target_qps**=Set LoadGen Offline target QPS
+* --**quiet**=Quiet run (select default values for all questions) (*True*)
+* --**server_target_qps**=Set Server target QPS
+* --**singlestream_target_latency**=Set SingleStream target latency
+* --**target_latency**=Set Target latency
+* --**target_qps**=Set LoadGen target QPS
+* --**j**=Print results dictionary to console at the end of the run (*False*)
+* --**repro**=Record input/output/state/info files to make it easier to reproduce results (*False*)
+* --**time**=Print script execution time at the end of the run (*True*)
+* --**debug**=Debug this script (*False*)
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "division":...})
+```
+#### Run this script from Python
+
+<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'run-mlperf,inference',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
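+
+For orientation, here is a minimal sketch of an equivalent command-line invocation (all flags are documented above; the values are illustrative):
+
+```bash
+# short test run of the reference ResNet50 implementation on CPU
+cm run script --tags=run-mlperf,inference \
+     --model=resnet50 --implementation=mlcommons-python \
+     --backend=onnxruntime --device=cpu --scenario=Offline \
+     --execution_mode=test --quiet -j
+```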
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="run-mlperf,inference"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run-mlperf,inference) to generate the CM command.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "run-mlperf,inference[variations]" [--input_flags]`
+
+___
+### Customization
+
+
+#### Variations
+
+  * *No group (any variation can be selected)*
+<details>
+ Click here to expand this section. + + * `_all-scenarios` + - Environment variables: + - *CM_MLPERF_LOADGEN_ALL_SCENARIOS*: `yes` + - Workflow: + * `_compliance` + - Environment variables: + - *CM_MLPERF_LOADGEN_COMPLIANCE*: `yes` + - Workflow: + * `_dashboard` + - Environment variables: + - *CM_MLPERF_DASHBOARD*: `on` + - Workflow: + +
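+
+  Since these variations do not belong to any group, they can be combined freely. A sketch (assuming defaults for everything else):
+
+  ```bash
+  # run all applicable scenarios and the compliance tests in one go
+  cm run script --tags=run-mlperf,inference,_all-scenarios,_compliance --quiet
+  ```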
+ + + * Group "**benchmark-version**" +
+ Click here to expand this section. + + * `_r2.1` + - Environment variables: + - *CM_MLPERF_INFERENCE_VERSION*: `2.1` + - *CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS*: `r2.1_default` + - Workflow: + * `_r3.0` + - Environment variables: + - *CM_MLPERF_INFERENCE_VERSION*: `3.0` + - *CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS*: `r3.0_default` + - Workflow: + * `_r3.1` + - Environment variables: + - *CM_MLPERF_INFERENCE_VERSION*: `3.1` + - *CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS*: `r3.1_default` + - Workflow: + * `_r4.0` + - Environment variables: + - *CM_MLPERF_INFERENCE_VERSION*: `4.0` + - *CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS*: `r4.0_default` + - Workflow: + +
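+
+  A specific benchmark version can be pinned with one of these tags. A sketch (the model choice is illustrative):
+
+  ```bash
+  # force the MLPerf inference v4.0 defaults
+  cm run script --tags=run-mlperf,inference,_r4.0 --model=resnet50 --quiet
+  ```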
+ + + * Group "**mode**" +
+ Click here to expand this section. + + * `_all-modes` + - Environment variables: + - *CM_MLPERF_LOADGEN_ALL_MODES*: `yes` + - Workflow: + +
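+
+  For example, `_all-modes` runs both performance and accuracy for the selected scenario. A sketch:
+
+  ```bash
+  cm run script --tags=run-mlperf,inference,_all-modes --scenario=Offline --quiet
+  ```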
+ + + * Group "**submission-generation**" +
+ Click here to expand this section. + + * `_accuracy-only` + - Environment variables: + - *CM_MLPERF_LOADGEN_MODE*: `accuracy` + - *CM_MLPERF_SUBMISSION_RUN*: `yes` + - *CM_RUN_MLPERF_ACCURACY*: `on` + - *CM_RUN_SUBMISSION_CHECKER*: `no` + - Workflow: + * **`_find-performance`** (default) + - Environment variables: + - *CM_MLPERF_FIND_PERFORMANCE_MODE*: `yes` + - *CM_MLPERF_LOADGEN_ALL_MODES*: `no` + - *CM_MLPERF_LOADGEN_MODE*: `performance` + - *CM_MLPERF_RESULT_PUSH_TO_GITHUB*: `False` + - Workflow: + * `_performance-only` + - Environment variables: + - *CM_MLPERF_LOADGEN_MODE*: `performance` + - *CM_MLPERF_SUBMISSION_RUN*: `yes` + - *CM_RUN_SUBMISSION_CHECKER*: `no` + - Workflow: + * `_populate-readme` + - Environment variables: + - *CM_MLPERF_README*: `yes` + - *CM_MLPERF_SUBMISSION_RUN*: `yes` + - *CM_RUN_SUBMISSION_CHECKER*: `no` + - Workflow: + * `_submission` + - Environment variables: + - *CM_MLPERF_LOADGEN_COMPLIANCE*: `yes` + - *CM_MLPERF_SUBMISSION_RUN*: `yes` + - *CM_RUN_MLPERF_ACCURACY*: `on` + - *CM_RUN_SUBMISSION_CHECKER*: `yes` + - *CM_TAR_SUBMISSION_DIR*: `yes` + - Workflow: + 1. ***Read "post_deps" on other CM scripts*** + * generate,mlperf,inference,submission + * `if (CM_MLPERF_SKIP_SUBMISSION_GENERATION in ['no', 'false', 'False', '0'])` + * CM names: `--adr.['submission-generator']...` + - CM script: [generate-mlperf-inference-submission](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/generate-mlperf-inference-submission) + +
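+
+  For example, `_submission` (which builds on `_all-modes`) runs performance, accuracy and compliance and then generates and checks the MLPerf submission tree. A sketch, with placeholder submitter and hardware names:
+
+  ```bash
+  cm run script --tags=run-mlperf,inference,_submission \
+       --submitter="MyOrg" --hw_name=default --quiet
+  ```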
+ + + * Group "**submission-generation-style**" +
+ Click here to expand this section. + + * `_full` + - Environment variables: + - *CM_MLPERF_SUBMISSION_GENERATION_STYLE*: `full` + - *CM_MLPERF_SKIP_SUBMISSION_GENERATION*: `yes` + - Workflow: + * **`_short`** (default) + - Environment variables: + - *CM_MLPERF_SUBMISSION_GENERATION_STYLE*: `short` + - Workflow: + +
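+
+  The style can also be selected explicitly together with a submission-generation variation. A sketch (flag values are illustrative):
+
+  ```bash
+  # measure performance on the full datasets in a valid run
+  cm run script --tags=run-mlperf,inference,_performance-only,_full --execution_mode=valid --quiet
+  ```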
+ + +#### Default variations + +`_find-performance,_short` + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--backend=value` → `CM_MLPERF_BACKEND=value` +* `--batch_size=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` +* `--category=value` → `CM_MLPERF_SUBMISSION_SYSTEM_TYPE=value` +* `--clean=value` → `CM_MLPERF_CLEAN_ALL=value` +* `--compliance=value` → `CM_MLPERF_LOADGEN_COMPLIANCE=value` +* `--dashboard_wb_project=value` → `CM_MLPERF_DASHBOARD_WANDB_PROJECT=value` +* `--dashboard_wb_user=value` → `CM_MLPERF_DASHBOARD_WANDB_USER=value` +* `--debug=value` → `CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM=value` +* `--device=value` → `CM_MLPERF_DEVICE=value` +* `--division=value` → `CM_MLPERF_SUBMISSION_DIVISION=value` +* `--docker=value` → `CM_MLPERF_USE_DOCKER=value` +* `--dump_version_info=value` → `CM_DUMP_VERSION_INFO=value` +* `--execution_mode=value` → `CM_MLPERF_RUN_STYLE=value` +* `--find_performance=value` → `CM_MLPERF_FIND_PERFORMANCE_MODE=value` +* `--gpu_name=value` → `CM_NVIDIA_GPU_NAME=value` +* `--hw_name=value` → `CM_HW_NAME=value` +* `--hw_notes_extra=value` → `CM_MLPERF_SUT_SW_NOTES_EXTRA=value` +* `--imagenet_path=value` → `IMAGENET_PATH=value` +* `--implementation=value` → `CM_MLPERF_IMPLEMENTATION=value` +* `--lang=value` → `CM_MLPERF_IMPLEMENTATION=value` +* `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` +* `--model=value` → `CM_MLPERF_MODEL=value` +* `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` +* `--network=value` → `CM_NETWORK_LOADGEN=value` +* `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` +* `--output_dir=value` → `OUTPUT_BASE_DIR=value` +* `--output_summary=value` → `MLPERF_INFERENCE_SUBMISSION_SUMMARY=value` +* `--output_tar=value` → `MLPERF_INFERENCE_SUBMISSION_TAR_FILE=value` +* `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` +* `--power=value` → `CM_SYSTEM_POWER=value` +* `--precision=value` → `CM_MLPERF_MODEL_PRECISION=value` +* `--preprocess_submission=value` → `CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR=value` +* `--push_to_github=value` → `CM_MLPERF_RESULT_PUSH_TO_GITHUB=value` +* `--readme=value` → `CM_MLPERF_README=value` +* `--regenerate_accuracy_file=value` → `CM_MLPERF_REGENERATE_ACCURACY_FILE=value` +* `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value` +* `--rerun=value` → `CM_RERUN=value` +* `--results_dir=value` → `OUTPUT_BASE_DIR=value` +* `--results_git_url=value` → `CM_MLPERF_RESULTS_GIT_REPO_URL=value` +* `--run_checker=value` → `CM_RUN_SUBMISSION_CHECKER=value` +* `--run_style=value` → `CM_MLPERF_RUN_STYLE=value` +* `--save_console_log=value` → `CM_SAVE_CONSOLE_LOG=value` +* `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` +* `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` +* `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` +* `--skip_submission_generation=value` → `CM_MLPERF_SKIP_SUBMISSION_GENERATION=value` +* `--skip_truncation=value` → `CM_SKIP_TRUNCATE_ACCURACY=value` +* `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` +* `--submitter=value` → `CM_MLPERF_SUBMITTER=value` +* `--sut=value` → `CM_MLPERF_INFERENCE_SUT_VARIATION=value` +* `--sut_servers=value` → `CM_NETWORK_LOADGEN_SUT_SERVERS=value` +* `--sw_notes_extra=value` → `CM_MLPERF_SUT_SW_NOTES_EXTRA=value` +* `--system_type=value` → `CM_MLPERF_SUBMISSION_SYSTEM_TYPE=value` +* `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` +* `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` +* `--test_query_count=value` → 
`CM_TEST_QUERY_COUNT=value`
+* `--threads=value` → `CM_NUM_THREADS=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "backend":...})
+```
+
+</details>
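+
+Each flag above is an alias for the corresponding environment key, so the following two invocations should be equivalent (a sketch):
+
+```bash
+cm run script --tags=run-mlperf,inference --offline_target_qps=100
+cm run script --tags=run-mlperf,inference --env.CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=100
+```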
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MLPERF_IMPLEMENTATION: `reference` +* CM_MLPERF_MODEL: `resnet50` +* CM_MLPERF_RUN_STYLE: `test` + +
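+
+For example, a sketch of overriding one of these defaults directly via `--env` (the same effect as the corresponding `--execution_mode` script flag):
+
+```bash
+cm run script --tags=run-mlperf,inference --env.CM_MLPERF_RUN_STYLE=valid --quiet
+```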
+
+#### Versions
+* `master`
+* `r2.1`
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-app/_cm.yaml)***
+     * detect,os
+       * `if (CM_MLPERF_USE_DOCKER != True)`
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * detect,cpu
+       * `if (CM_MLPERF_USE_DOCKER != True)`
+       - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu)
+     * get,python3
+       * `if (CM_MLPERF_USE_DOCKER != True)`
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,mlcommons,inference,src
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,sut,description
+       - CM script: [get-mlperf-inference-sut-description](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-sut-description)
+     * get,mlperf,inference,results,dir
+       * `if (CM_MLPERF_USE_DOCKER == False) AND (OUTPUT_BASE_DIR != True)`
+       * CM names: `--adr.['get-mlperf-inference-results-dir']...`
+       - CM script: [get-mlperf-inference-results-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-results-dir)
+     * install,pip-package,for-cmind-python,_package.tabulate
+       - CM script: [install-pip-package-for-cmind-python](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pip-package-for-cmind-python)
+     * get,mlperf,inference,utils
+       - CM script: [get-mlperf-inference-utils](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-utils)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-app/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-app/_cm.yaml)
+  1. ***Run native script if exists***
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-app/_cm.yaml)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-app/customize.py)***
+  1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-app/_cm.yaml) + +___ +### Script output +`cmr "run-mlperf,inference [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/run-mlperf-inference-app/_cm.yaml b/script/run-mlperf-inference-app/_cm.yaml new file mode 100644 index 0000000000..17e689fe30 --- /dev/null +++ b/script/run-mlperf-inference-app/_cm.yaml @@ -0,0 +1,501 @@ +alias: run-mlperf-inference-app +uid: 4a5d5b13fd7e4ac8 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: Modular MLPerf inference benchmark pipeline + +developers: "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)" + +gui: + title: CM GUI to run MLPerf inference benchmarks and prepare submissions + +clean_output_files: +- open.tar.gz +- summary.csv +- summary.json + +tags: +- run +- common +- generate-run-cmds +- run-mlperf +- run-mlperf-inference +- vision +- mlcommons +- mlperf +- inference +- reference + +tags_help: "run-mlperf,inference" + +default_env: + CM_MLPERF_IMPLEMENTATION: reference + CM_MLPERF_MODEL: resnet50 + CM_MLPERF_RUN_STYLE: test + +input_mapping: + backend: CM_MLPERF_BACKEND + category: CM_MLPERF_SUBMISSION_SYSTEM_TYPE + clean: CM_MLPERF_CLEAN_ALL + compliance: CM_MLPERF_LOADGEN_COMPLIANCE + dashboard_wb_project: CM_MLPERF_DASHBOARD_WANDB_PROJECT + dashboard_wb_user: CM_MLPERF_DASHBOARD_WANDB_USER + debug: CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM + device: CM_MLPERF_DEVICE + division: CM_MLPERF_SUBMISSION_DIVISION + docker: CM_MLPERF_USE_DOCKER + dump_version_info: CM_DUMP_VERSION_INFO + save_console_log: CM_SAVE_CONSOLE_LOG + execution_mode: CM_MLPERF_RUN_STYLE + find_performance: CM_MLPERF_FIND_PERFORMANCE_MODE + gpu_name: CM_NVIDIA_GPU_NAME + hw_name: CM_HW_NAME + hw_notes_extra: CM_MLPERF_SUT_SW_NOTES_EXTRA + imagenet_path: IMAGENET_PATH + implementation: CM_MLPERF_IMPLEMENTATION + lang: CM_MLPERF_IMPLEMENTATION + mode: CM_MLPERF_LOADGEN_MODE + model: CM_MLPERF_MODEL + multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + network: CM_NETWORK_LOADGEN + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + output_dir: OUTPUT_BASE_DIR + output_summary: MLPERF_INFERENCE_SUBMISSION_SUMMARY + output_tar: MLPERF_INFERENCE_SUBMISSION_TAR_FILE + performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + power: CM_SYSTEM_POWER + precision: CM_MLPERF_MODEL_PRECISION + preprocess_submission: CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR + push_to_github: CM_MLPERF_RESULT_PUSH_TO_GITHUB + readme: CM_MLPERF_README + regenerate_accuracy_file: CM_MLPERF_REGENERATE_ACCURACY_FILE + regenerate_files: CM_REGENERATE_MEASURE_FILES + rerun: CM_RERUN + results_dir: OUTPUT_BASE_DIR + results_git_url: CM_MLPERF_RESULTS_GIT_REPO_URL + run_checker: CM_RUN_SUBMISSION_CHECKER + run_style: CM_MLPERF_RUN_STYLE + scenario: CM_MLPERF_LOADGEN_SCENARIO + server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + skip_submission_generation: CM_MLPERF_SKIP_SUBMISSION_GENERATION + skip_truncation: CM_SKIP_TRUNCATE_ACCURACY + submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR + submitter: CM_MLPERF_SUBMITTER + sut_servers: CM_NETWORK_LOADGEN_SUT_SERVERS + sw_notes_extra: CM_MLPERF_SUT_SW_NOTES_EXTRA + system_type: CM_MLPERF_SUBMISSION_SYSTEM_TYPE + target_latency: 
CM_MLPERF_LOADGEN_TARGET_LATENCY + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + test_query_count: CM_TEST_QUERY_COUNT + threads: CM_NUM_THREADS + batch_size: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + sut: CM_MLPERF_INFERENCE_SUT_VARIATION + +new_state_keys: +- app_mlperf_inference_* +- cm-mlperf-inference-results* + +deps: +- tags: detect,os + skip_if_env: + CM_MLPERF_USE_DOCKER: [ on ] +- tags: detect,cpu + skip_if_env: + CM_MLPERF_USE_DOCKER: [ on ] +- names: + - python + - python3 + tags: get,python3 + skip_if_env: + CM_MLPERF_USE_DOCKER: [ on ] +- names: + - inference-src + tags: get,mlcommons,inference,src +- tags: get,sut,description + +- tags: get,mlperf,inference,results,dir + names: + - get-mlperf-inference-results-dir + enable_if_env: + CM_MLPERF_USE_DOCKER: [ off ] + skip_if_env: + OUTPUT_BASE_DIR: [ on ] +- tags: install,pip-package,for-cmind-python,_package.tabulate +- tags: get,mlperf,inference,utils + +docker: + mounts: + - ${{ INSTALL_DATA_PATH }}:/install_data + - ${{ DATA_PATH }}:/data + - ${{ CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}:${{ CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }} + - ${{ GPTJ_CHECKPOINT_PATH }}:${{ GPTJ_CHECKPOINT_PATH }} + skip_run_cmd: 'no' + shm_size: '32gb' + extra_run_args: ' --ulimit memlock=-1 --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined' + docker_os: ubuntu + docker_real_run: false + run: true + interactive: true + docker_input_mapping: + imagenet_path: IMAGENET_PATH + gptj_checkpoint_path: GPTJ_CHECKPOINT_PATH + criteo_preprocessed_path: CRITEO_PREPROCESSED_PATH + results_dir: RESULTS_DIR + submission_dir: SUBMISSION_DIR + dlrm_data_path: DLRM_DATA_PATH + intel_gptj_int8_model_path: CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH + +variations: + + accuracy-only: + default_variations: + submission-generation-style: full + env: + CM_MLPERF_LOADGEN_MODE: accuracy + CM_MLPERF_SUBMISSION_RUN: 'yes' + CM_RUN_MLPERF_ACCURACY: 'on' + CM_RUN_SUBMISSION_CHECKER: 'no' + group: submission-generation + + all-modes: + env: + CM_MLPERF_LOADGEN_ALL_MODES: 'yes' + group: mode + + all-scenarios: + env: + CM_MLPERF_LOADGEN_ALL_SCENARIOS: 'yes' + + compliance: + env: + CM_MLPERF_LOADGEN_COMPLIANCE: 'yes' + + dashboard: + default_gui: false + env: + CM_MLPERF_DASHBOARD: 'on' + + find-performance: + default: true + env: + CM_MLPERF_FIND_PERFORMANCE_MODE: 'yes' + CM_MLPERF_LOADGEN_ALL_MODES: 'no' + CM_MLPERF_LOADGEN_MODE: performance + CM_MLPERF_RESULT_PUSH_TO_GITHUB: false + group: submission-generation + + full: + add_deps_recursive: + coco2014-original: + tags: _full + coco2014-preprocessed: + tags: _full + imagenet-original: + tags: _full + imagenet-preprocessed: + tags: _full + openimages-original: + tags: _full + openimages-preprocessed: + tags: _full + openorca-original: + tags: _full + openorca-preprocessed: + tags: _full + env: + CM_MLPERF_SUBMISSION_GENERATION_STYLE: full + CM_MLPERF_SKIP_SUBMISSION_GENERATION: 'yes' + group: submission-generation-style + + performance-only: + default_variations: + submission-generation-style: full + env: + CM_MLPERF_LOADGEN_MODE: performance + CM_MLPERF_SUBMISSION_RUN: 'yes' + CM_RUN_SUBMISSION_CHECKER: 'no' + group: submission-generation + + populate-readme: + base: + - all-modes + default_variations: + submission-generation-style: full + env: + CM_MLPERF_README: 'yes' + CM_MLPERF_SUBMISSION_RUN: 'yes' + CM_RUN_SUBMISSION_CHECKER: 'no' + group: submission-generation + + r2.1: + env: + CM_MLPERF_INFERENCE_VERSION: '2.1' + 
CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r2.1_default + group: benchmark-version + + r3.0: + env: + CM_MLPERF_INFERENCE_VERSION: '3.0' + CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r3.0_default + group: benchmark-version + + r3.1: + env: + CM_MLPERF_INFERENCE_VERSION: '3.1' + CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r3.1_default + group: benchmark-version + + r4.0: + env: + CM_MLPERF_INFERENCE_VERSION: '4.0' + CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r4.0_default + group: benchmark-version + + short: + add_deps_recursive: + submission-checker: + tags: _short-run + default: 'true' + env: + CM_MLPERF_SUBMISSION_GENERATION_STYLE: short + group: submission-generation-style + + submission: + base: + - all-modes + default_gui: true + default_variations: + submission-generation-style: full + env: + CM_MLPERF_LOADGEN_COMPLIANCE: 'yes' + CM_MLPERF_SUBMISSION_RUN: 'yes' + CM_RUN_MLPERF_ACCURACY: 'on' + CM_RUN_SUBMISSION_CHECKER: 'yes' + CM_TAR_SUBMISSION_DIR: 'yes' + group: submission-generation + post_deps: + - names: + - submission-generator + enable_if_env: + CM_MLPERF_SKIP_SUBMISSION_GENERATION: + - 'no' + - 'false' + - 'False' + - '0' + tags: generate,mlperf,inference,submission + +versions: + master: {} + r2.1: {} + +input_description: + division: + choices: + - 'open' + - 'closed' + default: 'open' + desc: MLPerf division + sort: 50 + category: + choices: + - 'edge' + - 'datacenter' + - 'network' + default: 'edge' + desc: MLPerf category + sort: 60 + device: + choices: + - cpu + - cuda + - rocm + - qaic + default: cpu + desc: MLPerf device + sort: 100 + model: + choices: + - resnet50 + - retinanet + - bert-99 + - bert-99.9 + - 3d-unet-99 + - 3d-unet-99.9 + - rnnt + - dlrm-v2-99 + - dlrm-v2-99.9 + - gptj-99 + - gptj-99.9 + - sdxl + - llama2-70b-99 + - llama2-70b-99.9 + - mobilenet + - efficientnet + default: resnet50 + desc: MLPerf model + sort: 200 + precision: + choices: + - float32 + - float16 + - bfloat16 + - int8 + - uint8 + default: '' + desc: MLPerf model precision + sort: 250 + implementation: + choices: + - mlcommons-python + - mlcommons-cpp + - nvidia + - intel + - qualcomm + - ctuning-cpp-tflite + default: mlcommons-python + desc: MLPerf implementation + sort: 300 + backend: + choices: + - onnxruntime + - tf + - pytorch + - deepsparse + - tensorrt + - glow + - tvm-onnx + default: onnxruntime + desc: MLPerf framework (backend) + sort: 400 + scenario: + choices: + - Offline + - Server + - SingleStream + - MultiStream + default: Offline + desc: MLPerf scenario + sort: 500 + mode: + choices: + - '' + - accuracy + - performance + default: '' + desc: MLPerf benchmark mode + sort: 600 + execution_mode: + choices: + - test + - fast + - valid + default: test + desc: MLPerf execution mode + sort: 700 + sut: + default: '' + desc: SUT configuration (if known) + sort: 750 + submitter: + default: CTuning + desc: Submitter name (without space) + sort: 800 + results_dir: + desc: Folder path to store results (defaults to the current working directory) + default: '' + sort: 900 + submission_dir: + desc: Folder path to store MLPerf submission tree + default: '' + sort: 1000 + + adr.compiler.tags: + desc: Compiler for loadgen and any C/C++ part of implementation + adr.inference-src-loadgen.env.CM_GIT_URL: + default: '' + desc: Git URL for MLPerf inference sources to build LoadGen (to enable non-reference + implementations) + adr.inference-src.env.CM_GIT_URL: + default: '' + desc: Git URL for MLPerf inference sources to run benchmarks (to enable non-reference + implementations) + 
adr.mlperf-inference-implementation.max_batchsize: + desc: Maximum batchsize to be used + adr.mlperf-inference-implementation.num_threads: + desc: Number of threads (reference & C++ implementation only) + adr.python.name: + desc: Python virtual environment name (optional) + adr.python.version: + desc: Force Python version (must have all system deps) + adr.python.version_min: + default: '3.8' + desc: Minimal Python version + power: + choices: + - 'yes' + - 'no' + default: 'no' + desc: Measure power + sort: 5000 + adr.mlperf-power-client.power_server: + default: '192.168.0.15' + desc: MLPerf Power server IP address + sort: 5005 + adr.mlperf-power-client.port: + default: 4950 + desc: MLPerf Power server port + sort: 5010 + clean: + boolean: true + default: false + desc: Clean run + compliance: + choices: + - 'yes' + - 'no' + default: 'no' + desc: Whether to run compliance tests (applicable only for closed division) + dashboard_wb_project: + desc: W&B dashboard project + default: cm-mlperf-dse-testing + dashboard_wb_user: + desc: W&B dashboard user + default: cmind + hw_name: + desc: MLPerf hardware name (for example "gcp.c3_standard_8", "nvidia_orin", "lenovo_p14s_gen_4_windows_11", "macbook_pro_m1_2", "thundercomm_rb6" ...) + multistream_target_latency: + desc: Set MultiStream target latency + offline_target_qps: + desc: Set LoadGen Offline target QPS + quiet: + boolean: true + default: true + desc: Quiet run (select default values for all questions) + server_target_qps: + desc: Set Server target QPS + singlestream_target_latency: + desc: Set SingleStream target latency + target_latency: + desc: Set Target latency + target_qps: + desc: Set LoadGen target QPS + j: + desc: Print results dictionary to console at the end of the run + boolean: true + default: false + repro: + desc: Record input/output/state/info files to make it easier to reproduce results + boolean: true + default: false + time: + desc: Print script execution time at the end of the run + boolean: true + default: true + debug: + desc: Debug this script + boolean: true + default: false + diff --git a/script/run-mlperf-inference-app/customize.py b/script/run-mlperf-inference-app/customize.py new file mode 100644 index 0000000000..16f5412358 --- /dev/null +++ b/script/run-mlperf-inference-app/customize.py @@ -0,0 +1,797 @@ +from cmind import utils +import os +import json +import shutil +import subprocess +import cmind as cm +import copy +from tabulate import tabulate + +summary_ext = ['.csv', '.json', '.xlsx'] + +################################################################################## +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + inp = i['input'] + state = i['state'] + script_path = i['run_script_input']['path'] + + if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + return {'return':0} + + dump_version_info = env.get('CM_DUMP_VERSION_INFO', True) + system_meta = state['CM_SUT_META'] + env['CM_SUT_META_EXISTS'] = "yes" + + env['CM_MODEL'] = env['CM_MLPERF_MODEL'] + + # Clean MLPerf inference output tar file if non-standard + x=env.get('MLPERF_INFERENCE_SUBMISSION_TAR_FILE','') + if x!='' and os.path.isfile(x): + os.remove(x) + + # Clean MLPerf inference submission summary files + x=env.get('MLPERF_INFERENCE_SUBMISSION_SUMMARY','') + if x!='': + for y in summary_ext: + z = x+y + if os.path.isfile(z): + os.remove(z) + + if env.get('CM_MLPERF_SUBMISSION_SYSTEM_TYPE', '') != '': + system_type = env['CM_MLPERF_SUBMISSION_SYSTEM_TYPE'] + system_meta['system_type'] = system_type + + if 
env.get('CM_MLPERF_SUBMISSION_DIVISION', '') != '': + division = env['CM_MLPERF_SUBMISSION_DIVISION'] + system_meta['division'] = division + + if system_meta.get('division', '') != "closed": + env["CM_MLPERF_LOADGEN_COMPLIANCE"] = "no" #no compliance runs needed for open division + + clean = False + + if 'CM_MLPERF_CLEAN_ALL' in env: + clean = True + if 'CM_MLPERF_CLEAN_SUBMISSION_DIR' not in env: + env['CM_MLPERF_CLEAN_SUBMISSION_DIR'] = "yes" + if 'CM_RERUN' not in env: + env['CM_RERUN'] = "yes" + + if str(env.get('CM_SYSTEM_POWER','no')).lower() != "no" or env.get('CM_MLPERF_POWER', '') == "yes": + power_variation = ",_power" + env['CM_MLPERF_POWER'] = "yes" + else: + power_variation = "" + + if env.get('CM_RUN_STYLE', '') == "valid" and 'CM_RUN_MLPERF_ACCURACY' not in env: + env['CM_RUN_MLPERF_ACCURACY'] = "on" + + print("Using MLCommons Inference source from " + env['CM_MLPERF_INFERENCE_SOURCE']) + + + if 'CM_MLPERF_LOADGEN_EXTRA_OPTIONS' not in env: + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] = "" + + if 'CM_MLPERF_LOADGEN_MODES' not in env: + if 'CM_MLPERF_LOADGEN_MODE' not in env: + env['CM_MLPERF_LOADGEN_MODE'] = "performance" + + if 'CM_MLPERF_LOADGEN_SCENARIOS' not in env: + if 'CM_MLPERF_LOADGEN_SCENARIO' not in env: + env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline" + + if env.get('CM_MLPERF_LOADGEN_ALL_SCENARIOS', '') == "yes": + env['CM_MLPERF_LOADGEN_SCENARIOS'] = get_valid_scenarios(env['CM_MODEL'], system_meta['system_type'], env['CM_MLPERF_LAST_RELEASE'], env['CM_MLPERF_INFERENCE_SOURCE']) + else: + system_meta = {} + env['CM_MLPERF_LOADGEN_SCENARIOS'] = [ env['CM_MLPERF_LOADGEN_SCENARIO'] ] + + if env.get('CM_MLPERF_LOADGEN_ALL_MODES', '') == "yes": + env['CM_MLPERF_LOADGEN_MODES'] = [ "performance", "accuracy" ] + else: + env['CM_MLPERF_LOADGEN_MODES'] = [ env['CM_MLPERF_LOADGEN_MODE'] ] + + if env.get('OUTPUT_BASE_DIR', '') == '': + env['OUTPUT_BASE_DIR'] = env.get('CM_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd()) + + test_list = ["TEST01", "TEST05"] + if env['CM_MODEL'] in ["resnet50"]: + test_list.append("TEST04") + if "gpt" in env['CM_MODEL'] or "sdxl" in env['CM_MODEL'] or "llama2-70b" in env['CM_MODEL']: + test_list.remove("TEST01") + test_list.remove("TEST05") + + variation_implementation= "_" + env.get("CM_MLPERF_IMPLEMENTATION", "reference") + variation_model= ",_" + env["CM_MLPERF_MODEL"] + variation_backend= ",_" + env["CM_MLPERF_BACKEND"] if env.get("CM_MLPERF_BACKEND","") != "" else "" + variation_device= ",_" + env["CM_MLPERF_DEVICE"] if env.get("CM_MLPERF_DEVICE","") != "" else "" + variation_run_style= ",_" + env.get("CM_MLPERF_RUN_STYLE", "test") + variation_reproducibility= ",_" + env["CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS"] if env.get("CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS","") != "" else "" + + if env.get("CM_MLPERF_MODEL_PRECISION", '') != '': + variation_quantization_string= ",_" + env["CM_MLPERF_MODEL_PRECISION"] + else: + variation_quantization_string = "" + + tags = "app,mlperf,inference,generic,"+variation_implementation+variation_model+variation_backend+variation_device+variation_run_style+variation_reproducibility+variation_quantization_string+power_variation + verbose = inp.get('v', False) + print_env = inp.get('print_env', False) + print_deps = inp.get('print_deps', False) + add_deps_recursive = inp.get('add_deps_recursive', {}) + add_deps = inp.get('add_deps', {}) + ad = inp.get('ad', {}) + adr = inp.get('adr', {}) + adr_from_meta = i['run_script_input'].get('add_deps_recursive') + + for key in adr_from_meta: + add_deps_recursive[key] = 
adr_from_meta[key] + + if env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '': + if not add_deps_recursive.get('mlperf-inference-implementation', {}): + add_deps_recursive['mlperf-inference-implementation'] = {} + if add_deps_recursive['mlperf-inference-implementation'].get('tags', '') == '': + add_deps_recursive['mlperf-inference-implementation']['tags'] = '' + else: + add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' + add_deps_recursive['mlperf-inference-implementation']['tags'] += "_batch_size."+env['CM_MLPERF_LOADGEN_MAX_BATCHSIZE'] + + if env.get('CM_MLPERF_INFERENCE_SUT_VARIATION', '') != '': + if not add_deps_recursive.get('mlperf-inference-implementation', {}): + add_deps_recursive['mlperf-inference-implementation'] = {} + if add_deps_recursive['mlperf-inference-implementation'].get('tags', '') == '': + add_deps_recursive['mlperf-inference-implementation']['tags'] = '' + else: + add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' + add_deps_recursive['mlperf-inference-implementation']['tags'] += "_"+env['CM_MLPERF_INFERENCE_SUT_VARIATION'] + + if env.get('CM_NETWORK_LOADGEN', '') != '': + if not add_deps_recursive.get('mlperf-inference-implementation', {}): + add_deps_recursive['mlperf-inference-implementation'] = {} + network_variation_tag = f"_network-{env['CM_NETWORK_LOADGEN']}" + if add_deps_recursive['mlperf-inference-implementation'].get('tags', '') == '': + add_deps_recursive['mlperf-inference-implementation']['tags'] = '' + else: + add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' + add_deps_recursive['mlperf-inference-implementation']['tags'] += network_variation_tag + + if env.get('CM_OUTPUT_FOLDER_NAME', '') == '': + env['CM_OUTPUT_FOLDER_NAME'] = env['CM_MLPERF_RUN_STYLE'] + "_results" + + output_dir = os.path.join(env['OUTPUT_BASE_DIR'], env['CM_OUTPUT_FOLDER_NAME']) + if clean: + path_to_clean = output_dir + + print ('=========================================================') + print ('Cleaning results in {}'.format(path_to_clean)) + if os.path.exists(path_to_clean): + shutil.rmtree(path_to_clean) + + print ('=========================================================') + + if str(env.get('CM_MLPERF_USE_DOCKER', '')).lower() in [ "1", "true", "yes"]: + action = "docker" + del(env['OUTPUT_BASE_DIR']) + state = {} + docker_extra_input = {} + for k in inp: + if k.startswith("docker_"): + docker_extra_input[k] = inp[k] + inp = {} + else: + action = "run" + + #local_keys = [ 'CM_MLPERF_SKIP_RUN', 'CM_MLPERF_LOADGEN_QUERY_COUNT', 'CM_MLPERF_LOADGEN_TARGET_QPS', 'CM_MLPERF_LOADGEN_TARGET_LATENCY' ] + + for scenario in env['CM_MLPERF_LOADGEN_SCENARIOS']: + scenario_tags = tags + ",_"+scenario.lower() + env['CM_MLPERF_LOADGEN_SCENARIO'] = scenario + + if scenario == "Offline": + if env.get('CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS'): + env['CM_MLPERF_LOADGEN_TARGET_QPS'] = env['CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS'] + elif scenario == "Server": + if env.get('CM_MLPERF_LOADGEN_SERVER_TARGET_QPS'): + env['CM_MLPERF_LOADGEN_TARGET_QPS'] = env['CM_MLPERF_LOADGEN_SERVER_TARGET_QPS'] + elif scenario == "SingleStream": + if env.get('CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'): + env['CM_MLPERF_LOADGEN_TARGET_LATENCY'] = env['CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'] + elif scenario == "MultiStream": + if env.get('CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY'): + env['CM_MLPERF_LOADGEN_TARGET_LATENCY'] = env['CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY'] + + for mode in env['CM_MLPERF_LOADGEN_MODES']: + 
env['CM_MLPERF_LOADGEN_MODE'] = mode + + print(f"\nRunning loadgen scenario: {scenario} and mode: {mode}") + ii = {'action':action, 'automation':'script', 'tags': scenario_tags, 'quiet': 'true', + 'env': copy.deepcopy(env), 'input': inp, 'state': state, 'add_deps': copy.deepcopy(add_deps), 'add_deps_recursive': + copy.deepcopy(add_deps_recursive), 'ad': ad, 'adr': copy.deepcopy(adr), 'v': verbose, 'print_env': print_env, 'print_deps': print_deps, 'dump_version_info': dump_version_info} + if action == "docker": + for k in docker_extra_input: + ii[k] = docker_extra_input[k] + r = cm.access(ii) + if r['return'] > 0: + return r + + if env.get("CM_MLPERF_LOADGEN_COMPLIANCE", "") == "yes": + for test in test_list: + env['CM_MLPERF_LOADGEN_COMPLIANCE_TEST'] = test + env['CM_MLPERF_LOADGEN_MODE'] = "compliance" + ii = {'action':action, 'automation':'script', 'tags': scenario_tags, 'quiet': 'true', + 'env': copy.deepcopy(env), 'input': inp, 'state': state, 'add_deps': copy.deepcopy(add_deps), 'add_deps_recursive': + copy.deepcopy(add_deps_recursive), 'adr': copy.deepcopy(adr), 'ad': ad, 'v': verbose, 'print_env': print_env, 'print_deps': print_deps, 'dump_version_info': dump_version_info} + if action == "docker": + for k in docker_extra_input: + ii[k] = docker_extra_input[k] + r = cm.access(ii) + if r['return'] > 0: + return r + + if state.get("cm-mlperf-inference-results"): + #print(state["cm-mlperf-inference-results"]) + for sut in state["cm-mlperf-inference-results"]:#only one sut will be there + # Grigori: that may not work properly since customize may have another Python than MLPerf + # (for example, if we use virtual env) + import mlperf_utils + + print(sut) + result_table, headers = mlperf_utils.get_result_table(state["cm-mlperf-inference-results"][sut]) + print(tabulate(result_table, headers = headers, tablefmt="pretty")) + + print(f"\nThe MLPerf inference results are stored at {output_dir}\n") + + return {'return':0} + + +def get_valid_scenarios(model, category, mlperf_version, mlperf_path): + + import sys + + submission_checker_dir = os.path.join(mlperf_path, "tools", "submission") + + sys.path.append(submission_checker_dir) + if not os.path.exists(os.path.join(submission_checker_dir, "submission_checker.py")): + shutil.copy(os.path.join(submission_checker_dir,"submission-checker.py"), os.path.join(submission_checker_dir, + "submission_checker.py")) + + import submission_checker as checker + + if "dlrm-99" in model: + model = model.replace("dlrm-99", "dlrm-v2-99") + if "sdxl" in model: + model = "stable-diffusion-xl" + + config = checker.MODEL_CONFIG + + internal_model_name = config[mlperf_version]["model_mapping"].get(model, model) + + valid_scenarios = config[mlperf_version]["required-scenarios-"+category][internal_model_name] + + print("Valid Scenarios for " + model + " in " + category + " category are :" + str(valid_scenarios)) + + return valid_scenarios + +################################################################################## +def postprocess(i): + + env = i['env'] + state = i['state'] + + if env.get('CM_MLPERF_IMPLEMENTATION', '') == 'reference': + x1 = env.get('CM_MLPERF_INFERENCE_SOURCE','') + x2 = env.get('CM_MLPERF_INFERENCE_CONF_PATH','') + + if x1 != '' and x2 != '': + print ('') + print ('Path to the MLPerf inference benchmark reference sources: {}'.format(x1)) + print ('Path to the MLPerf inference reference configuration file: {}'.format(x2)) + print ('') + + return {'return':0} + 
+################################################################################## +def load_md(path, path2, name): + + fn = os.path.join(path, path2, name+'.md') + + s = '' + + if os.path.isfile(fn): + r = utils.load_txt(fn) + if r['return']>0: return r + + s = r['string'] + + return {'return':0, 'string':s} + +################################################################################## +def get_url(url, path, path2, name, text): + + name_md = name+'.md' + fn = os.path.join(path, path2, name_md) + + urlx = '' + url_online = '' + if os.path.isfile(fn): + if not url.endswith('/'): url+='/' + urlx = url + path2 + '/' + name_md + + url_online = '[{}]({})'.format(text, urlx) + + return {'return':0, 'url_online':url_online} + +################################################################################## +def gui(i): + + params = i['params'] + st = i['st'] + + script_meta = i['meta'] + + misc = i['misc_module'] + + script_path = i['script_path'] + script_url = i.get('script_url','') + script_tags = i.get('script_tags', '') + + compute_meta = i.get('compute_meta',{}) + compute_tags = compute_meta.get('tags', []) + bench_meta = i.get('bench_meta',{}) + + compute_uid = compute_meta.get('uid','') + bench_uid = bench_meta.get('uid','') + + st_inputs_custom = {} + + bench_input = bench_meta.get('bench_input', {}) + + end_html = '' + + extra = {} + add_to_st_inputs = {} + + inp = script_meta['input_description'] + + # Here we can update params + v = compute_meta.get('mlperf_inference_device') + if v!=None and v!='': + inp['device']['force'] = v + + if v in ['tpu', 'gaudi']: + st.markdown('----') + st.markdown('**WARNING: unified CM workflow support for this hardware is pending - please [feel free to help](https://discord.gg/JjWNWXKxwT)!**') + return {'return':0, 'skip': True, 'end_html':end_html} + + elif 'orin' in compute_tags: + st.markdown('----') + st.markdown('**WARNING: we need to encode CM knowledge from [this Orin setp](https://github.com/mlcommons/ck/blob/master/docs/mlperf/setup/setup-nvidia-jetson-orin.md) to this GUI!**') + return {'return':0, 'skip': True, 'end_html':end_html} + + st.markdown('---') + st.markdown('**How would you like to run the MLPerf inference benchmark?**') + + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_device', 'desc':inp['device']}) + device = r.get('value2') + inp['device']['force'] = device + + + + if device == 'cpu': + inp['implementation']['choices']=['mlcommons-python', 'mlcommons-cpp', 'intel', 'ctuning-cpp-tflite'] + if 'intel' in compute_tags: + inp['implementation']['default']='intel' + else: + inp['implementation']['default']='mlcommons-python' + inp['backend']['choices']=['onnxruntime','deepsparse','pytorch','tf','tvm-onnx'] + inp['backend']['default']='onnxruntime' + elif device == 'rocm': + inp['implementation']['force']='mlcommons-python' + inp['precision']['force']='' + inp['backend']['force']='onnxruntime' + st.markdown('*WARNING: CM-MLPerf inference workflow was not tested thoroughly for AMD GPU - please feel free to test and improve!*') + elif device == 'qaic': + inp['implementation']['force']='qualcomm' + inp['precision']['force']='' + inp['backend']['force']='glow' + + + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_division', 'desc':inp['division']}) + division = r.get('value2') + inp['division']['force'] = division + + + y = 'compliance' + if division=='closed': + inp[y]['default'] = 'yes' + r = 
misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_compliance', 'desc':inp[y]}) + compliance = r.get('value2') + inp[y]['force'] = compliance + + if compliance == 'yes': + st.markdown('*:red[See [online table with required compliance tests](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#5132-inference)].*') + + else: + inp[y]['force'] = 'no' + + + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_category', 'desc':inp['category']}) + category = r.get('value2') + inp['category']['force'] = category + + + + + ############################################################################# + # Implementation + v = bench_input.get('mlperf_inference_implementation') + if v!=None and v!='': + inp['implementation']['force'] = v + else: + if device == 'cuda': + inp['implementation']['choices']=['nvidia','mlcommons-python','mlcommons-cpp'] + inp['implementation']['default']='nvidia' + inp['backend']['choices']=['tensorrt','onnxruntime','pytorch'] + inp['backend']['default']='tensorrt' + + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_implementation', 'desc':inp['implementation']}) + implementation = r.get('value2') + inp['implementation']['force'] = implementation + + implementation_setup = '' + r = load_md(script_path, 'setup', 'i-'+implementation) + if r['return'] == 0: implementation_setup = r['string'] + + url_faq_implementation = '' + r = get_url(script_url, script_path, 'faq', implementation, 'FAQ online') + if r['return'] == 0: url_faq_implementation = r['url_online'] + + can_have_docker_flag = False + + if implementation == 'mlcommons-cpp': +# inp['backend']['choices'] = ['onnxruntime'] + inp['precision']['force']='float32' + inp['backend']['force'] = 'onnxruntime' + inp['model']['choices'] = ['resnet50', 'retinanet'] + st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-mlcommons-cpp)]*') + elif implementation == 'mlcommons-python': + inp['precision']['force']='float32' + if device == 'cuda': + inp['backend']['choices']=['onnxruntime','pytorch','tf'] + inp['backend']['default'] = 'onnxruntime' + st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-mlcommons-python)]*') + elif implementation == 'ctuning-cpp-tflite': + inp['precision']['force']='float32' + inp['model']['force']='resnet50' + st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-ctuning-cpp-tflite)]*') + elif implementation == 'nvidia': + inp['backend']['force'] = 'tensorrt' + extra['skip_script_docker_func'] = True + can_have_docker_flag = True + st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-nvidia)]*') + elif implementation == 'intel': + inp['model']['choices'] = ['bert-99', 'gptj-99'] + inp['model']['default'] = 'bert-99' + inp['precision']['choices'] = ['int8', 'int4'] + inp['precision']['default'] = 'int8' + inp['category']['force'] = 'datacenter' + inp['backend']['force'] = 'pytorch' + inp['sut']['default'] = 'sapphire-rapids.112c' + can_have_docker_flag = True + extra['skip_script_docker_func'] = True +# st.markdown('*:red[Note: Intel implementation require 
extra CM command to build and run Docker container - you will run CM commands to run MLPerf benchmarks there!]*') + st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-mlperf-inference-intel)]*') + elif implementation == 'qualcomm': + inp['model']['choices'] = ['resnet50', 'retinanet', 'bert-99'] + inp['model']['default'] = 'bert-99' + inp['precision']['default'] = 'float16' + extra['skip_script_docker_func'] = True + st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-mlperf-inference-qualcomm)]*') + + + ############################################################################# + # Backend + + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_backend', 'desc':inp['backend']}) + backend = r.get('value2') + inp['backend']['force'] = backend + + backend_setup = '' + r = load_md(script_path, 'setup', 'b-'+backend) + if r['return'] == 0: backend_setup = r['string'] + + if backend == 'deepsparse': + inp['model']['choices'] = ['resnet50', 'retinanet', 'bert-99', 'bert-99.9'] + inp['model']['default'] = 'bert-99' + inp['precision']['choices'] = ['float32', 'int8'] + inp['precision']['default'] = 'int8' + if 'force' in inp['precision']: del(inp['precision']['force']) + + + + ############################################################################# + # Model + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_model', 'desc':inp['model']}) + model = r.get('value2') + inp['model']['force'] = model + + github_doc_model = '' + + if model == 'retinanet': + x = '50' + if implementation == 'mlcommons-python': + x= '200' + st.markdown(':red[This model requires ~{}GB of free disk space for preprocessed dataset in a full/submission run!]\n'.format(x)) + + elif model.startswith('bert-'): + github_doc_model = 'bert' + + elif model.startswith('3d-unet-'): + github_doc_model = '3d-unet' + + elif model == 'rnnt': + github_doc_model = 'rnnt' + + elif model.startswith('dlrm-v2-'): + github_doc_model = 'dlrm_v2' + + elif model.startswith('gptj-'): + github_doc_model = 'gpt-j' + + elif model == 'sdxl': + github_doc_model = 'stable-diffusion-xl' + + elif model.startswith('llama2-'): + github_doc_model = 'llama2-70b' + + if github_doc_model == '': github_doc_model = model + + model_cm_url='https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference/{}'.format(github_doc_model) + extra_notes_online = '[Extra notes online]({})\n'.format(model_cm_url) + + st.markdown('*[CM-MLPerf GitHub docs for this model]({})*'.format(model_cm_url)) + + ############################################################################# + # Precision + if implementation == 'intel': + if model == 'bert-99': + inp['precision']['force'] = 'int8' + elif model == 'gptj-99': + inp['precision']['force'] = 'int4' + elif implementation == 'qualcomm': + if model == 'resnet50': + inp['precision']['print'] = 'int8' + elif model == 'retinanet': + inp['precision']['print'] = 'int8' + elif model == 'bert-99': + inp['precision']['print'] = 'int8/float16' + + if inp['precision'].get('force','')=='': + x = inp['precision'].get('print','') + if x!='': + st.markdown('**{}**: {}'.format(inp['precision']['desc'], x)) + else: + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_precision', 'desc':inp['precision']}) + precision 
= r.get('value2') + inp['precision']['force'] = precision + + ############################################################################# + # Benchmark version + + script_meta_variations = script_meta['variations'] + + choices = [''] + [k for k in script_meta_variations if script_meta_variations[k].get('group','') == 'benchmark-version'] + desc = {'choices': choices, 'default':choices[0], 'desc':'Force specific benchmark version?'} + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_version', 'desc':desc}) + benchmark_version = r.get('value2') + + if benchmark_version!='': + params['~~benchmark-version']=[benchmark_version] + + ############################################################################# + # Run via Docker container + if can_have_docker_flag: + + default_choice = 'yes - run in container' + + choices = [default_choice, 'no - run natively'] + desc = {'choices': choices, 'default':choices[0], 'desc':'Should CM script prepare and run Docker container in interactive mode to run MLPerf? You can then copy/paste CM commands generated by this GUI to benchmark different models.'} + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_docker', 'desc':desc}) + benchmark_docker = r.get('value2') + + if benchmark_docker == 'yes - run in container': + add_to_st_inputs['@docker']=True + add_to_st_inputs['@docker_cache']='no' + + ############################################################################# + # Prepare submission + st.markdown('---') + + submission = st.toggle('Would you like to prepare official submission?', value = False) + if submission: + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_hw_name', 'desc':inp['hw_name']}) + inp['hw_name']['force'] = r.get('value2') + + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_submitter', 'desc':inp['submitter']}) + submitter = r.get('value2') + inp['submitter']['force'] = submitter + + params['~~submission-generation'] = ['submission'] + params['~all-scenarios'] = ['true'] + inp['scenario']['force'] = '' + inp['clean']['default'] = False + inp['repro']['force'] = True + + x = '*:red[Use the following command to find local directory with the submission tree and results:]*\n```bash\ncm find cache --tags=submission,dir\n```\n' + + x += '*:red[You will also find results in `mlperf-inference-submission.tar.gz` file that you can submit to MLPerf!]*\n\n' + + x += '*:red[Note that if some results are INVALID due to too short run, you can rerun the same CM command and it should increase the length of the benchmark until you get valid result!]*\n' + + st.markdown(x) + + st.markdown('---') + + else: + inp['submitter']['force']='' + inp['clean']['default']=True + params['~submission']=['false'] + + choices = ['Performance', 'Accuracy', 'Find Performance from a short run', 'Performance and Accuracy'] + desc = {'choices': choices, 'default':choices[0], 'desc':'What to measure?'} + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_measure', 'desc':desc}) + measure = r.get('value2') + + x = '' + if measure == 'Performance': + x = 'performance-only' + elif measure == 'Accuracy': + x = 'accuracy-only' + elif measure == 'Find Performance from a short run': + x = 'find-performance' + elif measure == 'Performance and Accuracy': + x = 'submission' + + 
params['~~submission-generation']=[x] + + + ############################################################################# + # Prepare scenario + + xall = 'All applicable' + choices = ['Offline', 'Server', 'SingleStream', 'MultiStream', xall] + desc = {'choices':choices, 'default':choices[0], 'desc':'Which scenario(s)?'} + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_scenario', 'desc':desc}) + scenario = r.get('value2') + + + if scenario == xall: + params['~all-scenarios']=['true'] + inp['scenario']['force']='' + else: + inp['scenario']['force']=scenario + + + + + ############################################################################# + # Short or full run + + x = ['Full run', 'Short run'] + if submission: + choices = [x[0], x[1]] + else: + choices = [x[1], x[0]] + + desc = {'choices':choices, 'default':choices[0], 'desc':'Short (test) or full (valid) run?'} + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_how', 'desc':desc}) + how = r.get('value2') + + if how == x[0]: + params['~~submission-generation-style']=['full'] + inp['execution_mode']['force'] = 'valid' + else: + params['~~submission-generation-style']=['short'] + inp['execution_mode']['force'] = 'test' + + + + ############################################################################# + # Power + +# desc = {'boolean':True, 'default':False, 'desc':'Measure power?'} +# r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_power', 'desc':desc}) +# power = r.get('value2', False) + + power = st.toggle('Measure power consumption?', value = False) + + if power: + inp['power']['force'] = 'yes' + + y = 'adr.mlperf-power-client.power_server' + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_power_server', 'desc':inp[y]}) + inp[y]['force'] = r.get('value2') + + y = 'adr.mlperf-power-client.port' + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_power_port', 'desc':inp[y]}) + inp[y]['force'] = r.get('value2') + + st.markdown('*:red[See [online notes](https://github.com/mlcommons/ck/blob/master/docs/tutorials/mlperf-inference-power-measurement.md)] to setup power meter and server.*') + + else: + inp['power']['force'] = 'no' + inp['adr.mlperf-power-client.power_server']['force']='' + inp['adr.mlperf-power-client.port']['force']='' + + + ############################################################################# + # Dashboard + +# desc = {'boolean':True, 'default':False, 'desc':'Output results to W&B dashboard?'} +# r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_dashboard', 'desc':desc}) +# dashboard = r.get('value2', False) + + dashboard = st.toggle('Output results to W&B dashboard?', value = False) + + if dashboard: + params['~dashboard']=['true'] + + y = 'dashboard_wb_project' + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_power_wb_project', 'desc':inp[y]}) + inp[y]['force'] = r.get('value2') + + y = 'dashboard_wb_user' + r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_power_wb_user', 'desc':inp[y]}) + inp[y]['force'] = r.get('value2') + + else: + params['~dashboard']=['false'] + inp['dashboard_wb_project']['force']='' + inp['dashboard_wb_user']['force']='' + + + + + # Hide 
customization by default + params['hide_script_customization'] = True + + x = implementation_setup + if backend_setup!='': + if x != '': x+='\n\n' + x+=backend_setup + + extra['extra_notes_online'] = extra_notes_online + extra['extra_faq_online'] = url_faq_implementation + extra['extra_setup'] = x + + ############################################################################# + value_reproduce = inp.get('repro',{}).get('force', False) + reproduce = st.toggle('Record extra info for reproducibility?', value = value_reproduce) + + explore = st.toggle('Explore/tune benchmark (batch size, threads, etc)?', value = False) + + if reproduce or explore: + add_to_st_inputs.update({ + "@repro_extra.run-mlperf-inference-app.bench_uid": bench_uid, + "@repro_extra.run-mlperf-inference-app.compute_uid": compute_uid, + '@results_dir':'{{CM_EXPERIMENT_PATH3}}', + '@submission_dir':'{{CM_EXPERIMENT_PATH3}}' + }) + + inp['repro']['force'] = True + extra['use_experiment'] = True + + if explore: + add_to_st_inputs['@batch_size']='{{CM_EXPLORE_BATCH_SIZE{[1,2,4,8]}}}' + + ############################################################################# + debug = st.toggle('Debug and run MLPerf benchmark natively from command line after CM auto-generates CMD?', value=False) + if debug: + inp['debug']['force'] = True + + + extra['add_to_st_inputs'] = add_to_st_inputs + + return {'return':0, 'end_html':end_html, 'extra':extra} diff --git a/script/run-mlperf-inference-app/faq/ctuning-cpp-tflite.md b/script/run-mlperf-inference-app/faq/ctuning-cpp-tflite.md new file mode 100644 index 0000000000..920b6243b2 --- /dev/null +++ b/script/run-mlperf-inference-app/faq/ctuning-cpp-tflite.md @@ -0,0 +1 @@ +# cTuning TFLite C++ implementation of MLPerf inference diff --git a/script/run-mlperf-inference-app/faq/deepsparse.md b/script/run-mlperf-inference-app/faq/deepsparse.md new file mode 100644 index 0000000000..63eb724912 --- /dev/null +++ b/script/run-mlperf-inference-app/faq/deepsparse.md @@ -0,0 +1 @@ +# FAQ: MLPerf inference with DeepSparse backend diff --git a/script/run-mlperf-inference-app/faq/intel.md b/script/run-mlperf-inference-app/faq/intel.md new file mode 100644 index 0000000000..79f6aa9791 --- /dev/null +++ b/script/run-mlperf-inference-app/faq/intel.md @@ -0,0 +1 @@ +# FAQ: Intel implementation of MLPerf inference diff --git a/script/run-mlperf-inference-app/faq/mlcommons-cpp.md b/script/run-mlperf-inference-app/faq/mlcommons-cpp.md new file mode 100644 index 0000000000..48700eead0 --- /dev/null +++ b/script/run-mlperf-inference-app/faq/mlcommons-cpp.md @@ -0,0 +1 @@ +# FAQ: MLCommons C++ implementation of MLPerf inference diff --git a/script/run-mlperf-inference-app/faq/mlcommons-python.md b/script/run-mlperf-inference-app/faq/mlcommons-python.md new file mode 100644 index 0000000000..d8ed888f6f --- /dev/null +++ b/script/run-mlperf-inference-app/faq/mlcommons-python.md @@ -0,0 +1 @@ +# MLCommons reference implementation of MLPerf inference diff --git a/script/run-mlperf-inference-app/faq/nvidia.md b/script/run-mlperf-inference-app/faq/nvidia.md new file mode 100644 index 0000000000..c873bf89e2 --- /dev/null +++ b/script/run-mlperf-inference-app/faq/nvidia.md @@ -0,0 +1,2 @@ +# FAQ: Nvidia implementation of MLPerf inference + diff --git a/script/run-mlperf-inference-app/faq/qualcomm.md b/script/run-mlperf-inference-app/faq/qualcomm.md new file mode 100644 index 0000000000..92af081caa --- /dev/null +++ b/script/run-mlperf-inference-app/faq/qualcomm.md @@ -0,0 +1 @@ +# FAQ: Qualcomm implementation of MLPerf 
inference diff --git a/script/run-mlperf-inference-app/modular-cm-containers/README.md b/script/run-mlperf-inference-app/modular-cm-containers/README.md new file mode 100644 index 0000000000..fdbe0e28af --- /dev/null +++ b/script/run-mlperf-inference-app/modular-cm-containers/README.md @@ -0,0 +1,30 @@ +***Outdated*** + +# About + +Prototyping modular and customizable CM containers for MLPerf. + +# Build + +```bash +./build.sh +``` + +# Run + +```bash +./run.sh + +cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_dashboard \ + --adr.python.version_min=3.8 \ + --submitter="modular-cm-mlperf-container" \ + --lang=python \ + --hw_name=default \ + --model=resnet50 \ + --backend=onnxruntime \ + --device=cpu \ + --scenario=Offline \ + --test_query_count=500 \ + --quiet \ + --clean +``` diff --git a/script/run-mlperf-inference-app/modular-cm-containers/_common.bat b/script/run-mlperf-inference-app/modular-cm-containers/_common.bat new file mode 100644 index 0000000000..7f9d3aab37 --- /dev/null +++ b/script/run-mlperf-inference-app/modular-cm-containers/_common.bat @@ -0,0 +1,7 @@ +rem set CM_CACHE=--no-cache + +set CM_DOCKER_ORG=modularcm +set CM_DOCKER_NAME=mlperf-inference +set CM_OS_NAME=ubuntu +set CM_HW_TARGET=cpu +set CM_OS_VERSION=22.04 diff --git a/script/run-mlperf-inference-app/modular-cm-containers/_common.sh b/script/run-mlperf-inference-app/modular-cm-containers/_common.sh new file mode 100644 index 0000000000..4d2f18aac0 --- /dev/null +++ b/script/run-mlperf-inference-app/modular-cm-containers/_common.sh @@ -0,0 +1,10 @@ +#! /bin/bash + +#export CM_CACHE="--no-cache" + +export CM_DOCKER_ORG=modularcm +export CM_DOCKER_NAME="mlperf-inference" +export CM_OS_NAME="ubuntu" +export CM_HW_TARGET="cpu" +export CM_OS_VERSION="22.04" + diff --git a/script/run-mlperf-inference-app/modular-cm-containers/build.bat b/script/run-mlperf-inference-app/modular-cm-containers/build.bat new file mode 100644 index 0000000000..d7c097811d --- /dev/null +++ b/script/run-mlperf-inference-app/modular-cm-containers/build.bat @@ -0,0 +1,25 @@ +call _common.bat + +docker build -f %CM_DOCKER_NAME%--%CM_OS_NAME%-%CM_HW_TARGET%.Dockerfile ^ + -t %CM_DOCKER_ORG%/%CM_DOCKER_NAME%:%CM_OS_NAME%-%CM_OS_VERSION% ^ + --build-arg cm_os_name=%CM_OS_NAME% ^ + --build-arg cm_hw_target=%CM_HW_TARGET% ^ + --build-arg cm_os_version=%CM_OS_VERSION% ^ + --build-arg cm_version="" ^ + --build-arg cm_automation_repo="mlcommons@ck" ^ + --build-arg cm_automation_checkout="" ^ + --build-arg cm_python_version="3.10.8" ^ + --build-arg cm_mlperf_inference_loadgen_version="" ^ + --build-arg cm_mlperf_inference_src_tags="" ^ + --build-arg cm_mlperf_inference_src_version="" ^ + --build-arg CM_MLPERF_CHOICE_SCRIPT="" ^ + --build-arg CM_MLPERF_CHOICE_SUBMITTER="Container" ^ + --build-arg CM_MLPERF_CHOICE_IMPLEMENTATION="python" ^ + --build-arg CM_MLPERF_CHOICE_HW_NAME="default" ^ + --build-arg CM_MLPERF_CHOICE_MODEL="resnet50" ^ + --build-arg CM_MLPERF_CHOICE_BACKEND="onnxruntime" ^ + --build-arg CM_MLPERF_CHOICE_DEVICE=%CM_HW_TARGET% ^ + --build-arg CM_MLPERF_CHOICE_SCENARIO="Offline" ^ + --build-arg CM_MLPERF_CHOICE_MODE="accuracy" ^ + --build-arg CM_MLPERF_CHOICE_QUERY_COUNT="5" ^ + %CM_CACHE% . diff --git a/script/run-mlperf-inference-app/modular-cm-containers/build.sh b/script/run-mlperf-inference-app/modular-cm-containers/build.sh new file mode 100644 index 0000000000..082f00d4b3 --- /dev/null +++ b/script/run-mlperf-inference-app/modular-cm-containers/build.sh @@ -0,0 +1,27 @@ +#! /bin/bash + +. 
./_common.sh + +time docker build -f ${CM_DOCKER_NAME}--${CM_OS_NAME}-${CM_HW_TARGET}.Dockerfile \ + -t ${CM_DOCKER_ORG}/${CM_DOCKER_NAME}:${CM_OS_NAME}-${CM_OS_VERSION} \ + --build-arg cm_os_name=${CM_OS_NAME} \ + --build-arg cm_hw_target=${CM_HW_TARGET} \ + --build-arg cm_os_version=${CM_OS_VERSION} \ + --build-arg cm_version="" \ + --build-arg cm_automation_repo="mlcommons@ck" \ + --build-arg cm_automation_checkout="" \ + --build-arg cm_python_version="3.10.8" \ + --build-arg cm_mlperf_inference_loadgen_version="" \ + --build-arg cm_mlperf_inference_src_tags="" \ + --build-arg cm_mlperf_inference_src_version="" \ + --build-arg CM_MLPERF_CHOICE_SCRIPT=",_short,_submission,_dashboard" \ + --build-arg CM_MLPERF_CHOICE_SUBMITTER="Container" \ + --build-arg CM_MLPERF_CHOICE_IMPLEMENTATION="python" \ + --build-arg CM_MLPERF_CHOICE_HW_NAME="default" \ + --build-arg CM_MLPERF_CHOICE_MODEL="resnet50" \ + --build-arg CM_MLPERF_CHOICE_BACKEND="onnxruntime" \ + --build-arg CM_MLPERF_CHOICE_DEVICE=${CM_HW_TARGET} \ + --build-arg CM_MLPERF_CHOICE_SCENARIO="Offline" \ + --build-arg CM_MLPERF_CHOICE_MODE="accuracy" \ + --build-arg CM_MLPERF_CHOICE_QUERY_COUNT="500" \ + ${CM_CACHE} . diff --git a/script/run-mlperf-inference-app/modular-cm-containers/mlperf-inference--ubuntu-cpu.Dockerfile b/script/run-mlperf-inference-app/modular-cm-containers/mlperf-inference--ubuntu-cpu.Dockerfile new file mode 100644 index 0000000000..8f36dc108d --- /dev/null +++ b/script/run-mlperf-inference-app/modular-cm-containers/mlperf-inference--ubuntu-cpu.Dockerfile @@ -0,0 +1,118 @@ +# Modular MLPerf container with the MLCommons CM automation meta-framework + +# Preparing OS +ARG cm_os_name="ubuntu" +ARG cm_os_version="22.04" + +FROM ${cm_os_name}:${cm_os_version} + +# Maintained by the MLCommons taskforce on automation and reproducibility +LABEL github="https://github.com/mlcommons/ck" +LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" + +# Customization +ARG CM_GH_TOKEN + +# Prepare shell and entry point +SHELL ["/bin/bash", "-c"] +ENTRYPOINT ["/bin/bash", "-c"] + +# Install system dependencies +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +RUN apt-get update -y +RUN apt-get install -y lsb-release +RUN apt-get install -y python3 python3-pip git wget sudo + +# Extra python deps +RUN python3 -m pip install requests + +# CM version +ARG cm_version="" +ENV CM_VERSION="${cm_version}" +RUN if [ "${CM_VERSION}" != "" ] ; then \ + python3 -m pip install cmind==${CM_VERSION} ; \ + else \ + python3 -m pip install cmind ; \ + fi + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +# See example in https://github.com/mlcommons/GaNDLF/blob/master/Dockerfile-CPU +RUN groupadd --gid 10001 cm +RUN useradd --uid 10000 -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + +USER cmuser:cm +WORKDIR /home/cmuser + +# Check CM installation +RUN lsb_release -a > sys-version-os.log +RUN uname -a > sys-version-kernel.log +RUN python3 --version > sys-version-python3.log +RUN cm version > sys-version-cm.log + +################################################################################ +# Get CM automation repository +ARG cm_automation_repo="mlcommons@ck" +ARG cm_automation_repo_checkout="" +ENV CM_AUTOMATION_REPO=${cm_automation_repo} +ENV CM_AUTOMATION_REPO_CHECKOUT=${cm_automation_repo_checkout} +RUN echo ${CM_AUTOMATION_REPO} +RUN 
cm pull repo ${CM_AUTOMATION_REPO} --checkout=${CM_AUTOMATION_REPO_CHECKOUT} + +################################################################################ +# Install CM system dependencies +RUN cm run script "get sys-utils-cm" --quiet + +# Detect/install python +ARG cm_python_version="" +RUN cm run script "get python3" --version=${cm_python_version} + +################################################################################ +# Build MLPerf loadgen (official with correct seed for submission) +ARG cm_mlperf_inference_loadgen_version="" +RUN cm run script "get mlperf loadgen" --adr.compiler.tags=gcc --version=${cm_mlperf_inference_loadgen_version} --adr.inference-src-loadgen.version=${cm_mlperf_inference_loadgen_version} -v + +# Install MLPerf inference source (can be private development branch) +ARG cm_mlperf_inference_src_tags="" +ARG cm_mlperf_inference_src_version="" +RUN cm run script "get mlperf inference src ${cm_mlperf_inference_src_tags}" --version=${cm_mlperf_inference_src_version} -v + +################################################################################ +# Run CM automation workflow for MLPerf +# https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-app + +ARG CM_MLPERF_CHOICE_SCRIPT= +ARG CM_MLPERF_CHOICE_SUBMITTER="Container" +ARG CM_MLPERF_CHOICE_IMPLEMENTATION="python" +ARG CM_MLPERF_CHOICE_HW_NAME="default" +ARG CM_MLPERF_CHOICE_MODEL="resnet50" +ARG CM_MLPERF_CHOICE_BACKEND="onnxruntime" +ARG CM_MLPERF_CHOICE_DEVICE="cpu" +ARG CM_MLPERF_CHOICE_SCENARIO="Offline" +ARG CM_MLPERF_CHOICE_MODE="performance" +ARG CM_MLPERF_CHOICE_QUERY_COUNT="10" + +RUN cm run script --tags=run,mlperf,inference,generate-run-cmds,${CM_MLPERF_CHOICE_SCRIPT} \ + --adr.compiler.tags=gcc \ + --adr.python.version_min=3.8 \ + --adr.compiler.tags=gcc \ + --submitter="${CM_MLPERF_CHOICE_SUBMITTER}" \ + --lang=${CM_MLPERF_CHOICE_IMPLEMENTATION} \ + --hw_name=${CM_MLPERF_CHOICE_HW_NAME} \ + --model=${CM_MLPERF_CHOICE_MODEL} \ + --backend=${CM_MLPERF_CHOICE_BACKEND} \ + --device=${CM_MLPERF_CHOICE_DEVICE} \ + --scenario=${CM_MLPERF_CHOICE_SCENARIO} \ + --mode=${CM_MLPERF_CHOICE_MODE} \ + --test_query_count=${CM_MLPERF_CHOICE_QUERY_COUNT} \ + --quiet \ + --clean + +################################################################################ +# CMD entry point +CMD /bin/bash diff --git a/script/run-mlperf-inference-app/modular-cm-containers/run.bat b/script/run-mlperf-inference-app/modular-cm-containers/run.bat new file mode 100644 index 0000000000..53b13dcb99 --- /dev/null +++ b/script/run-mlperf-inference-app/modular-cm-containers/run.bat @@ -0,0 +1,3 @@ +call _common.bat + +docker run -it %CM_DOCKER_ORG%/%CM_DOCKER_NAME%:%CM_OS_NAME%-%CM_OS_VERSION% diff --git a/script/run-mlperf-inference-app/modular-cm-containers/run.sh b/script/run-mlperf-inference-app/modular-cm-containers/run.sh new file mode 100644 index 0000000000..3473716c73 --- /dev/null +++ b/script/run-mlperf-inference-app/modular-cm-containers/run.sh @@ -0,0 +1,3 @@ +. 
./_common.sh + +docker run -it ${CM_DOCKER_ORG}/${CM_DOCKER_NAME}:${CM_OS_NAME}-${CM_OS_VERSION} diff --git a/script/run-mlperf-inference-app/run_mobilenet.py b/script/run-mlperf-inference-app/run_mobilenet.py new file mode 100644 index 0000000000..b5259168a2 --- /dev/null +++ b/script/run-mlperf-inference-app/run_mobilenet.py @@ -0,0 +1,106 @@ +import cmind +import os +import sys + +models = { + "mobilenet": { + "v1": { + "multiplier": [ "multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.25" ], + "resolution": [ "resolution-224", "resolution-192", "resolution-160", "resolution-128" ], + "kind": [""] + }, + "v2": { + "multiplier": [ "multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.35" ], + "resolution": [ "resolution-224", "resolution-192", "resolution-160", "resolution-128" ], + "kind": [""] + }, + "v3": { + "multiplier": [""], + "resolution": [""], + "kind": [ "large", "large-minimalistic", "small", "small-minimalistic" ] + } + }, + "efficientnet": { + "": { + "multiplier": [""], + "resolution": [""], + "kind": [ "lite0", "lite1", "lite2", "lite3", "lite4" ] + } + } + } +variation_strings = {} +for t1 in models: + variation_strings[t1] = [] + variation_list = [] + variation_list.append(t1) + for version in models[t1]: + variation_list = [] + if version.strip(): + variation_list.append("_"+version) + variation_list_saved = variation_list.copy() + for k1 in models[t1][version]["multiplier"]: + variation_list = variation_list_saved.copy() + if k1.strip(): + variation_list.append("_"+k1) + variation_list_saved_2 = variation_list.copy() + for k2 in models[t1][version]["resolution"]: + variation_list = variation_list_saved_2.copy() + if k2.strip(): + variation_list.append("_"+k2) + variation_list_saved_3 = variation_list.copy() + for k3 in models[t1][version]["kind"]: + variation_list = variation_list_saved_3.copy() + if k3.strip(): + variation_list.append("_"+k3) + variation_strings[t1].append(",".join(variation_list)) +args = sys.argv + +opt=None +if len(args) > 1: + opt = args[1] +if opt=="submission": + var="_submission" + execution_mode="valid" +else: + var="_find-performance" + execution_mode="test" + +precisions = [ "fp32", "uint8" ] +for model in variation_strings: + for v in variation_strings[model]: + for precision in precisions: + if "small-minimalistic" in v and precision == "uint8": + continue; + if model == "efficientnet" and precision == "uint8": + precision = "int8" + cm_input = { + 'action': 'run', + 'automation': 'script', + 'tags': f'generate-run-cmds,mlperf,inference,{var}', + 'quiet': True, + 'implementation': 'tflite-cpp', + 'precision': precision, + 'model': model, + 'scenario': 'SingleStream', + 'execution_mode': execution_mode, + 'test_query_count': '50', + 'adr': { + 'tflite-model': { + 'tags': v + }, + 'compiler': { + 'tags': 'gcc' + }, + 'mlperf-inference-implementation': { + 'tags': '_armnn,_use-neon' + } + } + } + print(cm_input) + r = cmind.access(cm_input) + if r['return'] > 0: + print(r) + #exit(1) + + + diff --git a/script/run-mlperf-inference-app/setup/b-deepsparse.md b/script/run-mlperf-inference-app/setup/b-deepsparse.md new file mode 100644 index 0000000000..30957027ea --- /dev/null +++ b/script/run-mlperf-inference-app/setup/b-deepsparse.md @@ -0,0 +1 @@ +DeepSparse backend diff --git a/script/run-mlperf-inference-app/setup/i-intel.md b/script/run-mlperf-inference-app/setup/i-intel.md new file mode 100644 index 0000000000..a7079b7bcb --- /dev/null +++ b/script/run-mlperf-inference-app/setup/i-intel.md @@ -0,0 +1 @@ 
+CM can run Intel's MLPerf inference benchmark implementation either natively or inside a container.
diff --git a/script/run-mlperf-inference-app/setup/i-nvidia.md b/script/run-mlperf-inference-app/setup/i-nvidia.md
new file mode 100644
index 0000000000..bfa50410c3
--- /dev/null
+++ b/script/run-mlperf-inference-app/setup/i-nvidia.md
@@ -0,0 +1,3 @@
+* The container will require around 60 GB of free disk space.
+* The Docker cache and running all models (without DLRM) will require ~600 GB of free disk space.
+* Once inside the interactive Docker session, you can copy/paste the CM commands generated by this GUI to benchmark different models.
diff --git a/script/run-mlperf-inference-app/setup/i-qualcomm.md b/script/run-mlperf-inference-app/setup/i-qualcomm.md
new file mode 100644
index 0000000000..c0aef51871
--- /dev/null
+++ b/script/run-mlperf-inference-app/setup/i-qualcomm.md
@@ -0,0 +1,6 @@
+* CM runs Qualcomm's MLPerf inference benchmark implementation natively.
+* The [QAIC SDK](https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc) must be installed.
+* If you run CM-MLPerf for Qualcomm in a cloud, you may need to update/change the VM image to one with an SDK version compatible with Qualcomm's MLPerf implementation.
+  Please check [cTuning's MLPerf inference results](https://mlcommons.org/benchmarks/inference-datacenter/) to see the working QAIC SDK versions.
+
+
diff --git a/script/run-mlperf-inference-mobilenet-models/README-about.md b/script/run-mlperf-inference-mobilenet-models/README-about.md
new file mode 100644
index 0000000000..a3d6991b26
--- /dev/null
+++ b/script/run-mlperf-inference-mobilenet-models/README-about.md
@@ -0,0 +1,107 @@
+## Set up
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
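+For reference, the registration step can also be driven from the CM Python API. The following is a minimal sketch; the script tags and the `input` key are assumptions based on the linked `get-dataset-imagenet-val` instructions, so adjust them if your copy of that script differs.
+
+```python
+# Hedged sketch: register an already-downloaded ImageNet 2012 validation set in CM.
+# The tags and the 'input' key are assumptions taken from the linked
+# get-dataset-imagenet-val instructions; variation names may differ per version.
+import cmind
+
+r = cmind.access({
+    'action': 'run',
+    'automation': 'script',
+    'tags': 'get,dataset,imagenet,val',
+    'input': '/path/to/imagenet-2012-val',  # folder with the 50000 validation images
+    'out': 'con',
+})
+if r['return'] > 0:
+    print(r['error'])
+```
+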
+Click here to set up docker (Optional).
+
+### Docker Setup
+
+CM commands are expected to run natively, but if you prefer not to modify the host system, you can run the command below to set up a Docker container.
+
+```
+cm docker script --tags=run,mobilenet-models,_tflite,_accuracy-only \
+--adr.compiler.tags=gcc \
+--docker_cm_repo=mlcommons@ck \
+--imagenet_path=$HOME/imagenet-2012-val \
+--results_dir=$HOME/mobilenet_results \
+--submission_dir=$HOME/inference_submission_3.1 \
+--docker_skip_run_cmd
+```
+
+This command will build a Docker container and give you an interactive shell from which you can execute the CM run commands below.
+* `results_dir`, `submission_dir` and `imagenet_path` are mounted from the host system.
+* `results_dir` and `submission_dir` are expected to be empty directories that will be populated by the Docker container.
+* `imagenet_path` should point to the ImageNet folder containing the 50000 validation images.
+
+
+## Run Commands
+
+Since the runs can take many hours, you may want to run them under `screen` if you are working on a remote machine; you can install it as follows. You may omit `screen` from the commands if you are running directly on the host system.
+```
+cmr "get generic-sys-util _screen"
+```
+### Default tflite
+
+
+#### Do a full accuracy run for all the models (can take almost a day)
+
+```
+screen cmr "run mobilenet-models _tflite _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Do a full performance run for all the models (can take almost a day)
+```
+screen cmr "run mobilenet-models _tflite _performance-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Generate README files for all the runs
+```
+cmr "run mobilenet-models _tflite _populate-readme" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Generate actual submission tree
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cmr "generate inference submission" \
+--results_dir=$HOME/mobilenet_results/valid_results \
+--submission_dir=$HOME/mobilenet_submission_tree \
+--clean \
+--infer_scenario_results=yes \
+--adr.compiler.tags=gcc --adr.inference-src.version=master \
+--run-checker \
+--submitter=cTuning \
+--hw_notes_extra="Result taken by NAME"
+```
+* Use `--hw_name="My system name"` to give a meaningful system name. Examples can be seen [here](https://github.com/mlcommons/inference_results_v3.0/tree/main/open/cTuning/systems).
+
+#### Push the results to GitHub repo
+
+First, create a fork of [this repo](https://github.com/ctuning/mlperf_inference_submissions_v3.1/). Then run the following command after replacing `--repo_url` with your fork URL.
+```
+cmr "push github mlperf inference submission" \
+--submission_dir=$HOME/mobilenet_submission_tree \
+--repo_url=https://github.com/ctuning/mlperf_inference_submissions_v3.1/ \
+--commit_message="Mobilenet results added"
+```
+
+Create a PR to the [cTuning repo](https://github.com/ctuning/mlperf_inference_submissions_v3.1/).
+
+### Using ARMNN with NEON
+
+Follow the same procedure as above, but for the first three experiment runs add `_armnn,_neon` to the tags. For example:
+```
+cmr "run mobilenet-models _tflite _armnn _neon _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+`results_dir` and `submission_dir` can be the same as before, since results go to different subfolders.
+
+### Using ARMNN with OpenCL
+Follow the same procedure as above, but for the first three experiment runs add `_armnn,_opencl` to the tags. For example:
+```
+cmr "run mobilenet-models _tflite _armnn _opencl _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+`results_dir` and `submission_dir` can be the same as before, since results go to different subfolders.
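+
+### Using the CM Python API
+
+The same runs can be scripted via `cmind.access`. This is a minimal sketch under the same assumptions as the commands above (gcc compiler, results under `$HOME/mobilenet_results`); it is not a drop-in replacement for the documented CLI flow.
+
+```python
+# Hedged sketch: accuracy and performance runs for all models via the CM Python API,
+# mirroring the "cmr" commands above. Keys follow this script's input flags.
+import os
+import cmind
+
+for mode in ['_accuracy-only', '_performance-only']:
+    r = cmind.access({
+        'action': 'run',
+        'automation': 'script',
+        'tags': f'run,mobilenet-models,_tflite,{mode}',
+        'adr': {'compiler': {'tags': 'gcc'}},
+        'results_dir': os.path.join(os.path.expanduser('~'), 'mobilenet_results'),
+        'out': 'con',
+    })
+    if r['return'] > 0:
+        print(r['error'])
+        break
+```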
diff --git a/script/run-mlperf-inference-mobilenet-models/README.md b/script/run-mlperf-inference-mobilenet-models/README.md
new file mode 100644
index 0000000000..7469465e79
--- /dev/null
+++ b/script/run-mlperf-inference-mobilenet-models/README.md
@@ -0,0 +1,385 @@
+Automatically generated README for this automation recipe: **run-mlperf-inference-mobilenet-models**
+
+Category: **MLPerf benchmark support**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=run-mlperf-inference-mobilenet-models,f21cc993a8b14a58) ]*
+
+---
+
+## Set up
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+Click here to set up docker (Optional).
+
+### Docker Setup
+
+CM commands are expected to run natively, but if you prefer not to modify the host system, you can run the command below to set up a Docker container.
+
+```
+cm docker script --tags=run,mobilenet-models,_tflite,_accuracy-only \
+--adr.compiler.tags=gcc \
+--docker_cm_repo=mlcommons@ck \
+--imagenet_path=$HOME/imagenet-2012-val \
+--results_dir=$HOME/mobilenet_results \
+--submission_dir=$HOME/inference_submission_3.1 \
+--docker_skip_run_cmd
+```
+
+This command will build a Docker container and give you an interactive shell from which you can execute the CM run commands below.
+* `results_dir`, `submission_dir` and `imagenet_path` are mounted from the host system.
+* `results_dir` and `submission_dir` are expected to be empty directories that will be populated by the Docker container.
+* `imagenet_path` should point to the ImageNet folder containing the 50000 validation images.
+
+
+## Run Commands
+
+Since the runs can take many hours, you may want to run them under `screen` if you are working on a remote machine; you can install it as follows. You may omit `screen` from the commands if you are running directly on the host system.
+```
+cmr "get generic-sys-util _screen"
+```
+### Default tflite
+
+
+#### Do a full accuracy run for all the models (can take almost a day)
+
+```
+screen cmr "run mobilenet-models _tflite _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Do a full performance run for all the models (can take almost a day)
+```
+screen cmr "run mobilenet-models _tflite _performance-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Generate README files for all the runs
+```
+cmr "run mobilenet-models _tflite _populate-readme" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Generate actual submission tree
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cmr "generate inference submission" \
+--results_dir=$HOME/mobilenet_results/valid_results \
+--submission_dir=$HOME/mobilenet_submission_tree \
+--clean \
+--infer_scenario_results=yes \
+--adr.compiler.tags=gcc --adr.inference-src.version=master \
+--run-checker \
+--submitter=cTuning \
+--hw_notes_extra="Result taken by NAME"
+```
+* Use `--hw_name="My system name"` to give a meaningful system name. Examples can be seen [here](https://github.com/mlcommons/inference_results_v3.0/tree/main/open/cTuning/systems).
+
+#### Push the results to GitHub repo
+
+First, create a fork of [this repo](https://github.com/ctuning/mlperf_inference_submissions_v3.1/). Then run the following command after replacing `--repo_url` with your fork URL.
+```
+cmr "push github mlperf inference submission" \
+--submission_dir=$HOME/mobilenet_submission_tree \
+--repo_url=https://github.com/ctuning/mlperf_inference_submissions_v3.1/ \
+--commit_message="Mobilenet results added"
+```
+
+Create a PR to the [cTuning repo](https://github.com/ctuning/mlperf_inference_submissions_v3.1/).
+
+### Using ARMNN with NEON
+
+Follow the same procedure as above, but for the first three experiment runs add `_armnn,_neon` to the tags. For example:
+```
+cmr "run mobilenet-models _tflite _armnn _neon _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+`results_dir` and `submission_dir` can be the same as before, since results go to different subfolders.
+
+### Using ARMNN with OpenCL
+Follow the same procedure as above, but for the first three experiment runs add `_armnn,_opencl` to the tags. For example:
+```
+cmr "run mobilenet-models _tflite _armnn _opencl _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+`results_dir` and `submission_dir` can be the same as before, since results go to different subfolders.
+
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-mobilenet-models)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+```cmr "run mobilenet models image-classification mobilenet-models mlperf inference" --help```
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference`
+
+`cm run script --tags=run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference[,variations] [--input_flags]`
+
+*or*
+
+`cmr "run mobilenet models image-classification mobilenet-models mlperf inference"`
+
+`cmr "run mobilenet models image-classification mobilenet-models mlperf inference [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
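+The `...` placeholders above stand for this script's other input keys. A concrete, runnable variant under the same assumptions as the CLI examples above (gcc compiler, `_find-performance` mode) could look as follows:
+
+```python
+# Hedged sketch: the template above with the elided keys filled in.
+import cmind
+
+r = cmind.access({
+    'action': 'run',
+    'automation': 'script',
+    'tags': 'run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference,_tflite,_find-performance',
+    'adr': {'compiler': {'tags': 'gcc'}},
+    'out': 'con',
+})
+if r['return'] > 0:
+    print(r['error'])
+```
+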
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run mobilenet models image-classification mobilenet-models mlperf inference[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_armnn` + - Environment variables: + - *CM_MLPERF_USE_ARMNN_LIBRARY*: `yes` + - Workflow: + * `_neon` + - Aliases: `_use-neon` + - Environment variables: + - *CM_MLPERF_USE_NEON*: `yes` + - Workflow: + * `_only-fp32` + - Environment variables: + - *CM_MLPERF_RUN_INT8*: `no` + - Workflow: + * `_only-int8` + - Environment variables: + - *CM_MLPERF_RUN_FP32*: `no` + - Workflow: + * `_opencl` + - Environment variables: + - *CM_MLPERF_USE_OPENCL*: `yes` + - Workflow: + * `_tflite,armnn` + - Environment variables: + - *CM_MLPERF_TFLITE_ARMNN*: `yes` + - Workflow: + * `_tflite,armnn,neon` + - Environment variables: + - *CM_MLPERF_TFLITE_ARMNN_NEON*: `yes` + - Workflow: + * `_tflite,armnn,opencl` + - Environment variables: + - *CM_MLPERF_TFLITE_ARMNN_OPENCL*: `yes` + - Workflow: + +
+ + + * Group "**base-framework**" +
+ Click here to expand this section. + + * **`_tflite`** (default) + - Workflow: + +
+ + + * Group "**model-selection**" +
+ Click here to expand this section. + + * **`_all-models`** (default) + - Environment variables: + - *CM_MLPERF_RUN_MOBILENETS*: `yes` + - *CM_MLPERF_RUN_EFFICIENTNETS*: `yes` + - Workflow: + * `_efficientnet` + - Environment variables: + - *CM_MLPERF_RUN_EFFICIENTNETS*: `yes` + - Workflow: + * `_mobilenet` + - Environment variables: + - *CM_MLPERF_RUN_MOBILENETS*: `yes` + - Workflow: + +
+ + + * Group "**optimization**" +
+ Click here to expand this section. + + * **`_tflite-default`** (default) + - Environment variables: + - *CM_MLPERF_TFLITE_DEFAULT_MODE*: `yes` + - Workflow: + +
+ + + * Group "**run-mode**" +
+ Click here to expand this section. + + * `_accuracy-only` + - Environment variables: + - *CM_MLPERF_FIND_PERFORMANCE_MODE*: `no` + - *CM_MLPERF_ACCURACY_MODE*: `yes` + - *CM_MLPERF_SUBMISSION_MODE*: `no` + - Workflow: + * `_find-performance` + - Environment variables: + - *CM_MLPERF_FIND_PERFORMANCE_MODE*: `yes` + - *CM_MLPERF_SUBMISSION_MODE*: `no` + - Workflow: + * `_performance-only` + - Environment variables: + - *CM_MLPERF_FIND_PERFORMANCE_MODE*: `no` + - *CM_MLPERF_PERFORMANCE_MODE*: `yes` + - *CM_MLPERF_SUBMISSION_MODE*: `no` + - Workflow: + * `_populate-readme` + - Environment variables: + - *CM_MLPERF_FIND_PERFORMANCE_MODE*: `no` + - *CM_MLPERF_POPULATE_README*: `yes` + - Workflow: + * `_submission` + - Environment variables: + - *CM_MLPERF_FIND_PERFORMANCE_MODE*: `no` + - *CM_MLPERF_SUBMISSION_MODE*: `yes` + - Workflow: + +
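+
+Variations from different groups can be combined in a single run. A sketch using the CM Python API (the tags mirror the variation names listed above):
+
+```python
+# Hedged sketch: mobilenets only, accuracy mode, with the ARMNN+NEON variations.
+import cmind
+
+r = cmind.access({
+    'action': 'run',
+    'automation': 'script',
+    'tags': 'run,mobilenet-models,_tflite,_mobilenet,_accuracy-only,_armnn,_neon',
+    'out': 'con',
+})
+if r['return'] > 0:
+    print(r['error'])
+```
+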
+ + +#### Default variations + +`_all-models,_tflite,_tflite-default` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--find-performance=value` → `CM_MLPERF_FIND_PERFORMANCE_MODE=value`
+* `--imagenet_path=value` → `IMAGENET_PATH=value`
+* `--no-rerun=value` → `CM_MLPERF_NO_RERUN=value`
+* `--power=value` → `CM_MLPERF_POWER=value`
+* `--results_dir=value` → `CM_MLPERF_INFERENCE_RESULTS_DIR=value`
+* `--submission=value` → `CM_MLPERF_SUBMISSION_MODE=value`
+* `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "find-performance":...})
+```
+
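+For example, a complete call passing the `--find-performance` flag through the Python API might look as follows (a sketch; the flag-to-environment mapping is taken from the table above):
+
+```python
+# Hedged sketch: 'find-performance' maps to CM_MLPERF_FIND_PERFORMANCE_MODE.
+import cmind as cm
+
+r = cm.access({
+    'action': 'run',
+    'automation': 'script',
+    'tags': 'run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference',
+    'find-performance': 'yes',
+    'out': 'con',
+})
+if r['return'] > 0:
+    print(r['error'])
+```
+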
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MLPERF_RUN_MOBILENETS: `no` +* CM_MLPERF_RUN_EFFICIENTNETS: `no` +* CM_MLPERF_NO_RERUN: `no` +* CM_MLPERF_RUN_FP32: `yes` +* CM_MLPERF_RUN_INT8: `yes` + +
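+As a sketch, the same defaults can also be overridden from the Python API via the `env` dictionary (the key names are taken from the list above):
+
+```python
+# Hedged sketch: run only the efficientnet models by overriding a default env key.
+import cmind
+
+r = cmind.access({
+    'action': 'run',
+    'automation': 'script',
+    'tags': 'run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference',
+    'env': {'CM_MLPERF_RUN_EFFICIENTNETS': 'yes'},
+    'out': 'con',
+})
+```
+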
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-mobilenet-models/_cm.json)*** + * get,sys-utils-cm + - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-mobilenet-models/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-mobilenet-models/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-mobilenet-models/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-mobilenet-models/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-mobilenet-models/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-mobilenet-models/_cm.json) + +___ +### Script output +`cmr "run mobilenet models image-classification mobilenet-models mlperf inference [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/run-mlperf-inference-mobilenet-models/_cm.json b/script/run-mlperf-inference-mobilenet-models/_cm.json new file mode 100644 index 0000000000..29a440f19e --- /dev/null +++ b/script/run-mlperf-inference-mobilenet-models/_cm.json @@ -0,0 +1,167 @@ +{ + "alias": "run-mlperf-inference-mobilenet-models", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "MLPerf benchmark support", + "tags": [ + "run", + "mobilenet", + "models", + "image-classification", + "mobilenet-models", + "mlperf", + "inference" + ], + "deps": [ + { + "tags": "get,sys-utils-cm" + } + ], + "default_env": { + "CM_MLPERF_RUN_MOBILENETS": "no", + "CM_MLPERF_RUN_EFFICIENTNETS": "no", + "CM_MLPERF_NO_RERUN": "no", + "CM_MLPERF_RUN_FP32": "yes", + "CM_MLPERF_RUN_INT8": "yes" + }, + "input_mapping": { + "find-performance": "CM_MLPERF_FIND_PERFORMANCE_MODE", + "submission": "CM_MLPERF_SUBMISSION_MODE", + "results_dir": "CM_MLPERF_INFERENCE_RESULTS_DIR", + "power": "CM_MLPERF_POWER", + "imagenet_path": "IMAGENET_PATH", + "no-rerun": "CM_MLPERF_NO_RERUN", + "submission_dir": "CM_MLPERF_INFERENCE_SUBMISSION_DIR" + }, + "uid": "f21cc993a8b14a58", + "variations": { + "find-performance": { + "group": "run-mode", + "env": { + "CM_MLPERF_FIND_PERFORMANCE_MODE": "yes", + "CM_MLPERF_SUBMISSION_MODE": "no" + } + }, + "accuracy-only": { + "group": "run-mode", + "env": { + "CM_MLPERF_FIND_PERFORMANCE_MODE": "no", + "CM_MLPERF_ACCURACY_MODE": "yes", + "CM_MLPERF_SUBMISSION_MODE": "no" + } + }, + "performance-only": { + "group": "run-mode", + "env": { + "CM_MLPERF_FIND_PERFORMANCE_MODE": "no", + "CM_MLPERF_PERFORMANCE_MODE": "yes", + "CM_MLPERF_SUBMISSION_MODE": "no" + } + }, + "submission": { + "group": "run-mode", + "env": { + "CM_MLPERF_FIND_PERFORMANCE_MODE": "no", + "CM_MLPERF_SUBMISSION_MODE": "yes" + } + }, + "populate-readme": { + "group": "run-mode", + "env": { + "CM_MLPERF_FIND_PERFORMANCE_MODE": "no", + "CM_MLPERF_POPULATE_README": "yes" + } + }, + 
"all-models": { + "group": "model-selection", + "default": true, + "env": { + "CM_MLPERF_RUN_MOBILENETS": "yes", + "CM_MLPERF_RUN_EFFICIENTNETS": "yes" + } + }, + "mobilenet": { + "group": "model-selection", + "env": { + "CM_MLPERF_RUN_MOBILENETS": "yes" + } + }, + "efficientnet": { + "group": "model-selection", + "env": { + "CM_MLPERF_RUN_EFFICIENTNETS": "yes" + } + }, + "only-fp32": { + "env": { + "CM_MLPERF_RUN_INT8": "no" + } + }, + "only-int8": { + "env": { + "CM_MLPERF_RUN_FP32": "no" + } + }, + "tflite-default": { + "group": "optimization", + "default": true, + "env": { + "CM_MLPERF_TFLITE_DEFAULT_MODE": "yes" + } + }, + "tflite": { + "group": "base-framework", + "default": true + }, + "armnn": { + "env": { + "CM_MLPERF_USE_ARMNN_LIBRARY": "yes" + } + }, + "neon": { + "env": { + "CM_MLPERF_USE_NEON": "yes" + } + }, + "use-neon": { + "alias": "neon" + }, + "opencl": { + "env": { + "CM_MLPERF_USE_OPENCL": "yes" + } + }, + "tflite,armnn": { + "env": { + "CM_MLPERF_TFLITE_ARMNN": "yes" + } + }, + "tflite,armnn,neon": { + "env": { + "CM_MLPERF_TFLITE_ARMNN_NEON": "yes" + } + }, + "tflite,armnn,opencl": { + "env": { + "CM_MLPERF_TFLITE_ARMNN_OPENCL": "yes" + } + } + }, + "docker": { + "run": true, + "fake_run_deps": false, + "docker_input_mapping": { + "imagenet_path": "IMAGENET_PATH", + "results_dir": "RESULTS_DIR", + "submission_dir": "SUBMISSION_DIR" + }, + "mounts": [ + "${{ IMAGENET_PATH }}:${{ IMAGENET_PATH }}", + "${{ RESULTS_DIR }}:/home/cmuser/mobilenet_results", + "${{ SUBMISSION_DIR }}:/home/cmuser/inference_submission_3.1" + ], + "docker_run_final_cmds": [ + "cm run script --tags=run,mlperf,inference,mobilenet-models,_find-performance --adr.mlperf-inference-implementation.fake_run=True --adr.compiler.tags=gcc" + ] + } +} diff --git a/script/run-mlperf-inference-mobilenet-models/customize.py b/script/run-mlperf-inference-mobilenet-models/customize.py new file mode 100644 index 0000000000..e14e660d5c --- /dev/null +++ b/script/run-mlperf-inference-mobilenet-models/customize.py @@ -0,0 +1,189 @@ +from cmind import utils +import os +import cmind +import sys + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + add_deps_recursive = i['input'].get('add_deps_recursive') + + adr = i['input'].get('adr') + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + verbose = (env.get('CM_VERBOSE', False) == 'yes') + + models = { + "mobilenet": { + "v1": { + "multiplier": [ "multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.25" ], + "resolution": [ "resolution-224", "resolution-192", "resolution-160", "resolution-128" ], + "kind": [""] + }, + "v2": { + "multiplier": [ "multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.35" ], + "resolution": [ "resolution-224", "resolution-192", "resolution-160", "resolution-128" ], + "kind": [""] + }, + "v3": { + "multiplier": [""], + "resolution": [""], + "kind": [ "large", "large-minimalistic", "small", "small-minimalistic" ] + } + }, + "efficientnet": { + "": { + "multiplier": [""], + "resolution": [""], + "kind": [ "lite0", "lite1", "lite2", "lite3", "lite4" ] + } + } + } + variation_strings = {} + for t1 in models: + variation_strings[t1] = [] + variation_list = [] + variation_list.append(t1) + for version in models[t1]: + variation_list = [] + if version.strip(): + variation_list.append("_"+version) + variation_list_saved = variation_list.copy() + for k1 in models[t1][version]["multiplier"]: + variation_list = variation_list_saved.copy() + if 
k1.strip(): + variation_list.append("_"+k1) + variation_list_saved_2 = variation_list.copy() + for k2 in models[t1][version]["resolution"]: + variation_list = variation_list_saved_2.copy() + if k2.strip(): + variation_list.append("_"+k2) + variation_list_saved_3 = variation_list.copy() + for k3 in models[t1][version]["kind"]: + variation_list = variation_list_saved_3.copy() + if k3.strip(): + variation_list.append("_"+k3) + variation_strings[t1].append(",".join(variation_list)) + + if env.get('CM_MLPERF_POPULATE_README','') == "yes": + var="_populate-readme" + execution_mode="valid" + elif env.get('CM_MLPERF_SUBMISSION_MODE','') == "yes": + var="_submission" + execution_mode="valid" + elif env.get('CM_MLPERF_ACCURACY_MODE','') == "yes": + var="_full,_accuracy-only" + execution_mode="valid" + elif env.get('CM_MLPERF_PERFORMANCE_MODE','') == "yes": + var="_full,_performance-only" + execution_mode="valid" + else: + var="_find-performance" + execution_mode="test" + + precisions = [ ] + if env.get('CM_MLPERF_RUN_FP32', '') == "yes": + precisions.append("fp32") + if env.get('CM_MLPERF_RUN_INT8', '') == "yes": + precisions.append("uint8") + + implementation_tags = [] + if env.get('CM_MLPERF_USE_ARMNN_LIBRARY', '') == "yes": + implementation_tags.append("_armnn") + if env.get('CM_MLPERF_TFLITE_ARMNN_NEON', '') == "yes": + implementation_tags.append("_use-neon") + if env.get('CM_MLPERF_TFLITE_ARMNN_OPENCL', '') == "yes": + implementation_tags.append("_use-opencl") + implementation_tags_string = ",".join(implementation_tags) + + inp = i['input'] + + for model in variation_strings: + for v in variation_strings[model]: + for precision in precisions: + + if "small-minimalistic" in v and precision == "uint8": + continue + + if model == "efficientnet" and precision == "uint8": + precision = "int8" + + cm_input = { + 'action': 'run', + 'automation': 'script', + 'tags': f'generate-run-cmds,mlperf,inference,{var}', + 'quiet': True, + 'env': env, + 'input': inp, + 'v': verbose, + 'implementation': 'tflite-cpp', + 'precision': precision, + 'model': model, + 'scenario': 'SingleStream', + 'execution_mode': execution_mode, + 'test_query_count': '100', + 'adr': { + 'tflite-model': { + 'tags': v + }, + 'mlperf-inference-implementation': { + 'tags': implementation_tags_string + } + } + } + if add_deps_recursive: + cm_input['add_deps_recursive'] = add_deps_recursive #script automation will merge adr and add_deps_recursive + + if adr: + utils.merge_dicts({'dict1':cm_input['adr'], 'dict2':adr, 'append_lists':True, 'append_unique':True}) + + if env.get('CM_MLPERF_INFERENCE_RESULTS_DIR', '') != '': + cm_input['results_dir'] = env['CM_MLPERF_INFERENCE_RESULTS_DIR'] + + if env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '') != '': + cm_input['submission_dir'] = env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] + + if env.get('CM_MLPERF_ACCURACY_MODE','') == "yes": + cm_input['mode'] = 'accuracy' + + if env.get('CM_MLPERF_PERFORMANCE_MODE','') == "yes": + cm_input['mode'] = 'performance' + + if env.get('CM_MLPERF_FIND_PERFORMANCE_MODE','') == "yes" and env.get('CM_MLPERF_NO_RERUN','') != 'yes': + cm_input['rerun'] = True + + if env.get('CM_MLPERF_POWER','') == "yes": + cm_input['power'] = 'yes' + + print(cm_input) + r = cmind.access(cm_input) + if r['return'] > 0: + return r + + if env.get('CM_TEST_ONE_RUN', '') == "yes": + return {'return':0} + + clean_input = { + 'action': 'rm', + 'automation': 'cache', + 'tags': 'get,preprocessed,dataset,_for.mobilenet', + 'quiet': True, + 'v': verbose, + 'f': 'True' + } + r = 
cmind.access(clean_input) + #if r['return'] > 0: + # return r + return {'return':0} + +def postprocess(i): + + return {'return':0} diff --git a/script/run-mlperf-inference-mobilenet-models/run.sh b/script/run-mlperf-inference-mobilenet-models/run.sh new file mode 100644 index 0000000000..a9bf588e2f --- /dev/null +++ b/script/run-mlperf-inference-mobilenet-models/run.sh @@ -0,0 +1 @@ +#!/bin/bash diff --git a/script/run-mlperf-inference-submission-checker/README-extra.md b/script/run-mlperf-inference-submission-checker/README-extra.md new file mode 100644 index 0000000000..80c2800551 --- /dev/null +++ b/script/run-mlperf-inference-submission-checker/README-extra.md @@ -0,0 +1,10 @@ +# Run MLPerf Inference Submission Checker +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) runs the [MLPerf Inference submission checker](https://github.com/mlcommons/inference/blob/master/tools/submission/submission-checker.py) on a given submission folder. + +## How To +```bash +cm run script --tags=run,mlperf,inference,submission,checker --submitter=[SUBMITTER_NAME] --submission_dir=[SUBMISSION_FOLDER] +``` + +### Additional Options +* `[--skip_compliance]:` Skips the compliance tests diff --git a/script/run-mlperf-inference-submission-checker/README.md b/script/run-mlperf-inference-submission-checker/README.md new file mode 100644 index 0000000000..aeb69793d7 --- /dev/null +++ b/script/run-mlperf-inference-submission-checker/README.md @@ -0,0 +1,197 @@ +Automatically generated README for this automation recipe: **run-mlperf-inference-submission-checker** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=run-mlperf-inference-submission-checker,15d03ec2c1af4297) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-submission-checker)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *run,mlc,mlcommons,mlperf,inference,mlperf-inference,submission,checker,submission-checker,mlc-submission-checker* +* Output cached? 
*False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+```cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker" --help```
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=run,mlc,mlcommons,mlperf,inference,mlperf-inference,submission,checker,submission-checker,mlc-submission-checker`

+`cm run script --tags=run,mlc,mlcommons,mlperf,inference,mlperf-inference,submission,checker,submission-checker,mlc-submission-checker[,variations] [--input_flags]`
+
+*or*
+
+`cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker"`
+
+`cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker [variations]" [--input_flags]`
+
+
+* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.*
+
+#### Run this script from Python
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'run,mlc,mlcommons,mlperf,inference,mlperf-inference,submission,checker,submission-checker,mlc-submission-checker',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
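+A concrete, runnable variant of the template above (a sketch; `submitter` and `submission_dir` mirror the input flags documented below, and the placeholder values are assumptions):
+
+```python
+# Hedged sketch: check a local submission tree and skip the compliance tests.
+import cmind
+
+r = cmind.access({
+    'action': 'run',
+    'automation': 'script',
+    'tags': 'run,mlc,mlcommons,mlperf,inference,mlperf-inference,submission,checker,submission-checker,mlc-submission-checker',
+    'submitter': 'MyOrg',                     # assumption: your submitter name
+    'submission_dir': '/path/to/submission',  # assumption: your submission tree
+    'skip_compliance': 'yes',
+    'out': 'con',
+})
+if r['return'] > 0:
+    print(r['error'])
+```
+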
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="run,mlc,mlcommons,mlperf,inference,mlperf-inference,submission,checker,submission-checker,mlc-submission-checker"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,mlc,mlcommons,mlperf,inference,mlperf-inference,submission,checker,submission-checker,mlc-submission-checker) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_short-run` + - Environment variables: + - *CM_MLPERF_SHORT_RUN*: `yes` + - Workflow: + +
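+
+As with other CM scripts, the variation can simply be appended to the tags. A sketch via the Python API:
+
+```python
+# Hedged sketch: run the checker with the _short-run variation from the table above.
+import cmind
+
+r = cmind.access({
+    'action': 'run',
+    'automation': 'script',
+    'tags': 'run,mlperf,inference,submission,checker,_short-run',
+    'submission_dir': '/path/to/submission',  # assumption: your submission tree
+    'out': 'con',
+})
+```
+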
+ + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--extra_args=value` → `CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS=value`
+* `--extra_model_benchmark_map=value` → `CM_MLPERF_EXTRA_MODEL_MAPPING=value`
+* `--input=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
+* `--power=value` → `CM_MLPERF_POWER=value`
+* `--push_to_github=value` → `CM_MLPERF_RESULT_PUSH_TO_GITHUB=value`
+* `--skip_compliance=value` → `CM_MLPERF_SKIP_COMPLIANCE=value`
+* `--skip_power_check=value` → `CM_MLPERF_SKIP_POWER_CHECK=value`
+* `--src_version=value` → `CM_MLPERF_SUBMISSION_CHECKER_VERSION=value`
+* `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
+* `--submitter=value` → `CM_MLPERF_SUBMITTER=value`
+* `--tar=value` → `CM_TAR_SUBMISSION_DIR=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "extra_args":...})
+```
+
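+For example, to check a submission and archive it in one go (a sketch; `tar` maps to `CM_TAR_SUBMISSION_DIR` per the table above, and the path is an assumption):
+
+```python
+# Hedged sketch: completing the fragment above with concrete flag values.
+import cmind as cm
+
+r = cm.access({
+    'action': 'run',
+    'automation': 'script',
+    'tags': 'run,mlperf,inference,submission,checker',
+    'submission_dir': '/path/to/submission',  # assumption: your submission tree
+    'tar': 'yes',
+    'out': 'con',
+})
+if r['return'] > 0:
+    print(r['error'])
+```
+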
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MLPERF_SHORT_RUN: `no` + +
+
+#### Versions
+Default version: `master`
+
+* `master`
+* `r3.0`
+* `r3.1`
+* `r4.0`
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-submission-checker/_cm.json)***
+     * get,python3
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,mlcommons,inference,src
+       * CM names: `--adr.['inference-src', 'submission-checker-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,generic-python-lib,_xlsxwriter
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_package.pyarrow
+       * CM names: `--adr.['pyarrow']...`
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,generic-python-lib,_pandas
+       * CM names: `--adr.['pandas']...`
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,mlperf,submission,dir
+       * `if (CM_MLPERF_INFERENCE_SUBMISSION_DIR != on)`
+       * CM names: `--adr.['get-mlperf-submission-dir']...`
+       - CM script: [get-mlperf-inference-submission-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-submission-dir)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-submission-checker/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-submission-checker/_cm.json)
+  1. ***Run native script if exists***
+     * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-submission-checker/run.bat)
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-submission-checker/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-submission-checker/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-submission-checker/customize.py)***
+  1. 
***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-inference-submission-checker/_cm.json)*** + * publish-results,dashboard + * `if (CM_MLPERF_DASHBOARD == on)` + - CM script: [publish-results-to-dashboard](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/publish-results-to-dashboard) + * publish-results,github + * `if (CM_MLPERF_RESULT_PUSH_TO_GITHUB == on)` + * CM names: `--adr.['push-to-github']...` + - CM script: [push-mlperf-inference-results-to-github](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/push-mlperf-inference-results-to-github) + * run,tar + * `if (CM_TAR_SUBMISSION_DIR == yes)` + - CM script: [tar-my-folder](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/tar-my-folder) + +___ +### Script output +`cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/run-mlperf-inference-submission-checker/_cm.json b/script/run-mlperf-inference-submission-checker/_cm.json new file mode 100644 index 0000000000..ed3f45a5d3 --- /dev/null +++ b/script/run-mlperf-inference-submission-checker/_cm.json @@ -0,0 +1,140 @@ +{ + "alias": "run-mlperf-inference-submission-checker", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "category": "MLPerf benchmark support", + "default_version": "master", + "clean_files": [ + ], + "deps": [ + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "names": [ + "inference-src", + "submission-checker-src" + ], + "tags": "get,mlcommons,inference,src" + }, + { + "tags": "get,generic-python-lib,_xlsxwriter" + }, + { + "names": [ "pyarrow" ], + "tags": "get,generic-python-lib,_package.pyarrow" + }, + { + "names": [ "pandas" ], + "tags": "get,generic-python-lib,_pandas", + "version_min": "1.0.0" + }, + { + "tags": "get,mlperf,submission,dir", + "names": [ + "get-mlperf-submission-dir" + ], + "skip_if_env": { + "CM_MLPERF_INFERENCE_SUBMISSION_DIR": [ "on" ] + } + } + ], + "post_deps": [ + { + "enable_if_env": { + "CM_MLPERF_DASHBOARD": [ + "on" + ] + }, + "tags": "publish-results,dashboard" + }, + { + "enable_if_env": { + "CM_MLPERF_RESULT_PUSH_TO_GITHUB": [ + "on" + ] + }, + "names": [ "push-to-github" ], + "tags": "publish-results,github" + }, + { + "enable_if_env": { + "CM_TAR_SUBMISSION_DIR": [ + "yes" + ] + }, + "tags": "run,tar" + } + ], + "input_mapping": { + "skip_compliance": "CM_MLPERF_SKIP_COMPLIANCE", + "submission_dir": "CM_MLPERF_INFERENCE_SUBMISSION_DIR", + "input": "CM_MLPERF_INFERENCE_SUBMISSION_DIR", + "submitter": "CM_MLPERF_SUBMITTER", + "src_version": "CM_MLPERF_SUBMISSION_CHECKER_VERSION", + "push_to_github": "CM_MLPERF_RESULT_PUSH_TO_GITHUB", + "extra_model_benchmark_map": "CM_MLPERF_EXTRA_MODEL_MAPPING", + "power": "CM_MLPERF_POWER", + "extra_args": "CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS", + "skip_power_check": "CM_MLPERF_SKIP_POWER_CHECK", + "tar": "CM_TAR_SUBMISSION_DIR" + }, + "default_env": { + "CM_MLPERF_SHORT_RUN": "no" + }, + "tags": [ + "run", + "mlc", + "mlcommons", + "mlperf", + "inference", + "mlperf-inference", + "submission", + "checker", + "submission-checker", + "mlc-submission-checker" + ], + "uid": "15d03ec2c1af4297", + "variations": { + "short-run": { + "env": { + "CM_MLPERF_SHORT_RUN": "yes" + } + } + }, + "versions": { + "master": { + "adr": { 
+ "submission-checker-src": { + "version": "master" + } + } + }, + "r3.0": { + "adr": { + "submission-checker-src": { + "version": "r3.0" + } + } + }, + "r3.1": { + "adr": { + "submission-checker-src": { + "version": "r3.1" + } + } + }, + "r4.0": { + "adr": { + "submission-checker-src": { + "version": "r4.0" + } + } + } + } +} diff --git a/script/run-mlperf-inference-submission-checker/code.py b/script/run-mlperf-inference-submission-checker/code.py new file mode 100644 index 0000000000..892d16be33 --- /dev/null +++ b/script/run-mlperf-inference-submission-checker/code.py @@ -0,0 +1,27 @@ +# Developer: Grigori Fursin + +import os +import pandas + +def main(): + print ('=========================================================') + + print ('Searching for summary.csv ...') + + if os.path.isfile('summary.csv'): + print ('Converting to json ...') + + import pandas + + df = pandas.read_csv('summary.csv').T + + print ('') + print (df) + print ('') + + df.to_json('summary.json', orient='columns', indent=4) + + print ('=========================================================') + +if __name__ == '__main__': + main() diff --git a/script/run-mlperf-inference-submission-checker/customize.py b/script/run-mlperf-inference-submission-checker/customize.py new file mode 100644 index 0000000000..f9158bf06a --- /dev/null +++ b/script/run-mlperf-inference-submission-checker/customize.py @@ -0,0 +1,92 @@ +from cmind import utils +import cmind as cm +import os +import subprocess + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + submission_dir = env.get("CM_MLPERF_INFERENCE_SUBMISSION_DIR", "") + + version = env.get('CM_MLPERF_SUBMISSION_CHECKER_VERSION','') + + if submission_dir == "": + return {'return': 1, 'error': 'Please set --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR'} + + submitter = env.get("CM_MLPERF_SUBMITTER", "") #"default") + if ' ' in submitter: + return {'return': 1, 'error': 'CM_MLPERF_SUBMITTER cannot contain a space. Please provide a name without space using --submitter input. 
Given value: {}'.format(submitter)} + + if 'CM_MLPERF_SKIP_COMPLIANCE' in env: + skip_compliance = " --skip_compliance" + else: + skip_compliance = "" + + submission_checker_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission", + "submission_checker.py") + + if env['CM_MLPERF_SHORT_RUN'] == "yes": + import shutil + new_submission_checker_file = os.path.join(os.path.dirname(submission_checker_file), "submission_checker1.py") + with open(submission_checker_file, 'r') as file: + data = file.read() + data = data.replace("OFFLINE_MIN_SPQ = 24576", "OFFLINE_MIN_SPQ = 100") + data = data.replace("return is_valid, res, inferred", "return True, res, inferred") + with open(new_submission_checker_file, 'w') as file: + file.write(data) + submission_checker_file = new_submission_checker_file + + if env.get('CM_MLPERF_EXTRA_MODEL_MAPPING', '') != '': + extra_map = ' --extra_model_benchmark_map "'+env['CM_MLPERF_EXTRA_MODEL_MAPPING']+'"' + else: + extra_map = "" + + if env.get('CM_MLPERF_SKIP_POWER_CHECK', 'no') == "yes": + power_check = " --skip-power-check" + else: + power_check = "" + + extra_args = ' ' + env.get('CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS','') + + x_submitter = ' --submitter "' + submitter + '" ' if submitter!='' else '' + + x_version = ' --version ' + version +' ' if version!='' else '' + + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + submission_checker_file + ' --input "' + submission_dir + '"' + \ + x_submitter + \ + x_version + \ + skip_compliance + extra_map + power_check + extra_args + + report_generator_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission", + "generate_final_report.py") + env['CM_RUN_CMD'] = CMD + env['CM_POST_RUN_CMD'] = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + report_generator_file + ' --input summary.csv' + + return {'return':0} + +def postprocess(i): + + env = i['env'] + if env.get('CM_TAR_SUBMISSION_DIR',''): + env['CM_TAR_INPUT_DIR'] = env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] + + x=env.get('MLPERF_INFERENCE_SUBMISSION_TAR_FILE','') + if x!='': + env['CM_TAR_OUTFILE']=x + + x=env.get('MLPERF_INFERENCE_SUBMISSION_SUMMARY','') + if x!='': + for y in ['.csv', '.json', '.xlsx']: + + z0 = 'summary'+y + + if os.path.isfile(z0): + z1 = x+y + + if os.path.isfile(z1): + os.remove(z1) + + os.rename(z0, z1) + + return {'return':0} diff --git a/script/run-mlperf-inference-submission-checker/run.bat b/script/run-mlperf-inference-submission-checker/run.bat new file mode 100644 index 0000000000..5cbc264a2e --- /dev/null +++ b/script/run-mlperf-inference-submission-checker/run.bat @@ -0,0 +1,6 @@ +echo "%CM_RUN_CMD%" +%CM_RUN_CMD% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/run-mlperf-inference-submission-checker/run.sh b/script/run-mlperf-inference-submission-checker/run.sh new file mode 100644 index 0000000000..82434a83b0 --- /dev/null +++ b/script/run-mlperf-inference-submission-checker/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash +cmd=${CM_RUN_CMD} +echo "${cmd}" +eval "${cmd}" +test $? -eq 0 || exit $? + +cmd=${CM_POST_RUN_CMD} +echo "${cmd}" +eval "${cmd}" +test $? -eq 0 || exit $? + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +test $? -eq 0 || exit $? 
diff --git a/script/run-mlperf-power-client/README-extra.md b/script/run-mlperf-power-client/README-extra.md new file mode 100644 index 0000000000..d13278d9ba --- /dev/null +++ b/script/run-mlperf-power-client/README-extra.md @@ -0,0 +1,15 @@ +# Run MLPerf Power Client Script +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) runs the [MLPerf Power Server script](https://github.com/mlcommons/power-dev/tree/master/ptd_client_server). + +## How To +```bash +cm run script --tags=run,mlperf,power,client [--log_dir=<> --power_server=<> \ +--loadgen_logs_dir=<> --ntp_server=<> --run_cmd=<>] +``` + +### Default Values +1. `log_dir`: `logs` +2. `power_server`: `localhost` +3. `loadgen_logs_dir`: `loadgen_logs`, +4. `ntp_server`: `time.google.com` +5. `run_cmd`: `dummy.sh` diff --git a/script/run-mlperf-power-client/README.md b/script/run-mlperf-power-client/README.md new file mode 100644 index 0000000000..f2de841c56 --- /dev/null +++ b/script/run-mlperf-power-client/README.md @@ -0,0 +1,156 @@ +Automatically generated README for this automation recipe: **run-mlperf-power-client** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=run-mlperf-power-client,bf6a6d0cc97b48ae) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-client)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *run,mlc,mlcommons,mlperf,power,client,power-client* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "run mlc mlcommons mlperf power client power-client" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=run,mlc,mlcommons,mlperf,power,client,power-client` + +`cm run script --tags=run,mlc,mlcommons,mlperf,power,client,power-client [--input_flags]` + +*or* + +`cmr "run mlc mlcommons mlperf power client power-client"` + +`cmr "run mlc mlcommons mlperf power client power-client " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'run,mlc,mlcommons,mlperf,power,client,power-client', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="run,mlc,mlcommons,mlperf,power,client,power-client"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,mlc,mlcommons,mlperf,power,client,power-client) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run mlc mlcommons mlperf power client power-client" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--loadgen_logs_dir=value` → `CM_MLPERF_LOADGEN_LOGS_DIR=value` +* `--log_dir=value` → `CM_MLPERF_POWER_LOG_DIR=value` +* `--max_amps=value` → `CM_MLPERF_POWER_MAX_AMPS=value` +* `--max_volts=value` → `CM_MLPERF_POWER_MAX_VOLTS=value` +* `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value` +* `--port=value` → `CM_MLPERF_POWER_SERVER_PORT=value` +* `--power_server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value` +* `--run_cmd=value` → `CM_MLPERF_RUN_CMD=value` +* `--server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value` +* `--server_port=value` → `CM_MLPERF_POWER_SERVER_PORT=value` +* `--timestamp=value` → `CM_MLPERF_POWER_TIMESTAMP=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "loadgen_logs_dir":...}) +``` + +
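To make the snippet above concrete, a hypothetical call that sets a few of the documented flags could look as follows; the key names come from the flag table above, while the values are made up:

```python
import cmind as cm

# Input keys mirror the CLI flags, e.g. --power_server and --log_dir
r = cm.access({'action': 'run',
               'automation': 'script',
               'tags': 'run,mlc,mlcommons,mlperf,power,client,power-client',
               'power_server': '192.168.0.15',   # -> CM_MLPERF_POWER_SERVER_ADDRESS
               'log_dir': 'logs',                # -> CM_MLPERF_POWER_LOG_DIR
               'ntp_server': 'time.google.com',  # -> CM_MLPERF_POWER_NTP_SERVER
               'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```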
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MLPERF_POWER_LOG_DIR: `logs` +* CM_MLPERF_RUN_CMD: `` +* CM_MLPERF_POWER_SERVER_ADDRESS: `localhost` +* CM_MLPERF_POWER_NTP_SERVER: `time.google.com` + +
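The `customize.py` shown further below combines these keys into a single `ptd_client_server/client.py` invocation. A rough sketch of that command construction, assuming the defaults above and a hypothetical wrapped workload command:

```python
import os

# Simplified mirror of the command assembly in customize.py below;
# the paths and the wrapped run command are illustrative only
env = {
    'CM_PYTHON_BIN_WITH_PATH': '/usr/bin/python3',
    'CM_MLPERF_POWER_SOURCE': '/path/to/power-dev',
    'CM_MLPERF_POWER_SERVER_ADDRESS': 'localhost',
    'CM_MLPERF_POWER_LOG_DIR': 'logs',
    'CM_MLPERF_POWER_NTP_SERVER': 'time.google.com',
}

cmd = (env['CM_PYTHON_BIN_WITH_PATH'] + ' ' +
       os.path.join(env['CM_MLPERF_POWER_SOURCE'],
                    'ptd_client_server', 'client.py') +
       ' -a ' + env['CM_MLPERF_POWER_SERVER_ADDRESS'] +
       ' -p 4950' +                     # default server port
       " -w 'bash run_workload.sh'" +   # hypothetical wrapped command
       ' -L ' + os.path.join(os.getcwd(), 'loadgen_logs') +
       ' -o ' + env['CM_MLPERF_POWER_LOG_DIR'] +
       ' -n ' + env['CM_MLPERF_POWER_NTP_SERVER'])

print(cmd)
```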
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-client/_cm.json)*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,mlperf,power,src + * CM names: `--adr.['power-src']...` + - CM script: [get-mlperf-power-dev](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-power-dev) + * get,generic-sys-util,_ntpdate + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-client/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-client/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-client/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-client/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-client/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-client/_cm.json) + +___ +### Script output +`cmr "run mlc mlcommons mlperf power client power-client " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/run-mlperf-power-client/_cm.json b/script/run-mlperf-power-client/_cm.json new file mode 100644 index 0000000000..958b437c89 --- /dev/null +++ b/script/run-mlperf-power-client/_cm.json @@ -0,0 +1,55 @@ +{ + "alias": "run-mlperf-power-client", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "category": "MLPerf benchmark support", + "clean_files": [], + "default_env": { + "CM_MLPERF_POWER_LOG_DIR": "logs", + "CM_MLPERF_RUN_CMD": "", + "CM_MLPERF_POWER_SERVER_ADDRESS": "localhost", + "CM_MLPERF_POWER_NTP_SERVER": "time.google.com" + }, + "deps": [ + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "names": [ + "power-src" + ], + "tags": "get,mlperf,power,src" + }, + { + "tags": "get,generic-sys-util,_ntpdate" + } + ], + "input_mapping": { + "log_dir": "CM_MLPERF_POWER_LOG_DIR", + "power_server": "CM_MLPERF_POWER_SERVER_ADDRESS", + "server_port": "CM_MLPERF_POWER_SERVER_PORT", + "server": "CM_MLPERF_POWER_SERVER_ADDRESS", + "port": "CM_MLPERF_POWER_SERVER_PORT", + "loadgen_logs_dir": "CM_MLPERF_LOADGEN_LOGS_DIR", + "ntp_server": "CM_MLPERF_POWER_NTP_SERVER", + "run_cmd": "CM_MLPERF_RUN_CMD", + "max_amps": "CM_MLPERF_POWER_MAX_AMPS", + "max_volts": "CM_MLPERF_POWER_MAX_VOLTS", + "timestamp": "CM_MLPERF_POWER_TIMESTAMP" + }, + "tags": [ + "run", + "mlc", + "mlcommons", + "mlperf", + "power", + "client", + "power-client" + ], + "uid": "bf6a6d0cc97b48ae" +} diff --git a/script/run-mlperf-power-client/customize.py b/script/run-mlperf-power-client/customize.py new file mode 100644 index 0000000000..6ec752b803 --- /dev/null +++ b/script/run-mlperf-power-client/customize.py @@ -0,0 +1,43 @@ +from cmind import utils +import cmind as cm +import os +import
configparser + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if not env['CM_MLPERF_RUN_CMD']: + env['CM_MLPERF_RUN_CMD'] = os.path.join(i['run_script_input']['path'], "dummy.sh") + + if 'CM_MLPERF_POWER_TIMESTAMP' in env: + timestamp = "" + else: + timestamp = " --no-timestamp-path" + + if 'CM_MLPERF_LOADGEN_LOGS_DIR' not in env: + env['CM_MLPERF_LOADGEN_LOGS_DIR'] = os.path.join(os.getcwd(), "loadgen_logs") + + run_cmd = env['CM_MLPERF_RUN_CMD'].replace("'", '"') + run_cmd = run_cmd.replace('"', '\\"') + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' +\ + os.path.join(env['CM_MLPERF_POWER_SOURCE'], 'ptd_client_server', 'client.py') + \ + " -a " + env['CM_MLPERF_POWER_SERVER_ADDRESS'] + \ + " -p " + env.get('CM_MLPERF_POWER_SERVER_PORT', "4950") + \ + " -w '" + run_cmd + \ + "' -L " + env['CM_MLPERF_LOADGEN_LOGS_DIR'] + \ + " -o " + env['CM_MLPERF_POWER_LOG_DIR'] + \ + " -n " + env['CM_MLPERF_POWER_NTP_SERVER'] + \ + timestamp + + if 'CM_MLPERF_POWER_MAX_AMPS' in env and 'CM_MLPERF_POWER_MAX_VOLTS' in env: + cmd = cmd + " --max-amps " + env['CM_MLPERF_POWER_MAX_AMPS'] + \ + " --max-volts " + env['CM_MLPERF_POWER_MAX_VOLTS'] + + env['CM_MLPERF_POWER_RUN_CMD'] = cmd + + return {'return':0} + +def postprocess(i): + return {'return':0} diff --git a/script/run-mlperf-power-client/dummy.sh b/script/run-mlperf-power-client/dummy.sh new file mode 100644 index 0000000000..a796ab609b --- /dev/null +++ b/script/run-mlperf-power-client/dummy.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +mkdir -p loadgen_logs + +# Create mock files with the same names that loadgen does + +echo power_begin $(date --utc +"%m-%d-%Y %T.%3N") | tee loadgen_logs/mlperf_log_detail.txt +touch loadgen_logs/mlperf_log_accuracy.json +touch loadgen_logs/mlperf_log_summary.txt +touch loadgen_logs/mlperf_log_trace.json +sleep 25 +echo power_end $(date --utc +"%m-%d-%Y %T.%3N") | tee -a loadgen_logs/mlperf_log_detail.txt diff --git a/script/run-mlperf-power-client/run.sh b/script/run-mlperf-power-client/run.sh new file mode 100644 index 0000000000..19805cb5b2 --- /dev/null +++ b/script/run-mlperf-power-client/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [[ -n ${CM_RUN_DIR} ]]; then + cur_dir=${CM_RUN_DIR}; + cd $cur_dir +else + cur_dir=`pwd` +fi +echo "Running power client from $cur_dir" + +cmd="${CM_MLPERF_POWER_RUN_CMD}" +echo $cmd +eval $cmd +test $? -eq 0 || exit $? diff --git a/script/run-mlperf-power-server/README-extra.md b/script/run-mlperf-power-server/README-extra.md new file mode 100644 index 0000000000..78b0457f70 --- /dev/null +++ b/script/run-mlperf-power-server/README-extra.md @@ -0,0 +1,17 @@ +# Run MLPerf Power Server Script +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) runs the [MLPerf Power Server script](https://github.com/mlcommons/power-dev/tree/master/ptd_client_server). + +## How To +```bash +cm run script --tags=run,mlperf,power,server [--interface_flag=<> \ +--device_port=<> --outdir=<> --logfile=<> --outdir=<> --device_type=<> ] +``` + +### Default Values +1. `ntp_server`: `time.google.com` +2. `interface_flag`: "" +3. `device_port`: `/dev/usbtmc0` +4. `device_type`: `49` +5. `outdir`: `~/mlperf_power_logs` +6. 
`logfile`: `logs_ptdaemon.txt` + diff --git a/script/run-mlperf-power-server/README.md b/script/run-mlperf-power-server/README.md new file mode 100644 index 0000000000..d4da088627 --- /dev/null +++ b/script/run-mlperf-power-server/README.md @@ -0,0 +1,165 @@ +Automatically generated README for this automation recipe: **run-mlperf-power-server** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=run-mlperf-power-server,5bc68aaf389a40bd) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-server)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *run,mlc,mlcommons,mlperf,power,server,power-server* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "run mlc mlcommons mlperf power server power-server" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=run,mlc,mlcommons,mlperf,power,server,power-server` + +`cm run script --tags=run,mlc,mlcommons,mlperf,power,server,power-server [--input_flags]` + +*or* + +`cmr "run mlc mlcommons mlperf power server power-server"` + +`cmr "run mlc mlcommons mlperf power server power-server " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'run,mlc,mlcommons,mlperf,power,server,power-server', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="run,mlc,mlcommons,mlperf,power,server,power-server"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,mlc,mlcommons,mlperf,power,server,power-server) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run mlc mlcommons mlperf power server power-server" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--device_port=value` → `CM_MLPERF_POWER_DEVICE_PORT=value` +* `--device_type=value` → `CM_MLPERF_POWER_DEVICE_TYPE=value` +* `--interface_flag=value` → `CM_MLPERF_POWER_INTERFACE_FLAG=value` +* `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value` +* `--screen=value` → `CM_MLPERF_POWER_SERVER_USE_SCREEN=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "device_port":...}) +``` + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MLPERF_POWER_NTP_SERVER: `time.google.com` +* CM_MLPERF_POWER_INTERFACE_FLAG: `` +* CM_MLPERF_POWER_DEVICE_TYPE: `49` +* CM_MLPERF_POWER_SERVER_ADDRESS: `0.0.0.0` +* CM_MLPERF_POWER_SERVER_PORT: `4950` +* CM_MLPERF_POWER_DEVICE_PORT: `/dev/usbtmc0` +* CM_MLPERF_POWER_SERVER_USE_SCREEN: `no` + +
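The server-side `customize.py` further below renders these keys into a `power-server.conf` that `server.py` consumes. A minimal sketch of that step; the real script starts from `server.template.conf` and also fills in `interfaceFlag` and the PTDaemon path from `CM_MLPERF_PTD_PATH`, so the path here is a placeholder:

```python
import configparser

# Values taken from the default environment listed above
config = configparser.ConfigParser()
config['server'] = {
    'ntpServer': 'time.google.com',
    'listen': '0.0.0.0 4950',
}
config['ptd'] = {
    'ptd': '/path/to/ptd',      # placeholder for CM_MLPERF_PTD_PATH
    'deviceType': '49',
    'devicePort': '/dev/usbtmc0',
}

with open('power-server.conf', 'w') as configfile:
    config.write(configfile)
```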
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-server/_cm.json)*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,mlperf,power,src + * CM names: `--adr.['power-src']...` + - CM script: [get-mlperf-power-dev](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-power-dev) + * get,mlperf,power,daemon + * CM names: `--adr.['power-damenon']...` + - CM script: [get-spec-ptd](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-spec-ptd) + * get,generic,sys-util,_screen + * `if (CM_HOST_OS_TYPE not in windows)` + * CM names: `--adr.['screen']...` + - CM script: [get-generic-sys-util](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-sys-util) + * get,generic-python-lib,_package.pypiwin32 + * `if (CM_HOST_OS_TYPE in windows)` + * CM names: `--adr.['win32']...` + - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-server/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-server/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-server/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-server/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-server/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-server/customize.py)*** + 1.
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-power-server/_cm.json) + +___ +### Script output +`cmr "run mlc mlcommons mlperf power server power-server " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/run-mlperf-power-server/_cm.json b/script/run-mlperf-power-server/_cm.json new file mode 100644 index 0000000000..5082ca3f05 --- /dev/null +++ b/script/run-mlperf-power-server/_cm.json @@ -0,0 +1,82 @@ +{ + "alias": "run-mlperf-power-server", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "category": "MLPerf benchmark support", + "clean_files": [], + "default_env": { + "CM_MLPERF_POWER_NTP_SERVER": "time.google.com", + "CM_MLPERF_POWER_INTERFACE_FLAG": "", + "CM_MLPERF_POWER_DEVICE_TYPE": "49", + "CM_MLPERF_POWER_SERVER_ADDRESS": "0.0.0.0", + "CM_MLPERF_POWER_SERVER_PORT": "4950", + "CM_MLPERF_POWER_DEVICE_PORT": "/dev/usbtmc0", + "CM_MLPERF_POWER_SERVER_USE_SCREEN": "no" + }, + "input_mapping": { + "interface_flag": "CM_MLPERF_POWER_INTERFACE_FLAG", + "device_port": "CM_MLPERF_POWER_DEVICE_PORT", + "device_type": "CM_MLPERF_POWER_DEVICE_TYPE", + "ntp_server": "CM_MLPERF_POWER_NTP_SERVER", + "screen": "CM_MLPERF_POWER_SERVER_USE_SCREEN" + }, + "deps": [ + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "tags": "detect,os" + }, + { + "names": [ + "power-src" + ], + "tags": "get,mlperf,power,src" + }, + { + "names": [ + "power-damenon" + ], + "tags": "get,mlperf,power,daemon" + }, + { + "names": [ + "screen" + ], + "tags": "get,generic,sys-util,_screen", + "skip_if_env": { + "CM_HOST_OS_TYPE": "windows" + } + }, + { + "names": [ + "win32" + ], + "tags": "get,generic-python-lib,_package.pypiwin32", + "enable_if_env": { + "CM_HOST_OS_TYPE": "windows" + } + } + ], + "tags": [ + "run", + "mlc", + "mlcommons", + "mlperf", + "power", + "server", + "power-server" + ], + "uid": "5bc68aaf389a40bd", + "docker": { + "device": "/dev/usbtmc0", + "port_maps": [ + "4950:4950" + ] + } +} diff --git a/script/run-mlperf-power-server/customize.py b/script/run-mlperf-power-server/customize.py new file mode 100644 index 0000000000..65c7830420 --- /dev/null +++ b/script/run-mlperf-power-server/customize.py @@ -0,0 +1,39 @@ +from cmind import utils +import cmind as cm +import os +import configparser + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + config = configparser.ConfigParser() + server_config_file = os.path.join(env['CM_MLPERF_POWER_SOURCE'], 'ptd_client_server', 'server.template.conf') + config.read(server_config_file) + config['server']['ntpServer'] = env['CM_MLPERF_POWER_NTP_SERVER'] + config['server']['listen'] = env['CM_MLPERF_POWER_SERVER_ADDRESS'] + " " + env['CM_MLPERF_POWER_SERVER_PORT'] + config['ptd']['ptd'] = env['CM_MLPERF_PTD_PATH'] + config['ptd']['interfaceFlag'] = env['CM_MLPERF_POWER_INTERFACE_FLAG'] + config['ptd']['deviceType'] = env['CM_MLPERF_POWER_DEVICE_TYPE'] + config['ptd']['devicePort'] = env['CM_MLPERF_POWER_DEVICE_PORT'] + with open('power-server.conf', 'w') as configfile: + config.write(configfile) + print({section: dict(config[section]) for section in config.sections()}) + + if env['CM_HOST_OS_TYPE'] == "windows": + cmd_prefix = "" + else: + cmd_prefix = "sudo " + + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + os.path.join(env['CM_MLPERF_POWER_SOURCE'], 'ptd_client_server', 'server.py') +' -c power-server.conf' + if 
env.get('CM_MLPERF_POWER_SERVER_USE_SCREEN', 'no') == 'yes': + cmd = cmd_prefix + ' screen -d -m ' + cmd + ' ' + else: + cmd = cmd_prefix + cmd + + env['RUN_CMD'] = cmd + + return {'return':0} + +def postprocess(i): + return {'return':0} diff --git a/script/run-mlperf-power-server/run.bat b/script/run-mlperf-power-server/run.bat new file mode 100644 index 0000000000..d23f0addf5 --- /dev/null +++ b/script/run-mlperf-power-server/run.bat @@ -0,0 +1,7 @@ +@echo off + +echo %RUN_CMD% + +%RUN_CMD% + +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/run-mlperf-power-server/run.sh b/script/run-mlperf-power-server/run.sh new file mode 100644 index 0000000000..1c5f07f330 --- /dev/null +++ b/script/run-mlperf-power-server/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash +cmd=${RUN_CMD} +echo $cmd +eval $cmd +test $? -eq 0 || exit $? diff --git a/script/run-mlperf-training-submission-checker/README.md b/script/run-mlperf-training-submission-checker/README.md new file mode 100644 index 0000000000..65d7fca513 --- /dev/null +++ b/script/run-mlperf-training-submission-checker/README.md @@ -0,0 +1,181 @@ +Automatically generated README for this automation recipe: **run-mlperf-training-submission-checker** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=run-mlperf-training-submission-checker,cb5cb60ac9a74d09) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-training-submission-checker)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *run,mlc,mlcommons,mlperf,training,train,mlperf-training,submission,checker,submission-checker,mlc-submission-checker* +* Output cached? 
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=run,mlc,mlcommons,mlperf,training,train,mlperf-training,submission,checker,submission-checker,mlc-submission-checker` + +`cm run script --tags=run,mlc,mlcommons,mlperf,training,train,mlperf-training,submission,checker,submission-checker,mlc-submission-checker[,variations] [--input_flags]` + +*or* + +`cmr "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker"` + +`cmr "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'run,mlc,mlcommons,mlperf,training,train,mlperf-training,submission,checker,submission-checker,mlc-submission-checker', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="run,mlc,mlcommons,mlperf,training,train,mlperf-training,submission,checker,submission-checker,mlc-submission-checker"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,mlc,mlcommons,mlperf,training,train,mlperf-training,submission,checker,submission-checker,mlc-submission-checker) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_short-run` + - Environment variables: + - *CM_MLPERF_SHORT_RUN*: `yes` + - Workflow: + +
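The `_short-run` variation only sets `CM_MLPERF_SHORT_RUN=yes`; the training `customize.py` below does not act on it, but the inference submission checker earlier in this patch uses the same flag to patch a relaxed copy of `submission_checker.py`. A condensed sketch of that patching idiom:

```python
import os

def make_short_run_checker(submission_checker_file):
    # Write a patched copy next to the original, as the inference
    # checker's customize.py does for short test runs
    new_file = os.path.join(os.path.dirname(submission_checker_file),
                            'submission_checker1.py')
    with open(submission_checker_file, 'r') as f:
        data = f.read()
    # Relax the Offline minimum samples-per-query and skip validity checks
    data = data.replace('OFFLINE_MIN_SPQ = 24576', 'OFFLINE_MIN_SPQ = 100')
    data = data.replace('return is_valid, res, inferred',
                        'return True, res, inferred')
    with open(new_file, 'w') as f:
        f.write(data)
    return new_file
```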
+ + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--extra_args=value` → `CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS=value` +* `--input=value` → `CM_MLPERF_SUBMISSION_DIR=value` +* `--power=value` → `CM_MLPERF_POWER=value` +* `--push_to_github=value` → `CM_MLPERF_RESULT_PUSH_TO_GITHUB=value` +* `--skip_compliance=value` → `CM_MLPERF_SKIP_COMPLIANCE=value` +* `--skip_power_check=value` → `CM_MLPERF_SKIP_POWER_CHECK=value` +* `--src_version=value` → `CM_MLPERF_SUBMISSION_CHECKER_VERSION=value` +* `--submission_dir=value` → `CM_MLPERF_SUBMISSION_DIR=value` +* `--submitter=value` → `CM_MLPERF_SUBMITTER=value` +* `--tar=value` → `CM_TAR_SUBMISSION_DIR=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "extra_args":...}) +``` + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MLPERF_SHORT_RUN: `no` + +
+ +#### Versions +Default version: `master` + +* `master` +* `r3.0` +* `r3.1` +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-training-submission-checker/_cm.json)*** + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + * get,mlcommons,inference,src + * CM names: `--adr.['inference-src', 'submission-checker-src']...` + - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src) + * install,mlperf,logging,from.src + - CM script: [install-mlperf-logging-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-mlperf-logging-from-src) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-training-submission-checker/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-training-submission-checker/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-training-submission-checker/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-training-submission-checker/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-training-submission-checker/customize.py)*** + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-mlperf-training-submission-checker/_cm.json)*** + * publish-results,github + * `if (CM_MLPERF_RESULT_PUSH_TO_GITHUB == on)` + * CM names: `--adr.['push-to-github']...` + - CM script: [push-mlperf-inference-results-to-github](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/push-mlperf-inference-results-to-github) + * run,tar + * `if (CM_TAR_SUBMISSION_DIR == yes)` + - CM script: [tar-my-folder](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/tar-my-folder) + +___ +### Script output +`cmr "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/run-mlperf-training-submission-checker/_cm.json b/script/run-mlperf-training-submission-checker/_cm.json new file mode 100644 index 0000000000..4c0854fb48 --- /dev/null +++ b/script/run-mlperf-training-submission-checker/_cm.json @@ -0,0 +1,108 @@ +{ + "alias": "run-mlperf-training-submission-checker", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "category": "MLPerf benchmark support", + "clean_files": [], + "default_env": { + "CM_MLPERF_SHORT_RUN": "no" + }, + "default_version": "master", + "deps": [ + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "names": [ + "inference-src", + "submission-checker-src" + ], + "tags": "get,mlcommons,inference,src" + }, + { + "tags": "install,mlperf,logging,from.src" + } + ], + "input_mapping": { + "extra_args": "CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS", + "input": "CM_MLPERF_SUBMISSION_DIR", + "power": "CM_MLPERF_POWER", + "push_to_github":
"CM_MLPERF_RESULT_PUSH_TO_GITHUB", + "skip_compliance": "CM_MLPERF_SKIP_COMPLIANCE", + "skip_power_check": "CM_MLPERF_SKIP_POWER_CHECK", + "src_version": "CM_MLPERF_SUBMISSION_CHECKER_VERSION", + "submission_dir": "CM_MLPERF_SUBMISSION_DIR", + "submitter": "CM_MLPERF_SUBMITTER", + "tar": "CM_TAR_SUBMISSION_DIR" + }, + "post_deps": [ + { + "enable_if_env": { + "CM_MLPERF_RESULT_PUSH_TO_GITHUB": [ + "on" + ] + }, + "names": [ + "push-to-github" + ], + "tags": "publish-results,github" + }, + { + "enable_if_env": { + "CM_TAR_SUBMISSION_DIR": [ + "yes" + ] + }, + "tags": "run,tar" + } + ], + "tags": [ + "run", + "mlc", + "mlcommons", + "mlperf", + "training", + "train", + "mlperf-training", + "submission", + "checker", + "submission-checker", + "mlc-submission-checker" + ], + "uid": "cb5cb60ac9a74d09", + "variations": { + "short-run": { + "env": { + "CM_MLPERF_SHORT_RUN": "yes" + } + } + }, + "versions": { + "master": { + "adr": { + "submission-checker-src": { + "version": "master" + } + } + }, + "r3.1": { + "adr": { + "submission-checker-src": { + "version": "r3.1" + } + } + }, + "r3.0": { + "adr": { + "submission-checker-src": { + "version": "r3.0" + } + } + } + } +} diff --git a/script/run-mlperf-training-submission-checker/customize.py b/script/run-mlperf-training-submission-checker/customize.py new file mode 100644 index 0000000000..393979b490 --- /dev/null +++ b/script/run-mlperf-training-submission-checker/customize.py @@ -0,0 +1,37 @@ +from cmind import utils +import cmind as cm +import os +import subprocess + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + submission_dir = env.get("CM_MLPERF_SUBMISSION_DIR", "") + + version = env.get('CM_MLPERF_SUBMISSION_CHECKER_VERSION','v3.1') + + if submission_dir == "": + return {'return': 1, 'error': 'Please set CM_MLPERF_SUBMISSION_DIR'} + + submitter = env.get("CM_MLPERF_SUBMITTER", "") #"default") + if ' ' in submitter: + return {'return': 1, 'error': 'CM_MLPERF_SUBMITTER cannot contain a space. Please provide a name without space using --submitter input. Given value: {}'.format(submitter)} + + submission_checker_file = os.path.join(env['CM_MLPERF_LOGGING_REPO_PATH'], "scripts", "verify_for_" + version + "_training.sh") + + extra_args = ' ' + env.get('CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS','') + + CMD = submission_checker_file + " " + submission_dir + + env['CM_RUN_CMD'] = CMD + + return {'return':0} + +def postprocess(i): + + env = i['env'] + if env.get('CM_TAR_SUBMISSION_DIR'): + env['CM_TAR_INPUT_DIR'] = env.get('CM_MLPERF_SUBMISSION_DIR', '$HOME') + + return {'return':0} diff --git a/script/run-mlperf-training-submission-checker/run.sh b/script/run-mlperf-training-submission-checker/run.sh new file mode 100644 index 0000000000..8784f35044 --- /dev/null +++ b/script/run-mlperf-training-submission-checker/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash +cmd=${CM_RUN_CMD} +echo "${cmd}" +eval "${cmd}" +test $? -eq 0 || exit $? + +cmd=${CM_POST_RUN_CMD} +echo "${cmd}" +eval "${cmd}" +test $? -eq 0 || exit $? 
diff --git a/script/run-python/README.md b/script/run-python/README.md new file mode 100644 index 0000000000..54bf6a7446 --- /dev/null +++ b/script/run-python/README.md @@ -0,0 +1,140 @@ +Automatically generated README for this automation recipe: **run-python** + +Category: **Tests** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=run-python,75a46d84ee6f49b0) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-python)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *run,python* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "run python" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=run,python` + +`cm run script --tags=run,python [--input_flags]` + +*or* + +`cmr "run python"` + +`cmr "run python " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'run,python', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="run,python"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,python) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run python" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--command=value` → `CM_RUN_PYTHON_CMD=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "command":...}) +``` + +
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-python/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-python/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-python/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-python/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-python/_cm.json) + 1. Run "postprocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-python/_cm.json) + +___ +### Script output +`cmr "run python " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/run-python/_cm.json b/script/run-python/_cm.json new file mode 100644 index 0000000000..dff0c12fec --- /dev/null +++ b/script/run-python/_cm.json @@ -0,0 +1,26 @@ +{ + "alias": "run-python", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "Tests", + "input_mapping": { + "command": "CM_RUN_PYTHON_CMD" + }, + "deps": [ + { + "tags": "detect,os" + }, + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + } + ], + "tags": [ + "run", + "python" + ], + "uid": "75a46d84ee6f49b0" +} diff --git a/script/run-python/run.bat b/script/run-python/run.bat new file mode 100644 index 0000000000..95d32d577f --- /dev/null +++ b/script/run-python/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% %CM_RUN_PYTHON_CMD% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/run-python/run.sh b/script/run-python/run.sh new file mode 100644 index 0000000000..641095ae88 --- /dev/null +++ b/script/run-python/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_RUN_PYTHON_CMD} +test $? -eq 0 || exit $? diff --git a/script/run-terraform/README-about.md b/script/run-terraform/README-about.md new file mode 100644 index 0000000000..f890c6170b --- /dev/null +++ b/script/run-terraform/README-about.md @@ -0,0 +1,12 @@ +## Setup for Google Cloud Instances +``` +sudo snap install google-cloud-cli --classic +gcloud auth application-default login +``` + +The above two commands will install google-cloud-cli and authorize the user to access it. Once done, you can start creating a GCP instance using CM commands like the one below. To destroy an instance, just repeat the same command with the `--destroy` option.
+ +``` +cm run script --tags=run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit +``` +Here, `mlperf-inference-tests` is the name of the Google Cloud project as created in the [Google Cloud console](https://console.cloud.google.com/apis/dashboard). diff --git a/script/run-terraform/README-extra.md b/script/run-terraform/README-extra.md new file mode 100644 index 0000000000..47c1f4f30d --- /dev/null +++ b/script/run-terraform/README-extra.md @@ -0,0 +1 @@ +Please copy aws/credentials.example to aws/credentials.sh after adding your AWS credentials. diff --git a/script/run-terraform/README.md b/script/run-terraform/README.md new file mode 100644 index 0000000000..10e880a2db --- /dev/null +++ b/script/run-terraform/README.md @@ -0,0 +1,482 @@ +Automatically generated README for this automation recipe: **run-terraform** + +Category: **Cloud automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=run-terraform,ec344bd44af144d7) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- + +## Setup for Google Cloud Instances +``` +sudo snap install google-cloud-cli --classic +gcloud auth application-default login +``` + +The above two commands will install google-cloud-cli and authorize the user to access it. Once done, you can start creating a GCP instance using CM commands like the one below. To destroy an instance, just repeat the same command with the `--destroy` option. + +``` +cm run script --tags=run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit +``` +Here, `mlperf-inference-tests` is the name of the Google Cloud project as created in the [Google Cloud console](https://console.cloud.google.com/apis/dashboard). + + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-terraform)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *run,terraform* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "run terraform" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=run,terraform` + +`cm run script --tags=run,terraform[,variations] [--input_flags]` + +*or* + +`cmr "run terraform"` + +`cmr "run terraform [variations]" [--input_flags]` + + +* *See the list of `variations` [here](#variations) and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section. + +```python + +import cmind + +r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'run,terraform', + 'out':'con', + ... + (other input keys for this script) + ... + }) + +if r['return']>0: + print (r['error']) + +``` + +
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="run,terraform"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,terraform) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run terraform[variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_amazon-linux-2-kernel.#` + - Environment variables: + - *TF_VAR_INSTANCE_IMAGE_OS*: `amazon-linux-2-kernel.#` + - Workflow: + * `_graviton` + - Environment variables: + - *CM_TERRAFORM_AWS_GRAVITON_INSTANCE*: `yes` + - Workflow: + * `_inferentia` + - Environment variables: + - *CM_TERRAFORM_AWS_INFERENTIA_INSTANCE*: `yes` + - Workflow: + * `_inferentia,amazon-linux-2-kernel.510` + - Workflow: + * `_rhel.#` + - Environment variables: + - *TF_VAR_INSTANCE_IMAGE_OS*: `rhel.#` + - Workflow: + * `_ubuntu.#` + - Environment variables: + - *TF_VAR_INSTANCE_IMAGE_OS*: `ubuntu.#` + - Workflow: + +
+ + + * Group "**aws-instance-image**" +
+ Click here to expand this section. + + * `_amazon-linux-2-kernel.510,arm64,us-west-2` + - Environment variables: + - *TF_VAR_INSTANCE_IMAGE*: `ami-0f1a5f5ada0e7da53` + - Workflow: + * `_aws_instance_image.#` + - Environment variables: + - *TF_VAR_INSTANCE_IMAGE*: `#` + - Workflow: + * `_aws_instance_image.ami-0735c191cf914754d` + - Environment variables: + - *TF_VAR_INSTANCE_IMAGE*: `ami-0735c191cf914754d` + - Workflow: + * `_aws_instance_image.ami-0a0d8589b597d65b3` + - Environment variables: + - *TF_VAR_INSTANCE_IMAGE*: `ami-0a0d8589b597d65b3` + - Workflow: + * `_rhel.9,x86,us-west-2` + - Environment variables: + - *TF_VAR_INSTANCE_IMAGE*: `ami-0dda7e535b65b6469` + - Workflow: + * `_ubuntu.2204,arm64,us-west-2` + - Environment variables: + - *TF_VAR_INSTANCE_IMAGE*: `ami-079f51a7bcca65b92` + - Workflow: + * `_ubuntu.2204,x86,us-west-2` + - Environment variables: + - *TF_VAR_INSTANCE_IMAGE*: `ami-0735c191cf914754d` + - Workflow: + +
+ + + * Group "**aws-instance-type**" +
+ Click here to expand this section. + + * `_a1.2xlarge` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `a1.2xlarge` + - Workflow: + * `_a1.metal` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `a1.metal` + - Workflow: + * `_a1.xlarge` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `a1.xlarge` + - Workflow: + * `_aws_instance_type.#` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `#` + - Workflow: + * `_c5.12xlarge` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `c5.12xlarge` + - Workflow: + * `_c5.4xlarge` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `c5.4xlarge` + - Workflow: + * `_c5d.9xlarge` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `c5d.9xlarge` + - Workflow: + * `_g4dn.xlarge` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `g4dn.xlarge` + - Workflow: + * `_inf1.2xlarge` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `inf1.2xlarge` + - Workflow: + * `_inf1.xlarge` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `inf1.xlarge` + - Workflow: + * `_inf2.8xlarge` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `inf2.8xlarge` + - Workflow: + * `_inf2.xlarge` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `inf2.xlarge` + - Workflow: + * `_m7g.2xlarge` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `m7g.2xlarge` + - Workflow: + * `_m7g.xlarge` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `m7g.xlarge` + - Workflow: + * `_t2.#` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `t2.#` + - Workflow: + * `_t2.2xlarge` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `t2.2xlarge` + - Workflow: + * `_t2.large` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `t2.large` + - Workflow: + * `_t2.medium` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `t2.medium` + - Workflow: + * `_t2.micro` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `t2.micro` + - Workflow: + * `_t2.nano` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `t2.nano` + - Workflow: + * `_t2.small` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `t2.small` + - Workflow: + * `_t2.xlarge` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `t2.xlarge` + - Workflow: + +
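Variations whose names end in `.#` are wildcards: the suffix supplied by the user replaces the `#` in the mapped environment value, so `_t2.2xlarge` and `_t2.#` with suffix `2xlarge` resolve identically. A toy illustration of that substitution (the real CM resolver is considerably more involved):

```python
# Toy resolver: the template '_t2.#' maps to TF_VAR_INSTANCE_TYPE='t2.#'
variation_env = {'t2.#': {'TF_VAR_INSTANCE_TYPE': 't2.#'}}

def resolve(requested):
    for template, env in variation_env.items():
        prefix = template.split('#')[0]
        if requested.startswith(prefix):
            # 't2.medium' matches template 't2.#' with suffix 'medium'
            suffix = requested[len(prefix):]
            return {k: v.replace('#', suffix) for k, v in env.items()}
    return {}

print(resolve('t2.medium'))  # {'TF_VAR_INSTANCE_TYPE': 't2.medium'}
```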
+ + + * Group "**cloud-provider**" +
+ Click here to expand this section. + + * **`_aws`** (default) + - Environment variables: + - *CM_TERRAFORM_CONFIG_DIR_NAME*: `aws` + - Workflow: + * `_gcp` + - Environment variables: + - *CM_TERRAFORM_CONFIG_DIR_NAME*: `gcp` + - Workflow: + +
+ + + * Group "**gcp-instance-image**" +
+ Click here to expand this section. + + * `_debian-cloud/debian-11` + - Environment variables: + - *TF_VAR_INSTANCE_IMAGE*: `debian-cloud/debian-11` + - Workflow: + * `_gcp_instance_image.#` + - Environment variables: + - *TF_VAR_INSTANCE_IMAGE*: `#` + - Workflow: + * `_ubuntu-2204-jammy-v20230114` + - Environment variables: + - *TF_VAR_INSTANCE_IMAGE*: `ubuntu-2204-jammy-v20230114` + - Workflow: + +
+ + + * Group "**gcp-instance-type**" +
+ Click here to expand this section. + + * `_f1-micro` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `f1-micro` + - Workflow: + * `_gcp_instance_type.#` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `#` + - Workflow: + * `_n1-highmem.#` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `n1-highmem-#` + - Workflow: + * `_n1-standard.#` + - Environment variables: + - *TF_VAR_INSTANCE_TYPE*: `n1-highmem-#` + - Workflow: + +
+ + + * Group "**gcp-project**" +
+ Click here to expand this section. + + * `_gcp_project.#` + - Environment variables: + - *TF_VAR_GCP_PROJECT*: `#` + - Workflow: + +
+ + + * Group "**instance-name**" +
+ Click here to expand this section. + + * `_instance_name.#` + - Environment variables: + - *TF_VAR_INSTANCE_NAME*: `#` + - Workflow: + +
+ + + * Group "**platform**" +
+ Click here to expand this section. + + * `_arm64` + - Environment variables: + - *CM_INSTANCE_PLATFORM*: `arm64` + - Workflow: + * **`_x86`** (default) + - Environment variables: + - *CM_INSTANCE_PLATFORM*: `x86` + - Workflow: + +
+ + + * Group "**region**" +
+ Click here to expand this section. + + * `_region.#` + - Environment variables: + - *TF_VAR_INSTANCE_REGION*: `#` + - Workflow: + * `_us-west-2` + - Environment variables: + - *TF_VAR_INSTANCE_REGION*: `us-west-2` + - Workflow: + +
+ + + * Group "**storage-size**" +
+ Click here to expand this section. + + * `_storage_size.#` + - Environment variables: + - *TF_VAR_DISK_GBS*: `#` + - Workflow: + * `_storage_size.8` + - Environment variables: + - *TF_VAR_DISK_GBS*: `8` + - Workflow: + +
+ + + * Group "**zone**" +
+ Click here to expand this section. + + * `_zone.#` + - Environment variables: + - *TF_VAR_INSTANCE_ZONE*: `#` + - Workflow: + +
+ + +#### Default variations + +`_aws,_x86` + +#### Script flags mapped to environment +
+Click here to expand this section. + +* `--cminit=value` → `CM_TERRAFORM_CM_INIT=value` +* `--destroy=value` → `CM_DESTROY_TERRAFORM=value` +* `--gcp_credentials_json_file=value` → `CM_GCP_CREDENTIALS_JSON_PATH=value` +* `--key_file=value` → `CM_SSH_KEY_FILE=value` +* `--run_cmds=value` → `CM_TERRAFORM_RUN_COMMANDS=value` +* `--ssh_key_file=value` → `CM_SSH_KEY_FILE=value` + +**Above CLI flags can be used in the Python CM API as follows:** + +```python +r=cm.access({... , "cminit":...}) +``` + +
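Combining the flags above with the variations, a hypothetical Python-API call that provisions a GCP instance (mirroring the `--tags=run,terraform,_gcp,_gcp_project.* --cminit` CLI example earlier) might look like:

```python
import cmind as cm

# 'my-gcp-project' is a placeholder project name
r = cm.access({'action': 'run',
               'automation': 'script',
               'tags': 'run,terraform,_gcp,_gcp_project.my-gcp-project',
               'cminit': True,   # -> CM_TERRAFORM_CM_INIT
               'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```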
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* TF_VAR_SECURITY_GROUP_ID: `sg-0783752c97d2e011d` +* TF_VAR_CPU_COUNT: `1` + +
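After `terraform apply`, the `postprocess` step of the `customize.py` further below reads `terraform.tfstate` to recover the attributes of the newly created instances. A minimal stand-alone sketch of that parsing, assuming an AWS-style state file in the current directory:

```python
import json

with open('terraform.tfstate') as f:
    tfstate = json.load(f)

# Collect aws_instance resources and print a couple of attributes;
# guarding against an empty list avoids the unbound-variable case that
# a bare break-out-of-loop search would hit for non-AWS providers
aws_resources = [r for r in tfstate['resources']
                 if r['type'] == 'aws_instance']
for instance in (aws_resources[0]['instances'] if aws_resources else []):
    attrs = instance['attributes']
    print(attrs.get('id'), attrs.get('public_ip'))
```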
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-terraform/_cm.json)*** + * get,terraform + - CM script: [get-terraform](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-terraform) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-terraform/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-terraform/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-terraform/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-terraform/_cm.json) + 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-terraform/customize.py)*** + 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/run-terraform/_cm.json)*** + * destroy,terraform + * `if (CM_DESTROY_TERRAFORM == on)` + * CM names: `--adr.['destroy-cmd']...` + - CM script: [destroy-terraform](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/destroy-terraform) + +___ +### Script output +`cmr "run terraform [,variations]" [--input_flags] -j` +#### New environment keys (filter) + +* `CM_TERRAFORM_CONFIG_DIR` +* `CM_TERRAFORM_RUN_DIR` +#### New environment keys auto-detected from customize + +* `CM_TERRAFORM_CONFIG_DIR` +* `CM_TERRAFORM_RUN_DIR` \ No newline at end of file diff --git a/script/run-terraform/_cm.json b/script/run-terraform/_cm.json new file mode 100644 index 0000000000..243ef4a8ad --- /dev/null +++ b/script/run-terraform/_cm.json @@ -0,0 +1,470 @@ +{ + "alias": "run-terraform", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Cloud automation", + "clean_files": [], + "deps": [ + { + "tags": "get,terraform" + } + ], + "tags": [ + "run", + "terraform" + ], + "new_env_keys": [ + "CM_TERRAFORM_RUN_DIR", + "CM_TERRAFORM_CONFIG_DIR" + ], + "new_state_keys": [ + "CM_TF_NEW_INSTANCES_STATE" + ], + "uid": "ec344bd44af144d7", + "post_deps": [ + { + "names": [ + "destroy-cmd" + ], + "tags": "destroy,terraform", + "enable_if_env": { + "CM_DESTROY_TERRAFORM": [ "on" ] + }, + "dynamic": true + } + ], + "input_mapping": { + "destroy": "CM_DESTROY_TERRAFORM", + "cminit": "CM_TERRAFORM_CM_INIT", + "key_file": "CM_SSH_KEY_FILE", + "run_cmds": "CM_TERRAFORM_RUN_COMMANDS", + "gcp_credentials_json_file": "CM_GCP_CREDENTIALS_JSON_PATH", + "ssh_key_file": "CM_SSH_KEY_FILE" + }, + "default_env": { + "TF_VAR_SECURITY_GROUP_ID": "sg-0783752c97d2e011d", + "TF_VAR_CPU_COUNT": "1" + }, + "variations": { + "aws": { + "group": "cloud-provider", + "default": true, + "default_variations": { + "aws-instance-type": "t2.micro", + "region": "us-west-2" + }, + "env": { + "CM_TERRAFORM_CONFIG_DIR_NAME": "aws" + } + }, + "gcp": { + "group": "cloud-provider", + "default_variations": { + "gcp-instance-type": "f1-micro", + "gcp-instance-image": "ubuntu-2204-jammy-v20230114", + "region": "region.us-west1", + "zone": "zone.us-west1-a", + "storage-size": "storage_size.120", + "instance-name": "instance_name.microubuntu2204" + }, + "default_env": { + "TF_VAR_SSH_PUB_KEY_FILE": "$HOME/.ssh/id_rsa.pub", + "TF_VAR_SSH_USER": "asuresh" + }, + "env": { + "CM_TERRAFORM_CONFIG_DIR_NAME": "gcp" + } + },
+ "n1-standard.#": { + "base": [ + "gcp" + ], + "group": "gcp-instance-type", + "env": { + "TF_VAR_INSTANCE_TYPE": "n1-highmem-#" + } + }, + "n1-highmem.#": { + "base": [ + "gcp" + ], + "group": "gcp-instance-type", + "env": { + "TF_VAR_INSTANCE_TYPE": "n1-highmem-#" + } + }, + "f1-micro": { + "base": [ + "gcp" + ], + "group": "gcp-instance-type", + "env": { + "TF_VAR_INSTANCE_TYPE": "f1-micro" + } + }, + "t2.micro": { + "group": "aws-instance-type", + "base": [ + "aws" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "t2.micro" + } + }, + "t2.small": { + "group": "aws-instance-type", + "base": [ + "aws" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "t2.small" + } + }, + "t2.medium": { + "group": "aws-instance-type", + "base": [ + "aws" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "t2.medium" + } + }, + "t2.large": { + "group": "aws-instance-type", + "base": [ + "aws" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "t2.large" + } + }, + "t2.xlarge": { + "group": "aws-instance-type", + "base": [ + "aws" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "t2.xlarge" + } + }, + "t2.2xlarge": { + "group": "aws-instance-type", + "base": [ + "aws" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "t2.2xlarge" + } + }, + "t2.nano": { + "group": "aws-instance-type", + "base": [ + "aws" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "t2.nano" + } + }, + "t2.#": { + "group": "aws-instance-type", + "base": [ + "aws" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "t2.#" + } + }, + "c5.4xlarge": { + "group": "aws-instance-type", + "base": [ + "aws" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "c5.4xlarge" + } + }, + "c5.12xlarge": { + "group": "aws-instance-type", + "base": [ + "aws" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "c5.12xlarge" + } + }, + "c5d.9xlarge": { + "group": "aws-instance-type", + "base": [ + "aws" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "c5d.9xlarge" + } + }, + "g4dn.xlarge": { + "group": "aws-instance-type", + "base": [ + "aws" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "g4dn.xlarge" + } + }, + "x86": { + "group": "platform", + "default": true, + "env": { + "CM_INSTANCE_PLATFORM": "x86" + } + }, + "arm64": { + "group": "platform", + "env": { + "CM_INSTANCE_PLATFORM": "arm64" + } + }, + "a1.2xlarge": { + "group": "aws-instance-type", + "base": [ + "aws", + "arm64" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "a1.2xlarge" + } + }, + "a1.xlarge": { + "group": "aws-instance-type", + "base": [ + "aws", + "arm64" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "a1.xlarge" + } + }, + "m7g.xlarge": { + "group": "aws-instance-type", + "base": [ + "aws", + "arm64", + "graviton" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "m7g.xlarge" + } + }, + "m7g.2xlarge": { + "group": "aws-instance-type", + "base": [ + "aws", + "arm64", + "graviton" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "m7g.2xlarge" + } + }, + "inf1.xlarge": { + "group": "aws-instance-type", + "base": [ + "aws", + "inferentia" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "inf1.xlarge" + } + }, + "inf1.2xlarge": { + "group": "aws-instance-type", + "base": [ + "aws", + "inferentia" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "inf1.2xlarge" + } + }, + "inf2.xlarge": { + "group": "aws-instance-type", + "base": [ + "aws", + "inferentia" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "inf2.xlarge" + } + }, + "inf2.8xlarge": { + "group": "aws-instance-type", + "base": [ + "aws", + "inferentia" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "inf2.8xlarge" + } + }, + "a1.metal": { + "group": "aws-instance-type", + "base": [ + "aws", + "arm64" + ], + "env": { + "TF_VAR_INSTANCE_TYPE": "a1.metal" + } + }, + 
"storage_size.8": { + "group": "storage-size", + "env": { + "TF_VAR_DISK_GBS": "8" + } + }, + "storage_size.#": { + "group": "storage-size", + "env": { + "TF_VAR_DISK_GBS": "#" + } + }, + "us-west-2": { + "group": "region", + "env": { + "TF_VAR_INSTANCE_REGION": "us-west-2" + } + }, + "region.#": { + "group": "region", + "env": { + "TF_VAR_INSTANCE_REGION": "#" + } + }, + "zone.#": { + "group": "zone", + "env": { + "TF_VAR_INSTANCE_ZONE": "#" + } + }, + "gcp_instance_type.#": { + "group": "gcp-instance-type", + "env": { + "TF_VAR_INSTANCE_TYPE": "#" + } + }, + "aws_instance_type.#": { + "group": "aws-instance-type", + "env": { + "TF_VAR_INSTANCE_TYPE": "#" + } + }, + "gcp_project.#": { + "group": "gcp-project", + "env": { + "TF_VAR_GCP_PROJECT": "#" + } + }, + "aws_instance_image.ami-0735c191cf914754d": { + "group": "aws-instance-image", + "env": { + "TF_VAR_INSTANCE_IMAGE": "ami-0735c191cf914754d" + } + }, + "aws_instance_image.ami-0a0d8589b597d65b3": { + "group": "aws-instance-image", + "env": { + "TF_VAR_INSTANCE_IMAGE": "ami-0a0d8589b597d65b3" + } + }, + "ubuntu.#": { + "env": { + "TF_VAR_INSTANCE_IMAGE_OS": "ubuntu.#" + } + }, + "rhel.#": { + "env": { + "TF_VAR_INSTANCE_IMAGE_OS": "rhel.#" + } + }, + "amazon-linux-2-kernel.#": { + "env": { + "TF_VAR_INSTANCE_IMAGE_OS": "amazon-linux-2-kernel.#" + } + }, + "amazon-linux-2-kernel.510,arm64,us-west-2": { + "group": "aws-instance-image", + "env": { + "TF_VAR_INSTANCE_IMAGE": "ami-0f1a5f5ada0e7da53" + } + }, + "rhel.9,x86,us-west-2": { + "group": "aws-instance-image", + "env": { + "TF_VAR_INSTANCE_IMAGE": "ami-0dda7e535b65b6469" + } + }, + "ubuntu.2204,arm64,us-west-2": { + "group": "aws-instance-image", + "env": { + "TF_VAR_INSTANCE_IMAGE": "ami-079f51a7bcca65b92" + } + }, + "ubuntu.2204,x86,us-west-2": { + "group": "aws-instance-image", + "env": { + "TF_VAR_INSTANCE_IMAGE": "ami-0735c191cf914754d" + } + }, + "inferentia,amazon-linux-2-kernel.510": { + "default_variations": { + "aws-instance-image": "amazon-linux-2-kernel.510,arm64,us-west-2" + } + }, + "aws_instance_image.#": { + "group": "aws-instance-image", + "env": { + "TF_VAR_INSTANCE_IMAGE": "#" + } + }, + "gcp_instance_image.#": { + "group": "gcp-instance-image", + "env": { + "TF_VAR_INSTANCE_IMAGE": "#" + } + }, + "debian-cloud/debian-11": { + "group": "gcp-instance-image", + "env": { + "TF_VAR_INSTANCE_IMAGE": "debian-cloud/debian-11" + } + }, + "ubuntu-2204-jammy-v20230114": { + "group": "gcp-instance-image", + "env": { + "TF_VAR_INSTANCE_IMAGE": "ubuntu-2204-jammy-v20230114" + } + }, + "instance_name.#": { + "group": "instance-name", + "env": { + "TF_VAR_INSTANCE_NAME": "#" + } + }, + "graviton": { + "default_variations": { + "platform": "arm64" + }, + "env": { + "CM_TERRAFORM_AWS_GRAVITON_INSTANCE": "yes" + } + }, + "inferentia": { + "default_variations": { + "platform": "arm64" + }, + "env": { + "CM_TERRAFORM_AWS_INFERENTIA_INSTANCE": "yes" + } + } + } +} diff --git a/script/run-terraform/aws/apply_credentials.sh b/script/run-terraform/aws/apply_credentials.sh new file mode 100644 index 0000000000..ff649c594f --- /dev/null +++ b/script/run-terraform/aws/apply_credentials.sh @@ -0,0 +1,3 @@ +export TF_VAR_ACCESS_KEY=$AWS_ACCESS_KEY_ID +export TF_VAR_SECRET_KEY=$AWS_SECRET_ACCESS_KEY +export TF_VAR_TOKEN=$AWS_SESSION_TOKEN diff --git a/script/run-terraform/aws/credentials.example b/script/run-terraform/aws/credentials.example new file mode 100644 index 0000000000..dff61bd91a --- /dev/null +++ b/script/run-terraform/aws/credentials.example @@ -0,0 +1,3 @@ +export 
AWS_ACCESS_KEY_ID=""
+export AWS_SECRET_ACCESS_KEY=""
+export AWS_SESSION_TOKEN=""
diff --git a/script/run-terraform/aws/main.tf b/script/run-terraform/aws/main.tf
new file mode 100644
index 0000000000..dd281310d0
--- /dev/null
+++ b/script/run-terraform/aws/main.tf
@@ -0,0 +1,67 @@
+variable ACCESS_KEY {
+  type = string
+  description = "AWS access key"
+}
+variable SECRET_KEY {
+  type = string
+  description = "AWS secret key"
+}
+variable TOKEN {
+  type = string
+  description = "AWS Token"
+}
+variable INSTANCE_TYPE {
+  type = string
+  description = "AWS instance type"
+}
+variable INSTANCE_REGION {
+  type = string
+  description = "AWS instance region"
+}
+variable INSTANCE_IMAGE {
+  type = string
+  description = "AWS instance image"
+}
+variable SECURITY_GROUP_ID {
+  type = string
+  description = "AWS instance security group id"
+}
+variable CPU_COUNT {
+  default = 1
+  description = "AWS CPU count"
+}
+variable DISK_GBS {
+  default = 8
+  description = "AWS Disk space in GBs"
+}
+
+terraform {
+  required_providers {
+    aws = {
+      source = "hashicorp/aws"
+      version = "~> 4.0"
+    }
+  }
+}
+# Configure the AWS Provider
+provider "aws" {
+  region = var.INSTANCE_REGION
+  access_key=var.ACCESS_KEY
+  secret_key=var.SECRET_KEY
+  token=var.TOKEN
+}
+
+resource "aws_instance" "cm" {
+  ami = var.INSTANCE_IMAGE
+  instance_initiated_shutdown_behavior = "terminate"
+  instance_type = var.INSTANCE_TYPE
+  key_name = "cmuser"
+  vpc_security_group_ids = [
+    var.SECURITY_GROUP_ID
+  ]
+  root_block_device {
+    delete_on_termination = true
+    volume_size = var.DISK_GBS
+  }
+}
+
diff --git a/script/run-terraform/customize.py b/script/run-terraform/customize.py
new file mode 100644
index 0000000000..eeddbff60d
--- /dev/null
+++ b/script/run-terraform/customize.py
@@ -0,0 +1,87 @@
+from cmind import utils
+import cmind as cm
+import os
+import shutil
+import json
+
+def preprocess(i):
+
+    os_info = i['os_info']
+    env = i['env']
+    script_dir = i['run_script_input']['path']
+    config_dir = os.path.join(script_dir, env.get('CM_TERRAFORM_CONFIG_DIR_NAME', ''))
+    env['CM_TERRAFORM_CONFIG_DIR'] = config_dir
+    cache_dir = os.getcwd()
+
+    print(f"Running terraform from {cache_dir}")
+
+    shutil.copy(os.path.join(config_dir, "main.tf"), cache_dir)
+    env['CM_TERRAFORM_RUN_DIR'] = cache_dir
+
+    return {'return': 0}
+
+def postprocess(i):
+    env = i['env']
+    if env.get('CM_DESTROY_TERRAFORM'):
+        return {'return': 0}
+    state = i['state']
+    with open("terraform.tfstate") as f:
+        tfstate = json.load(f)
+#    print(tfstate)
+    resources = tfstate['resources']
+    for resource in resources:
+        if resource['type'] == 'aws_instance':
+            aws_resource = resource
+            break
+    instances_state = aws_resource['instances']
+    state['CM_TF_NEW_INSTANCES_STATE'] = []
+    ssh_key_file = env.get('CM_SSH_KEY_FILE')
+    user = 'ubuntu'
+    for instance_state in instances_state:
+        instance_attributes = instance_state['attributes']
+
state['CM_TF_NEW_INSTANCES_STATE'].append(instance_attributes) + public_ip = instance_attributes['public_ip'] + if env.get('CM_TERRAFORM_CM_INIT'): + run_input = { + 'automation': 'script', + 'action': 'run', + 'tags': 'remote,run,ssh', + 'env': { + }, + 'host': public_ip, + 'user': user, + 'skip_host_verify': True, + 'ssh_key_file': ssh_key_file, + 'quiet': True, + 'silent': True, + 'run_cmds': [ + "sudo apt-get update", + "sudo apt-get -y upgrade", + "sudo apt-get install -y python3-pip", + "python3 -m pip install cmind", + "source ~/.profile", + "cm pull repo ctuning@mlcommons-ck", + "cm run script --tags=get,sys-utils-cm" + ] + } + if env.get('CM_TERRAFORM_RUN_COMMANDS'): + run_cmds = env.get('CM_TERRAFORM_RUN_COMMANDS') + for cmd in run_cmds: + cmd=cmd.replace(":", "=") + cmd=cmd.replace(";;", ",") + run_input['run_cmds'].append(cmd) + r = cm.access(run_input) + if r['return'] > 0: + return r + #print(r) + print_attr(instance_attributes, "id") + print_attr(instance_attributes, "instance_type") + print_attr(instance_attributes, "public_ip") + print_attr(instance_attributes, "public_dns") + print_attr(instance_attributes, "security_groups") + + return {'return': 0} + +def print_attr(instance_attributes, key): + if key in instance_attributes: + print(key.upper() + ": " + str(instance_attributes[key])) diff --git a/script/run-terraform/gcp/apply_credentials.sh b/script/run-terraform/gcp/apply_credentials.sh new file mode 100644 index 0000000000..e69de29bb2 diff --git a/script/run-terraform/gcp/main.tf b/script/run-terraform/gcp/main.tf new file mode 100644 index 0000000000..abf47034e0 --- /dev/null +++ b/script/run-terraform/gcp/main.tf @@ -0,0 +1,80 @@ +variable INSTANCE_TYPE { + type = string + description = "GCP instance type" +} +variable INSTANCE_NAME { + type = string + description = "GCP instance name" +} +variable INSTANCE_IMAGE { + type = string + description = "GCP instance OS image" +} +variable GCP_PROJECT { + type = string + description = "GCP project ID" +} +variable SECURITY_GROUP_ID { + type = string + description = "GCP instance security group id" +} +variable CPU_COUNT { + default = 1 + description = "GCP CPU count" +} +variable DISK_GBS { + default = 8 + description = "GCP Disk space in GBs" +} +variable SSH_PUB_KEY_FILE { + type = string + description = "Path to SSH public key" +} +variable SSH_USER { + type = string + description = "SSH username" +} + +variable INSTANCE_REGION { + type = string + description = "GCP region" +} + +variable INSTANCE_ZONE { + type = string + description = "GCP zone" +} + + +resource "google_compute_instance" "cm" { + name = var.INSTANCE_NAME + machine_type = var.INSTANCE_TYPE + zone = var.INSTANCE_ZONE + project = var.GCP_PROJECT + tags = ["cm"] + + boot_disk { + initialize_params { + image = var.INSTANCE_IMAGE + labels = { + my_label = "value" + } + } + } + + network_interface { + network = "default" + + access_config { + // Ephemeral public IP + } + } + + metadata = { + ssh-keys = "${var.SSH_USER}:${file(var.SSH_PUB_KEY_FILE)}" + } + + metadata_startup_script = "echo hi > /test.txt" + + +} diff --git a/script/run-terraform/run.sh b/script/run-terraform/run.sh new file mode 100644 index 0000000000..094cffcd98 --- /dev/null +++ b/script/run-terraform/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash +if [[ ${CM_TERRAFORM_CONFIG_DIR} == "aws" ]]; then + source ${CM_TERRAFORM_CONFIG_DIR}/credentials.sh + source ${CM_TERRAFORM_CONFIG_DIR}/apply_credentials.sh +fi + + +if [[ -z $CM_DESTROY_TERRAFORM ]]; then + terraform init -input=false + terraform plan 
-out=tfplan -input=false + terraform apply -input=false tfplan + test $? -eq 0 || exit $? + sleep 20 +fi diff --git a/script/save-mlperf-inference-implementation-state/README.md b/script/save-mlperf-inference-implementation-state/README.md new file mode 100644 index 0000000000..7ce73ffe7c --- /dev/null +++ b/script/save-mlperf-inference-implementation-state/README.md @@ -0,0 +1,116 @@ +Automatically generated README for this automation recipe: **save-mlperf-inference-implementation-state** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=save-mlperf-inference-implementation-state,b14b813229c444f8) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/save-mlperf-inference-implementation-state)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *save,mlperf,inference,implementation,state* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "save mlperf inference implementation state" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=save,mlperf,inference,implementation,state` + +`cm run script --tags=save,mlperf,inference,implementation,state ` + +*or* + +`cmr "save mlperf inference implementation state"` + +`cmr "save mlperf inference implementation state " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'save,mlperf,inference,implementation,state',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="save,mlperf,inference,implementation,state"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=save,mlperf,inference,implementation,state) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "save mlperf inference implementation state" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
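+The `env` dictionary of the Python API plays the same role as `--env.KEY=VALUE` on the command line. Below is a minimal illustrative sketch: `CM_MLPERF_README` is a key read by this script's `customize.py`, and the value is chosen here purely for demonstration.
+
+```python
+import cmind
+
+# Hypothetical equivalent of `--env.CM_MLPERF_README=yes`
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'save,mlperf,inference,implementation,state',
+                  'env': {'CM_MLPERF_README': 'yes'},
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+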
+
+___
+### Dependencies on other CM scripts
+
+
+  1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/save-mlperf-inference-implementation-state/_cm.yaml)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/save-mlperf-inference-implementation-state/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/save-mlperf-inference-implementation-state/_cm.yaml)
+  1. ***Run native script if exists***
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/save-mlperf-inference-implementation-state/_cm.yaml)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/save-mlperf-inference-implementation-state/customize.py)***
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/save-mlperf-inference-implementation-state/_cm.yaml)
+
+___
+### Script output
+`cmr "save mlperf inference implementation state " -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
diff --git a/script/save-mlperf-inference-implementation-state/_cm.yaml b/script/save-mlperf-inference-implementation-state/_cm.yaml
new file mode 100644
index 0000000000..4f1deee8e2
--- /dev/null
+++ b/script/save-mlperf-inference-implementation-state/_cm.yaml
@@ -0,0 +1,13 @@
+alias: save-mlperf-inference-implementation-state
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: false
+new_state_keys:
+  - mlperf-inference-implementation
+tags:
+- save
+- mlperf
+- inference
+- implementation
+- state
+uid: b14b813229c444f8
diff --git a/script/save-mlperf-inference-implementation-state/customize.py b/script/save-mlperf-inference-implementation-state/customize.py
new file mode 100644
index 0000000000..be3be96798
--- /dev/null
+++ b/script/save-mlperf-inference-implementation-state/customize.py
@@ -0,0 +1,63 @@
+from cmind import utils
+import os
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+    state = i['state']
+
+    meta = i['meta']
+
+    automation = i['automation']
+
+    quiet = (env.get('CM_QUIET', False) == 'yes')
+
+    if not state.get('mlperf-inference-implementation'): #No state information.
Just returning + return {'return': 0} + + if env.get('CM_MLPERF_README', "") == "yes": + import cmind as cm + inp = i['input'] + + script_tags = state['mlperf-inference-implementation'].get('script_tags', '') + script_adr = state['mlperf-inference-implementation'].get('script_adr', {}) + + if script_tags != '': + cm_input = {'action': 'run', + 'automation': 'script', + 'tags': script_tags, + 'adr': script_adr, + 'env': env, + 'print_deps': True, + 'quiet': True, + 'silent': True, + 'fake_run': True + } + + r = cm.access(cm_input) + if r['return'] > 0: + return r + + state['mlperf-inference-implementation']['print_deps'] = r['new_state']['print_deps'] + + if env.get('CM_DUMP_VERSION_INFO', True): + + if state['mlperf-inference-implementation'].get('script_id', '') == '': + state['mlperf-inference-implementation']['script_id'] = '' + + script_id = state['mlperf-inference-implementation']['script_id'] + run_state = i['input']['run_state'] + version_info = {} + version_info[script_id] = run_state['version_info'] + + state['mlperf-inference-implementation']['version_info'] = version_info + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/set-device-settings-qaic/README.md b/script/set-device-settings-qaic/README.md new file mode 100644 index 0000000000..896b147c93 --- /dev/null +++ b/script/set-device-settings-qaic/README.md @@ -0,0 +1,145 @@ +Automatically generated README for this automation recipe: **set-device-settings-qaic** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=set-device-settings-qaic,408a1a1563b44780) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-device-settings-qaic)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *set,device,qaic,ai100,cloud,performance,power,setting,mode,vc,ecc* +* Output cached? 
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "set device qaic ai100 cloud performance power setting mode vc ecc" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=set,device,qaic,ai100,cloud,performance,power,setting,mode,vc,ecc` + +`cm run script --tags=set,device,qaic,ai100,cloud,performance,power,setting,mode,vc,ecc[,variations] ` + +*or* + +`cmr "set device qaic ai100 cloud performance power setting mode vc ecc"` + +`cmr "set device qaic ai100 cloud performance power setting mode vc ecc [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'set,device,qaic,ai100,cloud,performance,power,setting,mode,vc,ecc',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="set,device,qaic,ai100,cloud,performance,power,setting,mode,vc,ecc"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=set,device,qaic,ai100,cloud,performance,power,setting,mode,vc,ecc) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "set device qaic ai100 cloud performance power setting mode vc ecc[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_ecc` + - Environment variables: + - *CM_QAIC_ECC*: `yes` + - Workflow: + * `_vc.#` + - Environment variables: + - *CM_QAIC_VC*: `#` + - Workflow: + +
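+For example, a minimal sketch of selecting these variations from Python (the value `12` for `_vc.#` is purely illustrative; `customize.py` converts it to hex and exports `CM_QAIC_VC_HEX`):
+
+```python
+import cmind
+
+# Variations are appended to the tag list with a leading underscore
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'set,device,qaic,ai100,cloud,performance,power,setting,mode,vc,ecc,_ecc,_vc.12',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+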
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_QAIC_DEVICES: `0` + +
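+Since `customize.py` splits `CM_QAIC_DEVICES` on commas, several devices can be targeted in one run. A minimal sketch (the device ids are illustrative):
+
+```python
+import cmind
+
+# Equivalent of `--env.CM_QAIC_DEVICES=0,1` on the command line
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'set,device,qaic,ai100,cloud,performance,power,setting,mode,vc,ecc,_ecc',
+                  'env': {'CM_QAIC_DEVICES': '0,1'},
+                  'out': 'con'})
+```
+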
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-device-settings-qaic/_cm.json)*** + * detect-os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,qaic,platform,sdk + - CM script: [get-qaic-platform-sdk](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-qaic-platform-sdk) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-device-settings-qaic/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-device-settings-qaic/_cm.json) + 1. ***Run native script if exists*** + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-device-settings-qaic/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-device-settings-qaic/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-device-settings-qaic/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-device-settings-qaic/_cm.json) + +___ +### Script output +`cmr "set device qaic ai100 cloud performance power setting mode vc ecc [,variations]" -j` +#### New environment keys (filter) + +* `CM_QAIC_DEVICE_*` +#### New environment keys auto-detected from customize diff --git a/script/set-device-settings-qaic/_cm.json b/script/set-device-settings-qaic/_cm.json new file mode 100644 index 0000000000..156c01b3ce --- /dev/null +++ b/script/set-device-settings-qaic/_cm.json @@ -0,0 +1,54 @@ +{ + "alias": "set-device-settings-qaic", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "category": "DevOps automation", + "deps": [ + { + "tags": "detect-os" + }, + { + "tags": "get,qaic,platform,sdk" + } + ], + "docker_input_mapping": {}, + "input_description": {}, + "input_mapping": {}, + "new_env_keys": [ + "CM_QAIC_DEVICE_*" + ], + "default_env": { + "CM_QAIC_DEVICES": "0" + }, + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "set", + "device", + "qaic", + "ai100", + "cloud", + "performance", + "power", + "setting", + "mode", + "vc", + "ecc" + ], + "uid": "408a1a1563b44780", + "variations": { + "ecc": { + "env": { + "CM_QAIC_ECC": "yes" + } + }, + "vc.#": { + "env": { + "CM_QAIC_VC": "#" + } + } + } +} diff --git a/script/set-device-settings-qaic/customize.py b/script/set-device-settings-qaic/customize.py new file mode 100644 index 0000000000..48d065c84e --- /dev/null +++ b/script/set-device-settings-qaic/customize.py @@ -0,0 +1,39 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_QAIC_ECC', '') == 'yes': + import json + for device in env['CM_QAIC_DEVICES'].split(","): + ecc_template = {} + ecc_template['request'] = [] + ecc_template['request'].append({}) + ecc_template['request'][0]['qid'] = device + ecc_template['request'][0]['dev_config'] = {} + ecc_template['request'][0]['dev_config']['update_ras_ecc_config_request'] = {} + ecc_template['request'][0]['dev_config']['update_ras_ecc_config_request']['ras_ecc'] = [] + 
ecc_template['request'][0]['dev_config']['update_ras_ecc_config_request']['ras_ecc'].append("RAS_DDR_ECC") + with open("request_"+device+".json", "w") as f: + f.write(json.dumps(ecc_template)) + + if env.get('CM_QAIC_VC', '') != '': + env['CM_QAIC_VC_HEX'] = hex(int(env['CM_QAIC_VC'])) + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/set-device-settings-qaic/run.sh b/script/set-device-settings-qaic/run.sh new file mode 100644 index 0000000000..cdc11ac730 --- /dev/null +++ b/script/set-device-settings-qaic/run.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" +IFS="," read -r -a devices <<< "$CM_QAIC_DEVICES" + +if [[ -n ${CM_QAIC_VC} ]]; then + for device in ${devices[@]} + do + run "sudo ${CM_QAIC_TOOLS_PATH}/qaic-diag -d $device -m 0x4B 0x66 0x05 0x1 ${CM_QAIC_VC_HEX}" + done +fi + +if [[ ${CM_QAIC_ECC} == "yes" ]]; then + for device in ${devices} + do + run "sudo ${CM_QAIC_TOOLS_PATH}/qaic-monitor-json -i request_$device.json" + run "rm request_$device.json" + done +fi + diff --git a/script/set-echo-off-win/README.md b/script/set-echo-off-win/README.md new file mode 100644 index 0000000000..8dd46f53e2 --- /dev/null +++ b/script/set-echo-off-win/README.md @@ -0,0 +1,118 @@ +Automatically generated README for this automation recipe: **set-echo-off-win** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=set-echo-off-win,49d94b57524f4fcf) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-echo-off-win)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *set,echo,off,win,echo-off-win,echo-off* +* Output cached? 
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "set echo off win echo-off-win echo-off" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=set,echo,off,win,echo-off-win,echo-off` + +`cm run script --tags=set,echo,off,win,echo-off-win,echo-off ` + +*or* + +`cmr "set echo off win echo-off-win echo-off"` + +`cmr "set echo off win echo-off-win echo-off " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'set,echo,off,win,echo-off-win,echo-off',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="set,echo,off,win,echo-off-win,echo-off"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=set,echo,off,win,echo-off-win,echo-off) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "set echo off win echo-off-win echo-off" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-echo-off-win/_cm.json) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-echo-off-win/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-echo-off-win/_cm.json) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-echo-off-win/_cm.json) + 1. Run "postrocess" function from customize.py + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-echo-off-win/_cm.json) + +___ +### Script output +`cmr "set echo off win echo-off-win echo-off " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/set-echo-off-win/_cm.json b/script/set-echo-off-win/_cm.json new file mode 100644 index 0000000000..59bfef9ac0 --- /dev/null +++ b/script/set-echo-off-win/_cm.json @@ -0,0 +1,18 @@ +{ + "alias": "set-echo-off-win", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "DevOps automation", + "new_state_keys": [ + "script_prefix" + ], + "tags": [ + "set", + "echo", + "off", + "win", + "echo-off-win", + "echo-off" + ], + "uid": "49d94b57524f4fcf" +} diff --git a/script/set-echo-off-win/customize.py b/script/set-echo-off-win/customize.py new file mode 100644 index 0000000000..ef9ba8b8dd --- /dev/null +++ b/script/set-echo-off-win/customize.py @@ -0,0 +1,25 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + state = i['state'] + + # If windows, download here otherwise use run.sh + if os_info['platform'] == 'windows': + + script_prefix = state.get('script_prefix',[]) + + s='@echo off' + if s not in script_prefix: + script_prefix.insert(0, s) + + state['script_prefix'] = script_prefix + + # Test to skip next dependency + #env = i['env'] + #env['CM_SKIP_SYS_UTILS'] = 'YES' + + return {'return':0} diff --git a/script/set-performance-mode/README.md b/script/set-performance-mode/README.md new file mode 100644 index 0000000000..f4a36920a6 --- /dev/null +++ b/script/set-performance-mode/README.md @@ -0,0 +1,182 @@ +Automatically generated README for this automation recipe: **set-performance-mode** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=set-performance-mode,2c0ab7b64692443d) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-performance-mode)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *set,system,performance,power,mode* +* Output cached? 
*False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "set system performance power mode" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=set,system,performance,power,mode` + +`cm run script --tags=set,system,performance,power,mode[,variations] ` + +*or* + +`cmr "set system performance power mode"` + +`cmr "set system performance power mode [variations]" ` + + +* *See the list of `variations` [here](#variations) and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.* + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'set,system,performance,power,mode',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="set,system,performance,power,mode"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=set,system,performance,power,mode) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "set system performance power mode[variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
+ Click here to expand this section. + + * `_reproducibility` + - Environment variables: + - *CM_SET_OS_PERFORMANCE_REPRODUCIBILITY_MODE*: `yes` + - Workflow: + +
+ + + * Group "**device**" +
+ Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *CM_SET_PERFORMANCE_MODE_OF*: `cpu` + - Workflow: + +
+ + + * Group "**performance-mode**" +
+ Click here to expand this section. + + * **`_performance`** (default) + - Environment variables: + - *CM_SET_PERFORMANCE_MODE*: `performance` + - Workflow: + +
+ + + * Group "**power**" +
+ Click here to expand this section. + + * `_power` + - Environment variables: + - *CM_SET_PERFORMANCE_MODE*: `power` + - Workflow: + +
+ + +#### Default variations + +`_cpu,_performance` +#### Default environment + +
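+A minimal sketch of picking a non-default group member from Python (the `_power` variation sets `CM_SET_PERFORMANCE_MODE=power` per the meta below):
+
+```python
+import cmind
+
+# Select the `power` mode instead of the default `_performance` variation
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'set,system,performance,power,mode,_power',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+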
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-performance-mode/_cm.json)*** + * detect-os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * detect-cpu + - CM script: [detect-cpu](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-cpu) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-performance-mode/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-performance-mode/_cm.json) + 1. ***Run native script if exists*** + * [run-ubuntu.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-performance-mode/run-ubuntu.sh) + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-performance-mode/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-performance-mode/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-performance-mode/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-performance-mode/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-performance-mode/_cm.json) + +___ +### Script output +`cmr "set system performance power mode [,variations]" -j` +#### New environment keys (filter) + +* `OMP_*` +#### New environment keys auto-detected from customize diff --git a/script/set-performance-mode/_cm.json b/script/set-performance-mode/_cm.json new file mode 100644 index 0000000000..831650f962 --- /dev/null +++ b/script/set-performance-mode/_cm.json @@ -0,0 +1,60 @@ +{ + "alias": "set-performance-mode", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": false, + "category": "DevOps automation", + "deps": [ + { + "tags": "detect-os" + }, + { + "tags": "detect-cpu" + } + ], + "docker_input_mapping": {}, + "input_description": {}, + "input_mapping": {}, + "new_env_keys": [ + "OMP_*" + ], + "new_state_keys": [], + "post_deps": [], + "posthook_deps": [], + "prehook_deps": [], + "tags": [ + "set", + "system", + "performance", + "power", + "mode" + ], + "uid": "2c0ab7b64692443d", + "variations": { + "cpu": { + "group": "device", + "default": "true", + "env": { + "CM_SET_PERFORMANCE_MODE_OF": "cpu" + } + }, + "performance": { + "group": "performance-mode", + "default": true, + "env": { + "CM_SET_PERFORMANCE_MODE": "performance" + } + }, + "power": { + "group": "power", + "env": { + "CM_SET_PERFORMANCE_MODE": "power" + } + }, + "reproducibility": { + "env": { + "CM_SET_OS_PERFORMANCE_REPRODUCIBILITY_MODE": "yes" + } + } + } +} diff --git a/script/set-performance-mode/customize.py b/script/set-performance-mode/customize.py new file mode 100644 index 0000000000..fca19d718c --- /dev/null +++ b/script/set-performance-mode/customize.py @@ -0,0 +1,23 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + env['OMP_PROC_BIND'] = 'true' + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/set-performance-mode/run-ubuntu.sh 
b/script/set-performance-mode/run-ubuntu.sh new file mode 100644 index 0000000000..fcec442464 --- /dev/null +++ b/script/set-performance-mode/run-ubuntu.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +CM_SUDO="sudo" +#Add your run commands here... +# run "$CM_RUN_CMD" +run "${CM_SUDO} apt-get install -y linux-tools-common linux-tools-generic linux-tools-`uname -r`" +run "${CM_SUDO} cpupower frequency-set -g performance" +if [[ ${CM_SET_OS_PERFORMANCE_REPRODUCIBILITY_MODE} != "no" ]]; then + run "${CM_SUDO} sysctl -w vm.dirty_ratio=8" + run "${CM_SUDO} sysctl -w vm.swappiness=1" + run "${CM_SUDO} sysctl -w vm.zone_reclaim_mode=1" + run "${CM_SUDO} sync; sysctl -w vm.drop_caches=3" + run "${CM_SUDO} sysctl -w kernel.randomize_va_space=0" +fi diff --git a/script/set-performance-mode/run.bat b/script/set-performance-mode/run.bat new file mode 100644 index 0000000000..648302ca71 --- /dev/null +++ b/script/set-performance-mode/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/script/set-performance-mode/run.sh b/script/set-performance-mode/run.sh new file mode 100644 index 0000000000..3a584c10cf --- /dev/null +++ b/script/set-performance-mode/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" diff --git a/script/set-sqlite-dir/README.md b/script/set-sqlite-dir/README.md new file mode 100644 index 0000000000..547125bb8b --- /dev/null +++ b/script/set-sqlite-dir/README.md @@ -0,0 +1,143 @@ +Automatically generated README for this automation recipe: **set-sqlite-dir** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=set-sqlite-dir,05904966355a43ac) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-sqlite-dir)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *set,sqlite,dir,sqlite-dir* +* Output cached? 
*True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "set sqlite dir sqlite-dir" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=set,sqlite,dir,sqlite-dir` + +`cm run script --tags=set,sqlite,dir,sqlite-dir [--input_flags]` + +*or* + +`cmr "set sqlite dir sqlite-dir"` + +`cmr "set sqlite dir sqlite-dir " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'set,sqlite,dir,sqlite-dir',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="set,sqlite,dir,sqlite-dir"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=set,sqlite,dir,sqlite-dir) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "set sqlite dir sqlite-dir" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--path=value` → `CM_SQLITE_PATH=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "path":...})
+```
+
+</details>
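+A fuller but still illustrative version of the call above (the path value is hypothetical):
+
+```python
+import cmind as cm
+
+# `path` maps to CM_SQLITE_PATH via this script's `input_mapping`
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'set,sqlite,dir,sqlite-dir',
+               'path': '/tmp/my-sqlite-dir'})
+
+if r['return'] > 0:
+    print(r['error'])
+```
+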
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
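+Per the "New environment keys" filter below, a successful run exposes `CM_SQLITE_PATH`. A sketch of reading it back, assuming the result carries a `new_env` dictionary (as CM script runs typically return):
+
+```python
+import cmind as cm
+
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'set,sqlite,dir,sqlite-dir'})
+
+if r['return'] == 0:
+    # `new_env` is expected to hold keys matching the script's `new_env_keys`
+    print(r.get('new_env', {}).get('CM_SQLITE_PATH'))
+```
+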
+ +___ +### Dependencies on other CM scripts + + + 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-sqlite-dir/_cm.json)*** + * detect,os + - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os) + * get,python3 + * CM names: `--adr.['python', 'python3']...` + - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3) + 1. Run "preprocess" function from customize.py + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-sqlite-dir/_cm.json) + 1. ***Run native script if exists*** + * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-sqlite-dir/run.bat) + * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-sqlite-dir/run.sh) + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-sqlite-dir/_cm.json) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-sqlite-dir/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-sqlite-dir/_cm.json) + +___ +### Script output +`cmr "set sqlite dir sqlite-dir " [--input_flags] -j` +#### New environment keys (filter) + +* `CM_SQLITE_PATH` +#### New environment keys auto-detected from customize + +* `CM_SQLITE_PATH` \ No newline at end of file diff --git a/script/set-sqlite-dir/_cm.json b/script/set-sqlite-dir/_cm.json new file mode 100644 index 0000000000..10c5047862 --- /dev/null +++ b/script/set-sqlite-dir/_cm.json @@ -0,0 +1,33 @@ +{ + "alias": "set-sqlite-dir", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "DevOps automation", + "deps": [ + { + "tags": "detect,os" + }, + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + } + ], + "env": {}, + "input_mapping": { + "path": "CM_SQLITE_PATH" + }, + "new_env_keys": [ + "CM_SQLITE_PATH" + ], + "tags": [ + "set", + "sqlite", + "dir", + "sqlite-dir" + ], + "uid": "05904966355a43ac" +} diff --git a/script/set-sqlite-dir/code.py b/script/set-sqlite-dir/code.py new file mode 100644 index 0000000000..319f23a92f --- /dev/null +++ b/script/set-sqlite-dir/code.py @@ -0,0 +1,2 @@ +import sqlite3 + diff --git a/script/set-sqlite-dir/customize.py b/script/set-sqlite-dir/customize.py new file mode 100644 index 0000000000..638e68ff0c --- /dev/null +++ b/script/set-sqlite-dir/customize.py @@ -0,0 +1,9 @@ +import os + +def postprocess(i): + + env = i['env'] + + env['CM_SQLITE_PATH'] = os.getcwd() + + return {'return':0} diff --git a/script/set-sqlite-dir/run.bat b/script/set-sqlite-dir/run.bat new file mode 100644 index 0000000000..37f249b0fe --- /dev/null +++ b/script/set-sqlite-dir/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/set-sqlite-dir/run.sh b/script/set-sqlite-dir/run.sh new file mode 100644 index 0000000000..9b94917d9e --- /dev/null +++ b/script/set-sqlite-dir/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +test $? -eq 0 || exit $? 
diff --git a/script/set-venv/README-extra.md b/script/set-venv/README-extra.md new file mode 100644 index 0000000000..987ad1f67b --- /dev/null +++ b/script/set-venv/README-extra.md @@ -0,0 +1,6 @@ +# Examples + +```bash +cmr "set venv" mlperf-test +cmr "set venv" mlperf-test2 --python=/usr/bin/python3 +``` diff --git a/script/set-venv/README.md b/script/set-venv/README.md new file mode 100644 index 0000000000..b5bc9dcddb --- /dev/null +++ b/script/set-venv/README.md @@ -0,0 +1,131 @@ +Automatically generated README for this automation recipe: **set-venv** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=set-venv,07163dd7d6cd4026) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-venv)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *set,venv* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "set venv" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=set,venv` + +`cm run script --tags=set,venv [--input_flags]` + +*or* + +`cmr "set venv"` + +`cmr "set venv " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'set,venv',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
+</details>
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="set,venv"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=set,venv) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "set venv" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--python=value` → `CM_SET_VENV_PYTHON=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "python":...})
+```
+
+</details>
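+The README-extra example `cmr "set venv" mlperf-test2 --python=/usr/bin/python3` translates roughly into the Python call below; this is a sketch under the assumption that extra command-line artifacts arrive as the `artifacts` list read by `customize.py`:
+
+```python
+import cmind as cm
+
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'set,venv',
+               'artifacts': ['mlperf-test2'],  # venv name picked up by customize.py
+               'python': '/usr/bin/python3'})  # maps to CM_SET_VENV_PYTHON
+
+if r['return'] > 0:
+    print(r['error'])
+```
+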
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
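+For reference, on Linux the venv setup performed by `customize.py` below boils down to creating a virtual environment plus a `work` directory. A simplified stand-alone sketch using only the standard library (the name is illustrative):
+
+```python
+import os
+import venv
+
+name = 'mlperf-test'  # illustrative venv name
+
+# Roughly `python3 -m venv mlperf-test ; mkdir mlperf-test/work`
+venv.EnvBuilder(with_pip=True).create(name)
+os.makedirs(os.path.join(name, 'work'), exist_ok=True)
+```
+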
+ +___ +### Dependencies on other CM scripts + + + 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-venv/_cm.yaml) + 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-venv/customize.py)*** + 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-venv/_cm.yaml) + 1. ***Run native script if exists*** + 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-venv/_cm.yaml) + 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-venv/customize.py)*** + 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/set-venv/_cm.yaml) + +___ +### Script output +`cmr "set venv " [--input_flags] -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/set-venv/_cm.yaml b/script/set-venv/_cm.yaml new file mode 100644 index 0000000000..40b08b9f19 --- /dev/null +++ b/script/set-venv/_cm.yaml @@ -0,0 +1,14 @@ +alias: set-venv +uid: 07163dd7d6cd4026 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +input_mapping: + python: CM_SET_VENV_PYTHON + +cache: false + +tags: +- set +- venv diff --git a/script/set-venv/customize.py b/script/set-venv/customize.py new file mode 100644 index 0000000000..a8517a366e --- /dev/null +++ b/script/set-venv/customize.py @@ -0,0 +1,96 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + ############################################################ + cur_dir = os.getcwd() + + name = env.get('CM_NAME', '') + if name == '': + artifacts = i.get('input', {}).get('artifacts', []) + if len(artifacts)>0: + name = artifacts[0] + if name == '': + name = 'default' + + if os_info['platform'] == 'windows': + activate_script = os.path.join('Scripts', 'activate.bat') + else: + activate_script = os.path.join('bin', 'activate') + + activate_script2 = os.path.join(name, activate_script) + + if not os.path.isfile(activate_script2): + force_python_path = env.get('CM_SET_VENV_PYTHON','') + + if force_python_path != '' and not os.path.isfile(force_python_path): + return {'return':1, 'error':'python executable not found: {}'.format(force_python_path)} + + if os_info['platform'] == 'windows': + python_path = 'python.exe' if force_python_path == '' else force_python_path + create_dir = ' & md {}\work' + else: + python_path = 'python3' if force_python_path == '' else force_python_path + create_dir = ' ; mkdir {}/work' + + cmd = python_path + ' -m venv ' + name + create_dir.format(name) + + print ('====================================================================') + + print ('Creating venv: "{}" ...'.format(cmd)) + os.system(cmd) + + + if os.path.isfile(activate_script2): + script_file = 'venv-'+name + if os_info['platform'] == 'windows': + script_file += '.bat' + xcmd = script_file + else: + script_file += '.sh' + xcmd = 'source '+script_file + + if not os.path.isfile(script_file): + + work_dir = os.path.join(name, 'work') + if not os.path.isdir(work_dir): + os.makedirs(work_dir) + + if os_info['platform'] == 'windows': + shell = os.environ.get('CM_SET_VENV_SHELL', '') + if shell == '': + shell = 
env.get('CM_SET_VENV_SHELL', '') + if shell != '': + shell = shell.replace('CM_SET_VENV_WORK', 'work') + if shell == '': shell = 'cmd' + cmd = 'cd {} & call {} & set CM_REPOS=%CD%\{}\CM & {}\n'.format(name, activate_script, name, shell) + else: + cmd = '#!/bin/bash\n\ncd {} ; source {} ; export CM_REPOS=$PWD/CM ; cd work\n'.format(name, activate_script) + + with open(script_file, 'w') as f: + f.write(cmd) + + print ('====================================================================') + print ('Please run the following command:') + print ('') + print (xcmd) + print ('====================================================================') + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/tar-my-folder/README-extra.md b/script/tar-my-folder/README-extra.md new file mode 100644 index 0000000000..8c9b525087 --- /dev/null +++ b/script/tar-my-folder/README-extra.md @@ -0,0 +1,12 @@ +# Compress using tar +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) compresses a given folder and generates a tar.gz file + +## How To +```bash +cm run script --tags=run,tar --input_dir=[DIR_PATH] +``` + + +### Additional Options +* `--output_dir:` Directory in which to generate the output file. Default is current working directory +* `--outfile:`: Output filename. Default is inputfoldername".gz" diff --git a/script/tar-my-folder/README.md b/script/tar-my-folder/README.md new file mode 100644 index 0000000000..b54099b916 --- /dev/null +++ b/script/tar-my-folder/README.md @@ -0,0 +1,135 @@ +Automatically generated README for this automation recipe: **tar-my-folder** + +Category: **DevOps automation** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=tar-my-folder,3784212e986c456b) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/tar-my-folder)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *run,tar* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "run tar" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=run,tar` + +`cm run script --tags=run,tar [--input_flags]` + +*or* + +`cmr "run tar"` + +`cmr "run tar " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'run,tar',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+</details>
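+
+*For illustration, a minimal sketch of a concrete call, assuming the `cmind` package is installed and the `mlcommons@ck` repository has been pulled; the paths are hypothetical placeholders, and the input keys follow the flag-to-environment mapping shown under "Customization" below.*
+
+```python
+# Hypothetical sketch: compress one folder via the CM Python API.
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'run,tar',
+                  'out': 'con',
+                  'input_dir': '/tmp/my-folder',   # placeholder: folder to compress
+                  'output_dir': '/tmp'})           # placeholder: where the .tar.gz lands
+if r['return'] > 0:
+    print(r['error'])
+```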
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="run,tar"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,tar) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run tar" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--input_dir=value`  →  `CM_TAR_INPUT_DIR=value`
+* `--outfile=value`  →  `CM_TAR_OUTFILE=value`
+* `--output_dir=value`  →  `CM_TAR_OUTPUT_DIR=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "input_dir":...})
+```
+
+</details>
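+
+*A sketch of the mapping above, under the assumption that passing an input flag as a key is equivalent to setting the mapped environment variable through the `env` dictionary; the file name is a placeholder.*
+
+```python
+import cmind
+
+# Both calls below should produce the same CM_TAR_OUTFILE value, since
+# "outfile" maps to CM_TAR_OUTFILE in "input_mapping" of _cm.json.
+r1 = cmind.access({'action': 'run', 'automation': 'script',
+                   'tags': 'run,tar', 'outfile': 'backup.tar.gz'})
+r2 = cmind.access({'action': 'run', 'automation': 'script',
+                   'tags': 'run,tar', 'env': {'CM_TAR_OUTFILE': 'backup.tar.gz'}})
```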
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+
+___
+### Dependencies on other CM scripts
+
+
+  1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/tar-my-folder/_cm.json)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/tar-my-folder/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/tar-my-folder/_cm.json)
+  1. ***Run native script if exists***
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/tar-my-folder/_cm.json)
+  1. Run "postprocess" function from customize.py
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/tar-my-folder/_cm.json)
+
+___
+### Script output
+`cmr "run tar " [--input_flags] -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
diff --git a/script/tar-my-folder/_cm.json b/script/tar-my-folder/_cm.json
new file mode 100644
index 0000000000..b7bf51a189
--- /dev/null
+++ b/script/tar-my-folder/_cm.json
@@ -0,0 +1,19 @@
+{
+  "alias": "tar-my-folder",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "category": "DevOps automation",
+  "cache": false,
+  "clean_files": [],
+  "deps": [],
+  "input_mapping": {
+    "input_dir": "CM_TAR_INPUT_DIR",
+    "outfile": "CM_TAR_OUTFILE",
+    "output_dir": "CM_TAR_OUTPUT_DIR"
+  },
+  "tags": [
+    "run",
+    "tar"
+  ],
+  "uid": "3784212e986c456b"
+}
diff --git a/script/tar-my-folder/customize.py b/script/tar-my-folder/customize.py
new file mode 100644
index 0000000000..d405e76fdb
--- /dev/null
+++ b/script/tar-my-folder/customize.py
@@ -0,0 +1,29 @@
+from cmind import utils
+import cmind as cm
+import os
+import subprocess
+from os.path import exists
+
+def preprocess(i):
+
+    os_info = i['os_info']
+    env = i['env']
+    input_dir = env.get("CM_TAR_INPUT_DIR", "")
+    if input_dir == "":
+        return {'return': 1, 'error': 'Please set CM_TAR_INPUT_DIR'}
+    output_dir = env.get("CM_TAR_OUTPUT_DIR", "")
+    if output_dir == "":
+        output_dir = os.getcwd()
+    output_file = env.get("CM_TAR_OUTFILE", "")
+    input_dirname = os.path.basename(input_dir)
+    if output_file == "":
+        output_file = input_dirname + ".tar.gz"
+    from pathlib import Path
+    input_path = Path(input_dir)
+    cd_dir = input_path.parent.absolute()
+    CMD = 'tar --directory ' + str(cd_dir) + ' -czf ' + os.path.join(output_dir, output_file) + ' ' + input_dirname
+    print(CMD)
+    ret = os.system(CMD)
+    if ret == 0: print("Tar file " + os.path.join(output_dir, output_file) + " created")
+
+    return {'return': ret}
diff --git a/script/test-download-and-extract-artifacts/README-extra.md b/script/test-download-and-extract-artifacts/README-extra.md
new file mode 100644
index 0000000000..582991f6d2
--- /dev/null
+++ b/script/test-download-and-extract-artifacts/README-extra.md
@@ -0,0 +1 @@
+# CM script
diff --git a/script/test-download-and-extract-artifacts/README.md b/script/test-download-and-extract-artifacts/README.md
new file mode 100644
index 0000000000..385a728f27
--- /dev/null
+++ b/script/test-download-and-extract-artifacts/README.md
@@ -0,0 +1,125 @@
+Automatically generated README for this automation recipe: **test-download-and-extract-artifacts**
+
+Category: **Tests**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM 
script](https://access.cknowledge.org/playground/?action=scripts&name=test-download-and-extract-artifacts,51dde7580b404b27) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-download-and-extract-artifacts)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* All CM tags to find and reuse this script (see in above meta description): *test,download-and-extract-artifacts* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "test download-and-extract-artifacts" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=test,download-and-extract-artifacts` + +`cm run script --tags=test,download-and-extract-artifacts ` + +*or* + +`cmr "test download-and-extract-artifacts"` + +`cmr "test download-and-extract-artifacts " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'test,download-and-extract-artifacts',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+</details>
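+
+*A hypothetical sketch of consuming this script's output, assuming the produced variables are returned under `new_env` (filtered by the "New environment keys" list below); that return structure is an assumption, not something this README confirms.*
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run', 'automation': 'script',
+                  'tags': 'test,download-and-extract-artifacts', 'out': 'con'})
+if r['return'] == 0:
+    # Assumption: filtered keys such as CM_REPRODUCE_PAPER_XYZ* appear here.
+    for k, v in r.get('new_env', {}).items():
+        if k.startswith('CM_REPRODUCE_PAPER_XYZ'):
+            print(k, '=', v)
```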
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="test,download-and-extract-artifacts"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=test,download-and-extract-artifacts) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "test download-and-extract-artifacts" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-download-and-extract-artifacts/_cm.yaml)***
+     * download,file,_url.https://zenodo.org/record/4735647/files/resnet50_v1.onnx
+       - CM script: [download-file](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-file)
+     * download-and-extract,_extract,_url.https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.tf.zip?download=1
+       - CM script: [download-and-extract](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/download-and-extract)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-download-and-extract-artifacts/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-download-and-extract-artifacts/_cm.yaml)
+  1. ***Run native script if exists***
+     * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-download-and-extract-artifacts/run.bat)
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-download-and-extract-artifacts/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-download-and-extract-artifacts/_cm.yaml)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-download-and-extract-artifacts/customize.py)***
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-download-and-extract-artifacts/_cm.yaml)
+
+___
+### Script output
+`cmr "test download-and-extract-artifacts " -j`
+#### New environment keys (filter)
+
+* `CM_REPRODUCE_PAPER_XYZ*`
+#### New environment keys auto-detected from customize
diff --git a/script/test-download-and-extract-artifacts/_cm.yaml b/script/test-download-and-extract-artifacts/_cm.yaml
new file mode 100644
index 0000000000..c1961ba300
--- /dev/null
+++ b/script/test-download-and-extract-artifacts/_cm.yaml
@@ -0,0 +1,31 @@
+alias: test-download-and-extract-artifacts
+uid: 51dde7580b404b27
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+cache: false
+
+category: Tests
+
+deps:
+- tags: download,file,_url.https://zenodo.org/record/4735647/files/resnet50_v1.onnx
+  env:
+    CM_DOWNLOAD_FINAL_ENV_NAME: CM_REPRODUCE_PAPER_XYZ
+    CM_DOWNLOAD_CHECKSUM:
+  force_cache: true
+  extra_cache_tags: reproduce,paper,artifact,zenodo,xyz
+- tags: download-and-extract,_extract,_url.https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.tf.zip?download=1
+  env:
+    CM_DOWNLOAD_FINAL_ENV_NAME: CM_REPRODUCE_PAPER_XYZ2
+    CM_EXTRACT_FINAL_ENV_NAME: CM_REPRODUCE_PAPER_XYZ2_EXTRACTED
+#    CM_DOWNLOAD_CHECKSUM:
+  force_cache: true
+  extra_cache_tags: reproduce,paper,artifact,zenodo,xyz2
+
+new_env_keys:
+  - CM_REPRODUCE_PAPER_XYZ*
+
+tags:
+- test
+- download-and-extract-artifacts
diff --git a/script/test-download-and-extract-artifacts/customize.py b/script/test-download-and-extract-artifacts/customize.py
new file mode 100644
index 0000000000..d12f9b3e1d
--- /dev/null
+++ b/script/test-download-and-extract-artifacts/customize.py
@@ -0,0 +1,22 @@
+from cmind import utils
+import os
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    meta = i['meta']
+
+    automation = i['automation']
+
+    quiet = (env.get('CM_QUIET', False) == 'yes')
+
+    return {'return':0}
+
+def 
postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/test-download-and-extract-artifacts/run.bat b/script/test-download-and-extract-artifacts/run.bat new file mode 100644 index 0000000000..648302ca71 --- /dev/null +++ b/script/test-download-and-extract-artifacts/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/script/test-download-and-extract-artifacts/run.sh b/script/test-download-and-extract-artifacts/run.sh new file mode 100644 index 0000000000..3a584c10cf --- /dev/null +++ b/script/test-download-and-extract-artifacts/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" diff --git a/script/test-mlperf-inference-retinanet/README.md b/script/test-mlperf-inference-retinanet/README.md new file mode 100644 index 0000000000..5e5e22731c --- /dev/null +++ b/script/test-mlperf-inference-retinanet/README.md @@ -0,0 +1,137 @@ +Automatically generated README for this automation recipe: **test-mlperf-inference-retinanet** + +Category: **CM interface prototyping** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=test-mlperf-inference-retinanet,1cedbc3b642a403a) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-mlperf-inference-retinanet)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *test,mlperf-inference-win,retinanet,windows* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "test mlperf-inference-win retinanet windows" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=test,mlperf-inference-win,retinanet,windows` + +`cm run script --tags=test,mlperf-inference-win,retinanet,windows ` + +*or* + +`cmr "test mlperf-inference-win retinanet windows"` + +`cmr "test mlperf-inference-win retinanet windows " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'test,mlperf-inference-win,retinanet,windows',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+</details>
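+
+*The dependency list below exposes "CM names" such as `python` and `inference-src`. The sketch assumes the Python API accepts an `adr` dictionary mirroring the `--adr.<name>` CLI flags shown there; that nested-dict form and the version value are assumptions for illustration.*
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run', 'automation': 'script',
+                  'tags': 'test,mlperf-inference-win,retinanet,windows',
+                  # Assumption: tune the "python" dependency by its CM name.
+                  'adr': {'python': {'version': '3.10'}},
+                  'out': 'con'})
```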
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="test,mlperf-inference-win,retinanet,windows"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=test,mlperf-inference-win,retinanet,windows) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "test mlperf-inference-win retinanet windows" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-mlperf-inference-retinanet/_cm.json)***
+     * get,sys-utils-cm
+       - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm)
+     * get,python3
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,generic-python-lib,_requests
+       - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+     * get,loadgen
+       * CM names: `--adr.['loadgen', 'mlperf-inference-loadgen']...`
+       - CM script: [get-mlperf-inference-loadgen](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-loadgen)
+     * mlperf,inference,source
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,dataset,open-images,original
+       - CM script: [get-dataset-openimages](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-openimages)
+     * get,raw,ml-model,retinanet
+       - CM script: [get-ml-model-retinanet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-mlperf-inference-retinanet/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-mlperf-inference-retinanet/_cm.json)
+  1. ***Run native script if exists***
+     * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-mlperf-inference-retinanet/run.bat)
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-mlperf-inference-retinanet/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-mlperf-inference-retinanet/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-mlperf-inference-retinanet/customize.py)***
+  1. 
Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-mlperf-inference-retinanet/_cm.json) + +___ +### Script output +`cmr "test mlperf-inference-win retinanet windows " -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize diff --git a/script/test-mlperf-inference-retinanet/_cm.json b/script/test-mlperf-inference-retinanet/_cm.json new file mode 100644 index 0000000000..fb8be75934 --- /dev/null +++ b/script/test-mlperf-inference-retinanet/_cm.json @@ -0,0 +1,49 @@ +{ + "alias": "test-mlperf-inference-retinanet", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "category": "CM interface prototyping", + "deps": [ + { + "tags": "get,sys-utils-cm" + }, + { + "names": [ + "python", + "python3" + ], + "tags": "get,python3" + }, + { + "tags": "get,generic-python-lib,_requests" + }, + { + "names": [ + "loadgen", "mlperf-inference-loadgen" + ], + "tags": "get,loadgen" + }, + { + "force_env_keys": [ + "CM_GIT_*" + ], + "names": [ + "inference-src" + ], + "tags": "mlperf,inference,source" + }, + { + "tags": "get,dataset,open-images,original" + }, + { + "tags": "get,raw,ml-model,retinanet" + } + ], + "tags": [ + "test", + "mlperf-inference-win", + "retinanet", + "windows" + ], + "uid": "1cedbc3b642a403a" +} diff --git a/script/test-mlperf-inference-retinanet/customize.py b/script/test-mlperf-inference-retinanet/customize.py new file mode 100644 index 0000000000..14e20d1bf2 --- /dev/null +++ b/script/test-mlperf-inference-retinanet/customize.py @@ -0,0 +1,18 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + meta = i['meta'] + + return {'return':0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return':0} diff --git a/script/test-mlperf-inference-retinanet/run.bat b/script/test-mlperf-inference-retinanet/run.bat new file mode 100644 index 0000000000..38970bc0ef --- /dev/null +++ b/script/test-mlperf-inference-retinanet/run.bat @@ -0,0 +1,8 @@ +echo. 
+ +set CUR_DIR=%cd% +set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% + +cd %CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH% + +%CM_PYTHON_BIN_WITH_PATH% python/main.py --profile retinanet-onnxruntime --scenario Offline --model %CM_ML_MODEL_FILE_WITH_PATH% --dataset-path %CM_DATASET_PATH_ROOT%\validation\data --accuracy diff --git a/script/test-mlperf-inference-retinanet/run.sh b/script/test-mlperf-inference-retinanet/run.sh new file mode 100644 index 0000000000..b437374079 --- /dev/null +++ b/script/test-mlperf-inference-retinanet/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +echo "" + +cd ${CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH} + +ln -s ${CM_DATASET_PATH_ROOT}/annotations ${CM_DATASET_PATH_ROOT}/validation/data/annotations + +${CM_PYTHON_BIN_WITH_PATH} python/main.py --profile retinanet-onnxruntime --scenario Offline --model ${CM_ML_MODEL_FILE_WITH_PATH} --dataset-path ${CM_DATASET_PATH_ROOT}/validation/data --accuracy diff --git a/script/test-set-sys-user-cm/README.md b/script/test-set-sys-user-cm/README.md new file mode 100644 index 0000000000..b532a02e02 --- /dev/null +++ b/script/test-set-sys-user-cm/README.md @@ -0,0 +1,120 @@ +Automatically generated README for this automation recipe: **test-set-sys-user-cm** + +Category: **Tests** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=test-set-sys-user-cm,25fdfcf0fe434af2) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-set-sys-user-cm)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *demo,set,sys-user,cm,sys-user-cm* +* Output cached? *True* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "demo set sys-user cm sys-user-cm" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=demo,set,sys-user,cm,sys-user-cm` + +`cm run script --tags=demo,set,sys-user,cm,sys-user-cm ` + +*or* + +`cmr "demo set sys-user cm sys-user-cm"` + +`cmr "demo set sys-user cm sys-user-cm " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'demo,set,sys-user,cm,sys-user-cm',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+</details>
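+
+*A hypothetical sketch: `CM_SUDO` defaults to `sudo` (see "Default environment" below), so overriding it with an empty string should run the user-creation commands without sudo, e.g. in a container where the caller is already root; the override mechanism mirrors `--env.CM_SUDO=...`.*
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run', 'automation': 'script',
+                  'tags': 'demo,set,sys-user,cm,sys-user-cm',
+                  'env': {'CM_SUDO': ''},   # assumption: already running as root
+                  'out': 'con'})
```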
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="demo,set,sys-user,cm,sys-user-cm"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=demo,set,sys-user,cm,sys-user-cm) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "demo set sys-user cm sys-user-cm" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_SUDO: `sudo` + +
+
+___
+### Dependencies on other CM scripts
+
+
+  1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-set-sys-user-cm/_cm.json)
+  1. Run "preprocess" function from customize.py
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-set-sys-user-cm/_cm.json)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-set-sys-user-cm/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-set-sys-user-cm/_cm.json)
+  1. Run "postprocess" function from customize.py
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/test-set-sys-user-cm/_cm.json)
+
+___
+### Script output
+`cmr "demo set sys-user cm sys-user-cm " -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
diff --git a/script/test-set-sys-user-cm/_cm.json b/script/test-set-sys-user-cm/_cm.json
new file mode 100644
index 0000000000..06c20f8d22
--- /dev/null
+++ b/script/test-set-sys-user-cm/_cm.json
@@ -0,0 +1,18 @@
+{
+  "alias": "test-set-sys-user-cm",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "category": "Tests",
+  "cache": true,
+  "default_env": {
+    "CM_SUDO": "sudo"
+  },
+  "tags": [
+    "demo",
+    "set",
+    "sys-user",
+    "cm",
+    "sys-user-cm"
+  ],
+  "uid": "25fdfcf0fe434af2"
+}
diff --git a/script/test-set-sys-user-cm/run.sh b/script/test-set-sys-user-cm/run.sh
new file mode 100644
index 0000000000..c0d513db70
--- /dev/null
+++ b/script/test-set-sys-user-cm/run.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+${CM_SUDO} groupadd -g 1111 ckuser
+${CM_SUDO} useradd -u 2222 -g ckuser --create-home --shell /bin/bash ckuser
+echo "ckuser:ckuser" | ${CM_SUDO} chpasswd
+${CM_SUDO} adduser ckuser sudo
+echo "ckuser ALL=(ALL) NOPASSWD:ALL" | ${CM_SUDO} tee -a /etc/sudoers
diff --git a/script/truncate-mlperf-inference-accuracy-log/README-extra.md b/script/truncate-mlperf-inference-accuracy-log/README-extra.md
new file mode 100644
index 0000000000..71b498f99f
--- /dev/null
+++ b/script/truncate-mlperf-inference-accuracy-log/README-extra.md
@@ -0,0 +1,7 @@
+# MLPerf Inference Accuracy Log Truncator
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) runs the [MLPerf Inference accuracy log truncator](https://github.com/mlcommons/inference/blob/master/tools/submission/truncate_accuracy_log.py) on a given submission folder. 
+ +## How To +```bash +cm run script --tags=run,mlperf,inference,accuracy,truncator --submitter=[SUBMITTER_NAME] --submission_dir=[SUBMISSION_FOLDER] +``` diff --git a/script/truncate-mlperf-inference-accuracy-log/README.md b/script/truncate-mlperf-inference-accuracy-log/README.md new file mode 100644 index 0000000000..a0bd449670 --- /dev/null +++ b/script/truncate-mlperf-inference-accuracy-log/README.md @@ -0,0 +1,146 @@ +Automatically generated README for this automation recipe: **truncate-mlperf-inference-accuracy-log** + +Category: **MLPerf benchmark support** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=truncate-mlperf-inference-accuracy-log,9d5ec20434084d14) ] [ [Notes from the authors, contributors and users](README-extra.md) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/truncate-mlperf-inference-accuracy-log)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *run,mlc,mlcommons,mlperf,inference,mlperf-inference,truncation,truncator,truncate,accuracy,accuracy-log,accuracy-log-trancation,accuracy-log-truncator,mlc-accuracy-log-trancation,mlc-accuracy-log-truncator* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=run,mlc,mlcommons,mlperf,inference,mlperf-inference,truncation,truncator,truncate,accuracy,accuracy-log,accuracy-log-trancation,accuracy-log-truncator,mlc-accuracy-log-trancation,mlc-accuracy-log-truncator` + +`cm run script --tags=run,mlc,mlcommons,mlperf,inference,mlperf-inference,truncation,truncator,truncate,accuracy,accuracy-log,accuracy-log-trancation,accuracy-log-truncator,mlc-accuracy-log-trancation,mlc-accuracy-log-truncator [--input_flags]` + +*or* + +`cmr "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator"` + +`cmr "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator " [--input_flags]` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'run,mlc,mlcommons,mlperf,inference,mlperf-inference,truncation,truncator,truncate,accuracy,accuracy-log,accuracy-log-trancation,accuracy-log-truncator,mlc-accuracy-log-trancation,mlc-accuracy-log-truncator',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+</details>
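+
+*For illustration, a concrete variant of the schematic call above; the two input keys map to `CM_MLPERF_INFERENCE_SUBMISSION_DIR` and `CM_MLPERF_SUBMITTER` (see "Script flags mapped to environment" below), and the values are hypothetical placeholders.*
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run', 'automation': 'script',
+                  'tags': 'run,mlperf,inference,accuracy,truncator',
+                  'submission_dir': '/data/mlperf-submission',  # placeholder
+                  'submitter': 'MyOrg',                         # placeholder
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
```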
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="run,mlc,mlcommons,mlperf,inference,mlperf-inference,truncation,truncator,truncate,accuracy,accuracy-log,accuracy-log-trancation,accuracy-log-truncator,mlc-accuracy-log-trancation,mlc-accuracy-log-truncator"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,mlc,mlcommons,mlperf,inference,mlperf-inference,truncation,truncator,truncate,accuracy,accuracy-log,accuracy-log-trancation,accuracy-log-truncator,mlc-accuracy-log-trancation,mlc-accuracy-log-truncator) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--input=value`  →  `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
+* `--submission_dir=value`  →  `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
+* `--submitter=value`  →  `CM_MLPERF_SUBMITTER=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "input":...})
+```
+
+</details>
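+
+*For reference, a sketch of the command these flags ultimately feed: customize.py (later in this diff) assembles `CM_RUN_CMD` around the MLPerf `truncate_accuracy_log.py` tool. All values below are placeholders standing in for the resolved environment.*
+
+```python
+import os
+
+python_bin = '/usr/bin/python3'              # placeholder for CM_PYTHON_BIN
+inference_src = '/opt/mlperf/inference'      # placeholder for CM_MLPERF_INFERENCE_SOURCE
+submission_dir = '/data/mlperf-submission'   # placeholder for CM_MLPERF_INFERENCE_SUBMISSION_DIR
+submitter = 'MyOrg'                          # placeholder for CM_MLPERF_SUBMITTER
+
+cmd = (python_bin + " '" +
+       os.path.join(inference_src, 'tools', 'submission', 'truncate_accuracy_log.py') +
+       "' --input '" + submission_dir + "' --submitter '" + submitter +
+       "' --backup '" + submission_dir + "_logs'")
+print(cmd)
```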
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/truncate-mlperf-inference-accuracy-log/_cm.json)***
+     * get,python3
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+     * get,mlcommons,inference,src
+       * CM names: `--adr.['inference-src']...`
+       - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
+     * get,mlperf,submission,dir
+       * `if (CM_MLPERF_INFERENCE_SUBMISSION_DIR != on)`
+       * CM names: `--adr.['get-mlperf-submission-dir']...`
+       - CM script: [get-mlperf-inference-submission-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-submission-dir)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/truncate-mlperf-inference-accuracy-log/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/truncate-mlperf-inference-accuracy-log/_cm.json)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/truncate-mlperf-inference-accuracy-log/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/truncate-mlperf-inference-accuracy-log/_cm.json)
+  1. Run "postprocess" function from customize.py
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/truncate-mlperf-inference-accuracy-log/_cm.json)
+
+___
+### Script output
+`cmr "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator " [--input_flags] -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
diff --git a/script/truncate-mlperf-inference-accuracy-log/_cm.json b/script/truncate-mlperf-inference-accuracy-log/_cm.json
new file mode 100644
index 0000000000..3f02725b5b
--- /dev/null
+++ b/script/truncate-mlperf-inference-accuracy-log/_cm.json
@@ -0,0 +1,55 @@
+{
+  "alias": "truncate-mlperf-inference-accuracy-log",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "cache": false,
+  "category": "MLPerf benchmark support",
+  "clean_files": [],
+  "deps": [
+    {
+      "names": [
+        "python",
+        "python3"
+      ],
+      "tags": "get,python3"
+    },
+    {
+      "names": [
+        "inference-src"
+      ],
+      "tags": "get,mlcommons,inference,src"
+    },
+    {
+      "tags": "get,mlperf,submission,dir",
+      "names": [
+        "get-mlperf-submission-dir"
+      ],
+      "skip_if_env": {
+        "CM_MLPERF_INFERENCE_SUBMISSION_DIR": [ "on" ]
+      }
+    }
+  ],
+  "input_mapping": {
+    "input": "CM_MLPERF_INFERENCE_SUBMISSION_DIR",
+    "submission_dir": "CM_MLPERF_INFERENCE_SUBMISSION_DIR",
+    "submitter": "CM_MLPERF_SUBMITTER"
+  },
+  "tags": [
+    "run",
+    "mlc",
+    "mlcommons",
+    "mlperf",
+    "inference",
+    "mlperf-inference",
+    "truncation",
+    "truncator",
+    "truncate",
+    "accuracy",
+    "accuracy-log",
+    "accuracy-log-trancation",
+    "accuracy-log-truncator",
+    "mlc-accuracy-log-trancation",
+    "mlc-accuracy-log-truncator"
+  ],
+  "uid": "9d5ec20434084d14"
+}
diff --git a/script/truncate-mlperf-inference-accuracy-log/customize.py b/script/truncate-mlperf-inference-accuracy-log/customize.py
new file mode 100644
index 0000000000..d13d504ff8 --- /dev/null +++ b/script/truncate-mlperf-inference-accuracy-log/customize.py @@ -0,0 +1,25 @@ +from cmind import utils +import cmind as cm +import os +import subprocess +from os.path import exists + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + submission_dir = env.get("CM_MLPERF_INFERENCE_SUBMISSION_DIR", "") + + if submission_dir == "": + print("Please set CM_MLPERF_INFERENCE_SUBMISSION_DIR") + return {'return': 1, 'error':'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not specified in env in run-mlperf-accuracy-log-truncator'} + + submitter = env.get("CM_MLPERF_SUBMITTER", "CTuning") + + os.system("rm -rf " + submission_dir + "_logs") + + CMD = env['CM_PYTHON_BIN'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission", + "truncate_accuracy_log.py") + "' --input '" + submission_dir + "' --submitter '" + submitter + "' --backup '" + submission_dir + "_logs'" + env['CM_RUN_CMD'] = CMD + + return {'return':0} diff --git a/script/truncate-mlperf-inference-accuracy-log/run.sh b/script/truncate-mlperf-inference-accuracy-log/run.sh new file mode 100644 index 0000000000..1b3c5c3c02 --- /dev/null +++ b/script/truncate-mlperf-inference-accuracy-log/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash +cmd=${CM_RUN_CMD} +echo "${cmd}" +eval "${cmd}" +test $? -eq 0 || exit $? diff --git a/script/upgrade-python-pip/README.md b/script/upgrade-python-pip/README.md new file mode 100644 index 0000000000..fe8a8d8abf --- /dev/null +++ b/script/upgrade-python-pip/README.md @@ -0,0 +1,125 @@ +Automatically generated README for this automation recipe: **upgrade-python-pip** + +Category: **Tests** + +License: **Apache 2.0** + +Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) + +--- +*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=upgrade-python-pip,4343ed2d9a974923) ]* + +--- +#### Summary + +* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/upgrade-python-pip)* +* CM meta description for this script: *[_cm.json](_cm.json)* +* All CM tags to find and reuse this script (see in above meta description): *upgrade,python,pip,python-pip* +* Output cached? *False* +* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts + + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://access.cknowledge.org/playground/?action=install) +* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@ck``` + +#### Print CM help from the command line + +````cmr "upgrade python pip python-pip" --help```` + +#### Customize and run this script from the command line with different variations and flags + +`cm run script --tags=upgrade,python,pip,python-pip` + +`cm run script --tags=upgrade,python,pip,python-pip ` + +*or* + +`cmr "upgrade python pip python-pip"` + +`cmr "upgrade python pip python-pip " ` + + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'upgrade,python,pip,python-pip',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+</details>
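+
+*For readers who do not use CM, a minimal sketch of what this recipe's native run scripts do, expressed directly in Python; it mirrors the run.sh / run.bat shown later in this diff.*
+
+```python
+import subprocess
+import sys
+
+# Upgrade pip with the same interpreter that runs this code;
+# check=True raises CalledProcessError if the upgrade fails.
+subprocess.run([sys.executable, '-m', 'pip', 'install', '--upgrade', 'pip'],
+               check=True)
```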
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="upgrade,python,pip,python-pip"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=upgrade,python,pip,python-pip) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "upgrade python pip python-pip" ` + +___ +### Customization + +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/upgrade-python-pip/_cm.json)***
+     * detect,os
+       - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+     * get,python3
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+  1. Run "preprocess" function from customize.py
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/upgrade-python-pip/_cm.json)
+  1. ***Run native script if exists***
+     * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/upgrade-python-pip/run.bat)
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/upgrade-python-pip/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/upgrade-python-pip/_cm.json)
+  1. Run "postprocess" function from customize.py
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/upgrade-python-pip/_cm.json)
+
+___
+### Script output
+`cmr "upgrade python pip python-pip " -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
diff --git a/script/upgrade-python-pip/_cm.json b/script/upgrade-python-pip/_cm.json
new file mode 100644
index 0000000000..e8652e0712
--- /dev/null
+++ b/script/upgrade-python-pip/_cm.json
@@ -0,0 +1,25 @@
+{
+  "alias": "upgrade-python-pip",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "category": "Tests",
+  "deps": [
+    {
+      "tags": "detect,os"
+    },
+    {
+      "names": [
+        "python",
+        "python3"
+      ],
+      "tags": "get,python3"
+    }
+  ],
+  "tags": [
+    "upgrade",
+    "python",
+    "pip",
+    "python-pip"
+  ],
+  "uid": "4343ed2d9a974923"
+}
diff --git a/script/upgrade-python-pip/run.bat b/script/upgrade-python-pip/run.bat
new file mode 100644
index 0000000000..b6cc1b3749
--- /dev/null
+++ b/script/upgrade-python-pip/run.bat
@@ -0,0 +1,2 @@
+%CM_PYTHON_BIN_WITH_PATH% -m pip install --upgrade pip
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/upgrade-python-pip/run.sh b/script/upgrade-python-pip/run.sh
new file mode 100644
index 0000000000..389a212e49
--- /dev/null
+++ b/script/upgrade-python-pip/run.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+${CM_PYTHON_BIN_WITH_PATH} -m pip install --upgrade pip
+test $? -eq 0 || exit $?
diff --git a/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md b/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md
new file mode 100644
index 0000000000..836b025dd9
--- /dev/null
+++ b/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md
@@ -0,0 +1,17 @@
+This is a wrapper script for [Reproduce MLPerf OctoML TinyML Results](https://github.com/octoml/ck/tree/master/cm-mlops/script/reproduce-mlperf-octoml-tinyml-results),
+which runs that script for the two microtvm variants and their supported models. 
+
+## Install
+```
+cm run script --tags=generate,tiny,octoml,submission
+```
+
+The above command should produce five ELF binaries, which can be located inside the respective cache entries listed by the command below:
+```
+cm show cache --tags=reproduce,tiny,octoml,mlperf
+```
+
+## Install and Flash
+```
+cm run script --tags=generate,tiny,octoml,submission --flash
+```
diff --git a/script/wrapper-reproduce-octoml-tinyml-submission/README.md b/script/wrapper-reproduce-octoml-tinyml-submission/README.md
new file mode 100644
index 0000000000..1ed1c42ed4
--- /dev/null
+++ b/script/wrapper-reproduce-octoml-tinyml-submission/README.md
@@ -0,0 +1,142 @@
+Automatically generated README for this automation recipe: **wrapper-reproduce-octoml-tinyml-submission**
+
+Category: **Reproduce MLPerf benchmarks**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=wrapper-reproduce-octoml-tinyml-submission,b946001e289c4480) ] [ [Notes from the authors, contributors and users](README-extra.md) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/wrapper-reproduce-octoml-tinyml-submission)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *run,generate-tiny,generate,submission,tiny,generate-tiny-submission,results,mlcommons,mlperf,octoml*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=run,generate-tiny,generate,submission,tiny,generate-tiny-submission,results,mlcommons,mlperf,octoml`
+
+`cm run script --tags=run,generate-tiny,generate,submission,tiny,generate-tiny-submission,results,mlcommons,mlperf,octoml [--input_flags]`
+
+*or*
+
+`cmr "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml"`
+
+`cmr "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml " [--input_flags]`
+
+
+#### Run this script from Python
+
+<details>
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'run,generate-tiny,generate,submission,tiny,generate-tiny-submission,results,mlcommons,mlperf,octoml',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print(r['error'])
+
+```
+
+</details>
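+
+*For illustration, a concrete variant of the schematic call above; `recreate_binary` maps to `CM_RECREATE_BINARY` (see "Script flags mapped to environment" below), and the value is a hypothetical placeholder.*
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run', 'automation': 'script',
+                  'tags': 'run,generate-tiny,generate,submission,tiny,generate-tiny-submission,results,mlcommons,mlperf,octoml',
+                  'recreate_binary': 'yes',   # placeholder value
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
```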
+ + +#### Run this script via GUI + +```cmr "cm gui" --script="run,generate-tiny,generate,submission,tiny,generate-tiny-submission,results,mlcommons,mlperf,octoml"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,generate-tiny,generate,submission,tiny,generate-tiny-submission,results,mlcommons,mlperf,octoml) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--flash=value`  →  `CM_FLASH_BOARD=value`
+* `--recreate_binary=value`  →  `CM_RECREATE_BINARY=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({... , "flash":...})
+```
+
+</details>
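+
+*When `CM_RECREATE_BINARY` is set, customize.py (later in this diff) drops matching cache entries before re-running each variation. A sketch of doing that cleanup directly; the tag string below is one of the board/runtime/model combinations the wrapper generates.*
+
+```python
+import cmind
+
+# Remove cached results for one board/runtime/model combination.
+r = cmind.access({'action': 'rm', 'automation': 'cache',
+                  'tags': 'reproduce,tiny,mlperf,octoml,_NUCLEO,_cmsis_nn,_ic',
+                  'force': 'true'})
```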
+ +#### Default environment + +
+Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
+
+#### Versions
+Default version: `r1.0`
+
+* `r1.0`
+___
+### Dependencies on other CM scripts
+
+
+  1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/wrapper-reproduce-octoml-tinyml-submission/_cm.json)***
+     * get,python3
+       * CM names: `--adr.['python', 'python3']...`
+       - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+  1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/wrapper-reproduce-octoml-tinyml-submission/customize.py)***
+  1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/wrapper-reproduce-octoml-tinyml-submission/_cm.json)
+  1. ***Run native script if exists***
+     * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/wrapper-reproduce-octoml-tinyml-submission/run.sh)
+  1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/wrapper-reproduce-octoml-tinyml-submission/_cm.json)
+  1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/wrapper-reproduce-octoml-tinyml-submission/customize.py)***
+  1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/wrapper-reproduce-octoml-tinyml-submission/_cm.json)
+
+___
+### Script output
+`cmr "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml " [--input_flags] -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
diff --git a/script/wrapper-reproduce-octoml-tinyml-submission/_cm.json b/script/wrapper-reproduce-octoml-tinyml-submission/_cm.json
new file mode 100644
index 0000000000..c5ddffdd3f
--- /dev/null
+++ b/script/wrapper-reproduce-octoml-tinyml-submission/_cm.json
@@ -0,0 +1,38 @@
+{
+  "alias": "wrapper-reproduce-octoml-tinyml-submission",
+  "automation_alias": "script",
+  "automation_uid": "5b4e0237da074764",
+  "category": "Reproduce MLPerf benchmarks",
+  "default_version": "r1.0",
+  "deps": [
+    {
+      "names": [
+        "python",
+        "python3"
+      ],
+      "tags": "get,python3"
+    }
+  ],
+  "env": {},
+  "input_mapping": {
+    "flash": "CM_FLASH_BOARD",
+    "recreate_binary": "CM_RECREATE_BINARY"
+  },
+  "tags": [
+    "run",
+    "generate-tiny",
+    "generate",
+    "submission",
+    "tiny",
+    "generate-tiny-submission",
+    "results",
+    "mlcommons",
+    "mlperf",
+    "octoml"
+  ],
+  "uid": "b946001e289c4480",
+  "versions": {
+    "r1.0": {
+    }
+  }
+}
diff --git a/script/wrapper-reproduce-octoml-tinyml-submission/customize.py b/script/wrapper-reproduce-octoml-tinyml-submission/customize.py
new file mode 100644
index 0000000000..f859dd1a9d
--- /dev/null
+++ b/script/wrapper-reproduce-octoml-tinyml-submission/customize.py
@@ -0,0 +1,37 @@
+from cmind import utils
+import os
+import cmind as cm
+
+def preprocess(i):
+
+    os_info = i['os_info']
+    env = i['env']
+    state = i['state']
+    inp = i['input']
+    if 'CM_FLASH_BOARD' in env:
+        script_tags = "flash,tiny"
+    else:
+        script_tags = "reproduce,tiny,mlperf,octoml"
+    boards = ["NUCLEO", "NRF"]
+    microtvm_variants = { "cmsis_nn": [ "ad", "ic", "vww", "kws" ], "native": [ "ic", "ad", "vww", "kws" ] }
+    for board in boards:
+        for microtvm_variant in microtvm_variants:
+            if board == "NRF" and microtvm_variant == "native":
+                continue
+            for model in microtvm_variants[microtvm_variant]:
+                
variation_tags_string="_"+board+",_"+microtvm_variant+",_"+model + tags = script_tags + "," + variation_tags_string + if 'CM_RECREATE_BINARY' in env: + r = cm.access({'action':'rm', 'automation':'cache', 'tags': tags, 'force': 'true'}) + if r['return'] > 0: + return r + r = cm.access({'action':'run', 'automation':'script', 'tags': tags, 'quiet': 'true', 'env': env, + 'input': inp, 'state': state, 'add_deps': inp.get('add_deps', {}), 'add_deps_recursive': + inp.get('add_deps_recursive', {})}) + if r['return'] > 0: + return r + + return {'return':0} + +def postprocess(i): + return {'return':0} diff --git a/script/wrapper-reproduce-octoml-tinyml-submission/run.sh b/script/wrapper-reproduce-octoml-tinyml-submission/run.sh new file mode 100644 index 0000000000..e69de29bb2