From e9f69634fe1515fbbd7c2bcc7599ff85d01c0cef Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Tue, 18 Feb 2025 17:29:57 +0100 Subject: [PATCH] fixed unknown '-v' to show CMX version --- CONTRIBUTORS.md | 4 +- COPYRIGHT.txt | 2 + cm/cmind/__init__.py | 2 +- cm/cmind/config.py | 2 +- cm/cmind/core.py | 2 +- cmx/mlperf-inference/LICENSE.md | 177 ++++ cmx/mlperf-inference/v4.1/README.md | 5 +- cmx/mlperf-inference/v5.0/LICENSE.md | 177 ++++ cmx/mlperf-inference/v5.0/README.md | 5 +- .../v5.0/benchmarks/language/bert.md | 34 + .../v5.0/benchmarks/language/get-bert-data.md | 43 + .../v5.0/benchmarks/language/get-gptj-data.md | 30 + .../language/get-llama3_1-405b-data.md | 41 + .../language/get-mixtral-8x7b-data.md | 28 + .../v5.0/benchmarks/language/gpt-j.md | 37 + .../v5.0/benchmarks/language/llama2-70b.md | 34 + .../v5.0/benchmarks/language/llama3_1-405b.md | 13 + .../v5.0/benchmarks/language/mixtral-8x7b.md | 11 + .../reproducibility/indyscc24-bert.md | 69 ++ docs/requirements.txt | 2 + main.py | 800 +++++++++++++++++- mkdocs.yml | 7 + 22 files changed, 1504 insertions(+), 21 deletions(-) create mode 100644 cmx/mlperf-inference/LICENSE.md create mode 100644 cmx/mlperf-inference/v5.0/LICENSE.md create mode 100644 cmx/mlperf-inference/v5.0/benchmarks/language/bert.md create mode 100644 cmx/mlperf-inference/v5.0/benchmarks/language/get-bert-data.md create mode 100644 cmx/mlperf-inference/v5.0/benchmarks/language/get-gptj-data.md create mode 100644 cmx/mlperf-inference/v5.0/benchmarks/language/get-llama3_1-405b-data.md create mode 100644 cmx/mlperf-inference/v5.0/benchmarks/language/get-mixtral-8x7b-data.md create mode 100644 cmx/mlperf-inference/v5.0/benchmarks/language/gpt-j.md create mode 100644 cmx/mlperf-inference/v5.0/benchmarks/language/llama2-70b.md create mode 100644 cmx/mlperf-inference/v5.0/benchmarks/language/llama3_1-405b.md create mode 100644 cmx/mlperf-inference/v5.0/benchmarks/language/mixtral-8x7b.md create mode 100644 cmx/mlperf-inference/v5.0/benchmarks/language/reproducibility/indyscc24-bert.md diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 95a3d06713..87c936eebb 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -76,7 +76,7 @@ and contributors listed here in alphabetical order: * Bojian Zheng (University of Toronto) * Thomas Zhu (Oxford University) -See more acknowledgments at the end of this [article](https://arxiv.org/abs/2406.16791), +See additional acknowledgments at the end of this [article](https://arxiv.org/abs/2406.16791), which describes the Collective Mind workflow automation framework. # Legacy Collective Knowledge framework (CK) @@ -138,5 +138,5 @@ which describes the Collective Mind workflow automation framework. * @filven * @ValouBambou -See more acknowledgments at the end of this [article](https://doi.org/10.1098/rsta.2020.0211), +See additional acknowledgments at the end of this [article](https://doi.org/10.1098/rsta.2020.0211), which describes the original Collective Knowledge workflow automation framework. diff --git a/COPYRIGHT.txt b/COPYRIGHT.txt index 9c84f3fbe1..a55539f5db 100644 --- a/COPYRIGHT.txt +++ b/COPYRIGHT.txt @@ -3,3 +3,5 @@ Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort. Copyright (c) 2014-2021 cTuning foundation + +Some code and or documentation is reciprocally reused among MLCommons GitHub repositories under the same Apache 2.0 license. 
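The subject line above refers to `cmx -v` previously being rejected as an unknown control flag; the patch accepts it so that the CMX version is shown. Below is a minimal illustrative sketch (not part of the patch) of the flag check modified in `cm/cmind/core.py` further down, assuming `-v` is simply treated as an alias of `-version`:

```python
# Minimal sketch of the control-flag check patched in cm/cmind/core.py (hunk below).
# The list mirrors the one in the diff; adding 'v' stops `cmx -v` from being
# reported as an unknown flag, so the version can be printed instead.
recognized = ['h', 'help', 'v', 'version', 'out', 'j', 'json',
              'save_to_json_file', 'save_to_yaml_file', 'common',
              'ignore_inheritance', 'log', 'logfile', 'raise', 'repro',
              'i', 'f', 'time', 'profile']

control_flags = ['v']  # e.g. parsed from `cmx -v`
unknown_control_flags = [flag for flag in control_flags if flag not in recognized]
assert unknown_control_flags == []  # '-v' is now accepted
```

With this change, `cmx -v` reaches the same code path as `cmx -version` instead of failing with an unknown-flag error.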
diff --git a/cm/cmind/__init__.py b/cm/cmind/__init__.py index 2602dbab6d..b117c0b9ac 100644 --- a/cm/cmind/__init__.py +++ b/cm/cmind/__init__.py @@ -9,7 +9,7 @@ # White paper: https://arxiv.org/abs/2406.16791 # Project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md -__version__ = "4.1.2" +__version__ = "4.1.2.1" from cmind.core import access from cmind.core import x diff --git a/cm/cmind/config.py b/cm/cmind/config.py index a67046c105..78b317c36d 100644 --- a/cm/cmind/config.py +++ b/cm/cmind/config.py @@ -46,7 +46,7 @@ def __init__(self, config_file = None): "error_prefix": "CM error:", "error_prefix2": "CMX detected a problem", "info_cli": "cm {action} {automation} {artifact(s)} {flags} @input.yaml @input.json", - "info_clix": "cmx {action} {automation} {artifact(s)} {CMX control flags (-)} {CMX automation flags (--)}", + "info_clix": "cmx {action} {automation} {artifact(s)} {CMX control flags (-)} {CMX automation flags (--)}\ncmx -h", "default_home_dir": "CM", diff --git a/cm/cmind/core.py b/cm/cmind/core.py index 53cb23469f..8ae0dfaf45 100644 --- a/cm/cmind/core.py +++ b/cm/cmind/core.py @@ -862,7 +862,7 @@ def x(self, i, out = None): # compatibility with older commands like cm/cmx rm cache -f unknown_control_flags = [flag for flag in control_flags if flag not in [ - 'h', 'help', 'version', 'out', 'j', 'json', + 'h', 'help', 'v', 'version', 'out', 'j', 'json', 'save_to_json_file', 'save_to_yaml_file', 'common', 'ignore_inheritance', 'log', 'logfile', 'raise', 'repro', 'i', 'f', 'time', 'profile']] diff --git a/cmx/mlperf-inference/LICENSE.md b/cmx/mlperf-inference/LICENSE.md new file mode 100644 index 0000000000..f433b1a53f --- /dev/null +++ b/cmx/mlperf-inference/LICENSE.md @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/cmx/mlperf-inference/v4.1/README.md b/cmx/mlperf-inference/v4.1/README.md index a0990367ef..c6b8175761 100644 --- a/cmx/mlperf-inference/v4.1/README.md +++ b/cmx/mlperf-inference/v4.1/README.md @@ -1 +1,4 @@ -TBD +Source: https://github.com/mlcommons/inference +License: https://github.com/mlcommons/inference/blob/master/LICENSE.md + +This documentation is adapted to the MLCommons CMX framework, the next generation of MLCommons CM (drop-in replacement for CM). diff --git a/cmx/mlperf-inference/v5.0/LICENSE.md b/cmx/mlperf-inference/v5.0/LICENSE.md new file mode 100644 index 0000000000..f433b1a53f --- /dev/null +++ b/cmx/mlperf-inference/v5.0/LICENSE.md @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/cmx/mlperf-inference/v5.0/README.md b/cmx/mlperf-inference/v5.0/README.md index a0990367ef..c6b8175761 100644 --- a/cmx/mlperf-inference/v5.0/README.md +++ b/cmx/mlperf-inference/v5.0/README.md @@ -1 +1,4 @@ -TBD +Source: https://github.com/mlcommons/inference +License: https://github.com/mlcommons/inference/blob/master/LICENSE.md + +This documentation is adapted to the MLCommons CMX framework, the next generation of MLCommons CM (drop-in replacement for CM). 
diff --git a/cmx/mlperf-inference/v5.0/benchmarks/language/bert.md b/cmx/mlperf-inference/v5.0/benchmarks/language/bert.md new file mode 100644 index 0000000000..57b1cf2246 --- /dev/null +++ b/cmx/mlperf-inference/v5.0/benchmarks/language/bert.md @@ -0,0 +1,34 @@ +--- +hide: + - toc +--- + +# Question Answering using Bert-Large + +=== "MLCommons-Python" + ## MLPerf Reference Implementation in Python + +{{ mlperf_inference_implementation_readme (4, "bert-99", "reference") }} + +{{ mlperf_inference_implementation_readme (4, "bert-99.9", "reference") }} + +=== "Nvidia" + ## Nvidia MLPerf Implementation + +{{ mlperf_inference_implementation_readme (4, "bert-99", "nvidia") }} + +{{ mlperf_inference_implementation_readme (4, "bert-99.9", "nvidia") }} + +=== "Intel" + ## Intel MLPerf Implementation + +{{ mlperf_inference_implementation_readme (4, "bert-99", "intel") }} + +{{ mlperf_inference_implementation_readme (4, "bert-99.9", "intel") }} + +=== "Qualcomm" + ## Qualcomm AI100 MLPerf Implementation + +{{ mlperf_inference_implementation_readme (4, "bert-99", "qualcomm") }} + +{{ mlperf_inference_implementation_readme (4, "bert-99.9", "qualcomm") }} diff --git a/cmx/mlperf-inference/v5.0/benchmarks/language/get-bert-data.md b/cmx/mlperf-inference/v5.0/benchmarks/language/get-bert-data.md new file mode 100644 index 0000000000..430031f319 --- /dev/null +++ b/cmx/mlperf-inference/v5.0/benchmarks/language/get-bert-data.md @@ -0,0 +1,43 @@ +--- +hide: + - toc +--- + +# Question Answering using Bert-Large + +## Dataset + +The benchmark implementation run command will automatically download the validation and calibration datasets and do the necessary preprocessing. In case you want to download only the datasets, you can use the below commands. + +=== "Validation" + BERT validation run uses the SQuAD v1.1 dataset. + + ### Get Validation Dataset + ``` + mlcr get,dataset,squad,validation -j + ``` + +## Model +The benchmark implementation run command will automatically download the required model and do the necessary conversions. In case you want to only download the official model, you can use the below commands. + +Get the Official MLPerf Bert-Large Model + +=== "Pytorch" + + ### Pytorch + ``` + mlcr get,ml-model,bert-large,_pytorch -j + ``` +=== "Onnx" + + ### Onnx + ``` + mlcr get,ml-model,bert-large,_onnx -j + ``` +=== "Tensorflow" + + ### Tensorflow + ``` + mlcr get,ml-model,bert-large,_tensorflow -j + ``` + diff --git a/cmx/mlperf-inference/v5.0/benchmarks/language/get-gptj-data.md b/cmx/mlperf-inference/v5.0/benchmarks/language/get-gptj-data.md new file mode 100644 index 0000000000..34140598e9 --- /dev/null +++ b/cmx/mlperf-inference/v5.0/benchmarks/language/get-gptj-data.md @@ -0,0 +1,30 @@ +--- +hide: + - toc +--- + +# Text Summarization using GPT-J + +## Dataset + +The benchmark implementation run command will automatically download the validation and calibration datasets and do the necessary preprocessing. In case you want to download only the datasets, you can use the below commands. + +=== "Validation" + GPT-J validation run uses the CNNDM dataset. + + ### Get Validation Dataset + ``` + mlcr get,dataset,cnndm,validation -j + ``` + +## Model +The benchmark implementation run command will automatically download the required model and do the necessary conversions. In case you want to only download the official model, you can use the below commands. 
+ +Get the Official MLPerf GPT-J Model + +=== "Pytorch" + + ### Pytorch + ``` + mlcr get,ml-model,gptj,_pytorch -j + ``` diff --git a/cmx/mlperf-inference/v5.0/benchmarks/language/get-llama3_1-405b-data.md b/cmx/mlperf-inference/v5.0/benchmarks/language/get-llama3_1-405b-data.md new file mode 100644 index 0000000000..e1a5e1c3f6 --- /dev/null +++ b/cmx/mlperf-inference/v5.0/benchmarks/language/get-llama3_1-405b-data.md @@ -0,0 +1,41 @@ +--- +hide: + - toc +--- + +# Text Summarization using LLAMA3.1-405b + +## Dataset + +The benchmark implementation run command will automatically download the validation and calibration datasets and do the necessary preprocessing. In case you want to download only the datasets, you can use the below commands. + +=== "Validation" + + ### Get Validation Dataset + ``` + mlcr get,dataset,mlperf,inference,llama3,_validation --outdirname= -j + ``` + +=== "Calibration" + + ### Get Calibration Dataset + ``` + mlcr get,dataset,mlperf,inference,llama3,_calibration --outdirname= -j + ``` + +## Model +The benchmark implementation run command will automatically download the required model and do the necessary conversions. In case you want to only download the official model, you can use the below commands. + +Get the Official MLPerf LLAMA3.1-405b Model + +=== "Pytorch" + + ### Pytorch + ``` + mlcr get,ml-model,llama3 --outdirname= --hf_token= -j + ``` + +!!! tip + + Downloading llama3.1-405B model from Hugging Face will require an [**access token**](https://huggingface.co/settings/tokens) which could be generated for your account. Additionally, ensure that your account has access to the [llama3.1-405B](https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct) model. + diff --git a/cmx/mlperf-inference/v5.0/benchmarks/language/get-mixtral-8x7b-data.md b/cmx/mlperf-inference/v5.0/benchmarks/language/get-mixtral-8x7b-data.md new file mode 100644 index 0000000000..81b90cdb57 --- /dev/null +++ b/cmx/mlperf-inference/v5.0/benchmarks/language/get-mixtral-8x7b-data.md @@ -0,0 +1,28 @@ +--- +hide: + - toc +--- + +## Dataset + +The benchmark implementation run command will automatically download the preprocessed validation and calibration datasets. In case you want to download only the datasets, you can use the below commands. + +=== "Validation" + mixtral-8x7b validation run uses the combined dataset - Open ORCA, GSM8K and MBXP. + + ### Get Validation Dataset + ``` + mlcr get,dataset-mixtral,openorca-mbxp-gsm8k-combined -j + ``` + +## Model +The benchmark implementation run command will automatically download the required model and do the necessary conversions. In case you want to only download the official model, you can use the below commands. 
+ +Get the Official MLPerf MIXTRAL-8x7b Model + +=== "Pytorch" + + ### Pytorch + ``` + mlcr get,ml-model,mixtral -j + ``` \ No newline at end of file diff --git a/cmx/mlperf-inference/v5.0/benchmarks/language/gpt-j.md b/cmx/mlperf-inference/v5.0/benchmarks/language/gpt-j.md new file mode 100644 index 0000000000..d2f5458152 --- /dev/null +++ b/cmx/mlperf-inference/v5.0/benchmarks/language/gpt-j.md @@ -0,0 +1,37 @@ +--- +hide: + - toc +--- + +# Text Summarization using GPT-J + +=== "MLCommons-Python" + ## MLPerf Reference Implementation in Python + + +{{ mlperf_inference_implementation_readme (4, "gptj-99", "reference") }} + + +{{ mlperf_inference_implementation_readme (4, "gptj-99.9", "reference") }} + +=== "Nvidia" + ## Nvidia MLPerf Implementation + + +{{ mlperf_inference_implementation_readme (4, "gptj-99", "nvidia") }} + + +{{ mlperf_inference_implementation_readme (4, "gptj-99.9", "nvidia") }} + +=== "Intel" + ## Intel MLPerf Implementation + +{{ mlperf_inference_implementation_readme (4, "gptj-99", "intel") }} + + +=== "Qualcomm" + ## Qualcomm AI100 MLPerf Implementation + + +{{ mlperf_inference_implementation_readme (4, "gptj-99", "qualcomm") }} + diff --git a/cmx/mlperf-inference/v5.0/benchmarks/language/llama2-70b.md b/cmx/mlperf-inference/v5.0/benchmarks/language/llama2-70b.md new file mode 100644 index 0000000000..40c62cf714 --- /dev/null +++ b/cmx/mlperf-inference/v5.0/benchmarks/language/llama2-70b.md @@ -0,0 +1,34 @@ +--- +hide: + - toc +--- + +# Text Summarization using LLAMA2-70b + +=== "MLCommons-Python" + ## MLPerf Reference Implementation in Python + +{{ mlperf_inference_implementation_readme (4, "llama2-70b-99", "reference") }} + +{{ mlperf_inference_implementation_readme (4, "llama2-70b-99.9", "reference") }} + +=== "Nvidia" + ## Nvidia MLPerf Implementation + +{{ mlperf_inference_implementation_readme (4, "llama2-70b-99", "nvidia") }} + +{{ mlperf_inference_implementation_readme (4, "llama2-70b-99.9", "nvidia") }} + +=== "Neural Magic" + ## Neural Magic MLPerf Implementation + +{{ mlperf_inference_implementation_readme (4, "llama2-70b-99", "neuralmagic") }} + +{{ mlperf_inference_implementation_readme (4, "llama2-70b-99.9", "neuralmagic") }} + +=== "AMD" + ## AMD MLPerf Implementation + +{{ mlperf_inference_implementation_readme (4, "llama2-70b-99", "amd") }} + +{{ mlperf_inference_implementation_readme (4, "llama2-70b-99.9", "amd") }} diff --git a/cmx/mlperf-inference/v5.0/benchmarks/language/llama3_1-405b.md b/cmx/mlperf-inference/v5.0/benchmarks/language/llama3_1-405b.md new file mode 100644 index 0000000000..8163bb1e8e --- /dev/null +++ b/cmx/mlperf-inference/v5.0/benchmarks/language/llama3_1-405b.md @@ -0,0 +1,13 @@ +--- +hide: + - toc +--- + +# Text Summarization using LLAMA3_1-405b + +=== "MLCommons-Python" + ## MLPerf Reference Implementation in Python + +{{ mlperf_inference_implementation_readme (4, "llama3_1-405b-99", "reference", devices=["CPU","CUDA"]) }} + +{{ mlperf_inference_implementation_readme (4, "llama3_1-405b-99.9", "reference", devices=["CPU","CUDA"]) }} \ No newline at end of file diff --git a/cmx/mlperf-inference/v5.0/benchmarks/language/mixtral-8x7b.md b/cmx/mlperf-inference/v5.0/benchmarks/language/mixtral-8x7b.md new file mode 100644 index 0000000000..bdb26ae770 --- /dev/null +++ b/cmx/mlperf-inference/v5.0/benchmarks/language/mixtral-8x7b.md @@ -0,0 +1,11 @@ +--- +hide: + - toc +--- + +# Question Answering, Math, and Code Generation using Mixtral-8x7B + +=== "MLCommons-Python" + ## MLPerf Reference Implementation in Python + +{{ 
mlperf_inference_implementation_readme (4, "mixtral-8x7b", "reference") }} diff --git a/cmx/mlperf-inference/v5.0/benchmarks/language/reproducibility/indyscc24-bert.md b/cmx/mlperf-inference/v5.0/benchmarks/language/reproducibility/indyscc24-bert.md new file mode 100644 index 0000000000..a9df271178 --- /dev/null +++ b/cmx/mlperf-inference/v5.0/benchmarks/language/reproducibility/indyscc24-bert.md @@ -0,0 +1,69 @@ +--- +hide: + - toc +--- + +# Question Answering using Bert Large for IndySCC 2024 + +## Introduction + +This guide is designed for the [IndySCC 2024](https://sc24.supercomputing.org/students/indyscc/) to walk participants through running and optimizing the [MLPerf Inference Benchmark](https://arxiv.org/abs/1911.02549) using [Bert Large](https://github.com/mlcommons/inference/tree/master/language/bert#supported-models) across various software and hardware configurations. The goal is to maximize system throughput (measured in samples per second) without compromising accuracy. + +For a valid MLPerf inference submission, two types of runs are required: a performance run and an accuracy run. In this competition, we focus on the `Offline` scenario, where throughput is the key metric (higher values are better). The official MLPerf inference benchmark for Bert Large requires processing a minimum of 10833 samples in both performance and accuracy modes using the Squad v1.1 dataset. + +## Scoring + +In the IndySCC 2024, your objective will be to run a reference (unoptimized) Python implementation of the MLPerf inference benchmark and complete a successful submission that passes the submission checker. Only one of the available frameworks needs to be submitted. + + +!!! info + Both MLPerf and MLC automation are evolving projects. + If you encounter issues or have questions, please submit them [here](https://github.com/mlcommons/cm4mlops/issues). + +## Artifacts to submit to the SCC committee +All the required files are automatically pushed to the GitHub repository once you successfully complete the given commands. No additional files need to be submitted. + + +=== "MLCommons-Python" + ## MLPerf Reference Implementation in Python + +{{ mlperf_inference_implementation_readme (4, "bert-99", "reference", extra_variation_tags="", fixed_scenarios=["Offline"],categories=["Edge"], setup_tips=False) }} + + +## Submission Commands + +### Generate the actual submission tree + +```bash +mlcr generate,inference,submission \ + --clean \ + --run-checker \ + --tar=yes \ + --env.CM_TAR_OUTFILE=submission.tar.gz \ + --division=open \ + --category=edge \ + --env.CM_DETERMINE_MEMORY_CONFIGURATION=yes \ + --run_style=test \ + --quiet \ + --submitter= +``` + +* Use `--hw_name="My system name"` to give a meaningful system name. + + +### Push Results to GitHub + +Fork the repository at [https://github.com/mlcommons/cm4mlperf-inference](https://github.com/mlcommons/cm4mlperf-inference) and use its `mlperf-inference-results-scc24` branch. + +Run the following command after **replacing `--repo_url` with your GitHub fork URL**. + +```bash +mlcr push,github,mlperf,inference,submission \ + --repo_url=https://github.com//cm4mlperf-inference \ + --repo_branch=mlperf-inference-results-scc24 \ + --commit_message="Results on system " \ + --quiet +``` + +Once the results are uploaded, open a pull request to the origin repository. A GitHub Action will run there, and once it finishes +you can see your submitted results at [https://docs.mlcommons.org/cm4mlperf-inference](https://docs.mlcommons.org/cm4mlperf-inference). 
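The benchmark pages above embed calls such as `{{ mlperf_inference_implementation_readme (4, "bert-99", "reference") }}`. These are resolved at build time by `mkdocs-macros-plugin` (already listed in `docs/requirements.txt`) through the `define_env` hook that `main.py` extends below. A minimal sketch of that mechanism, with a hypothetical macro name:

```python
# Minimal sketch of the mkdocs-macros-plugin hook that main.py (below) builds on.
# The macro name and returned string are hypothetical, for illustration only.
def define_env(env):

    @env.macro
    def hello_benchmark(model):
        # A docs page can then embed {{ hello_benchmark("bert-99") }};
        # the call is replaced by this returned Markdown at build time.
        return f"## {model}\n\nGenerated content for {model}.\n"
```

The real macro in `main.py` follows the same pattern but assembles the full implementation-, device-, and scenario-specific README content.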
diff --git a/docs/requirements.txt b/docs/requirements.txt index 39fab4e1ff..293abf164e 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -2,3 +2,5 @@ mkdocs-material swagger-markdown mkdocs-macros-plugin ruamel.yaml +mkdocs-redirects +mkdocs-site-urls diff --git a/main.py b/main.py index c6c4a348eb..2ccfab2958 100644 --- a/main.py +++ b/main.py @@ -1,20 +1,792 @@ def define_env(env): - @env.macro - def dummy(spaces): - pre_space = "" - for i in range(1,spaces): - pre_space = pre_space + " " - f_pre_space = pre_space - pre_space += " " + @env.macro + def mlperf_inference_implementation_readme( + spaces, + model, + implementation, + *, + implementation_tips=True, + setup_tips=True, + run_tips=True, + skip_test_query_count=False, + fixed_scenarios=[], + devices=[], + frameworks=[], + categories=[], + extra_variation_tags="", + extra_input_string="", + extra_docker_input_string="", + ): + pre_space = "" - content="" + for i in range(1, spaces): + pre_space = pre_space + " " + f_pre_space = pre_space + pre_space += " " - return content - - @env.macro - def print_str(my_str): + content = "" - content="{{"+my_str+"}}" + execution_envs = ["Docker", "Native"] + code_version = "r4.1-dev" + implementation_run_options = [] - return content + if model == "rnnt": + code_version = "r4.0" + + if implementation == "reference": + # Tip + if model != "rnnt": + code_version = "r5.0-dev" + if "99.9" not in model and implementation_tips: + content += f"\n{pre_space}!!! tip\n\n" + content += f"{pre_space} - MLCommons reference implementations are only meant to provide a rules compliant reference implementation for the submitters and in most cases are not best performing. If you want to benchmark any system, it is advisable to use the vendor MLPerf implementation for that system like Nvidia, Intel etc.\n\n" + + if not devices: + devices = ["CPU", "CUDA", "ROCm"] + + if not frameworks: + if model.lower() == "resnet50": + frameworks = ["Onnxruntime", "Tensorflow", "Deepsparse"] + elif model.lower() == "retinanet": + frameworks = ["Onnxruntime", "Pytorch"] + elif "bert" in model.lower(): + frameworks = ["Pytorch", "Deepsparse"] + elif "llama3" in model.lower(): + frameworks = ["Pytorch"] + else: + frameworks = ["Pytorch"] + + elif implementation == "nvidia": + if model in ["mixtral-8x7b"]: + return pre_space + " WIP" + devices = ["CUDA"] + frameworks = ["TensorRT"] + + elif implementation == "amd": + devices = ["cuda"] + frameworks = ["pytorch"] + execution_envs.remove("Docker") + + elif implementation == "neuralmagic": + devices = ["CUDA"] + frameworks = ["pytorch"] + + elif implementation == "intel": + # Tip + if "99.9" not in model and implementation_tips: + content += f"\n{pre_space}!!! tip\n\n" + content += f"{pre_space} - Intel MLPerf inference implementation is available only for datacenter category and has been tested only on a limited number of systems. 
Most of the benchmarks using Intel implementation require at least Intel Sapphire Rapids or higher CPU generation.\n\n" + + if model not in [ + "bert-99", + "bert-99.9", + "gptj-99", + "gptj-99.9", + "resnet50", + "retinanet", + "3d-unet-99", + "3d-unet-99.9", + "dlrm-v2-99", + "dlrm-v2-99.9", + "sdxl", + ]: + return pre_space + " WIP" + if model in [ + "bert-99", + "bert-99.9", + "retinanet", + "3d-unet-99", + "3d-unet-99.9", + ]: + code_version = "r4.0" + devices = ["CPU"] + frameworks = ["Pytorch"] + + elif implementation == "qualcomm": + if model not in ["resnet50", "retinanet", "bert-99", "bert-99.9"]: + return pre_space + " WIP" + + devices = ["QAIC"] + frameworks = ["Glow"] + + elif implementation == "cpp": + if not devices: + devices = ["CPU", "CUDA"] + frameworks = ["Onnxruntime"] + + elif implementation == "ctuning-cpp": + fixed_scenarios = ["SingleStream"] + devices = ["CPU"] + if model.lower() == "resnet50": + frameworks = ["TFLite"] + else: + frameworks = [] + + if not categories: + if model.lower() == "bert-99.9": + categories = ["Datacenter"] + elif model.lower() in ["pointpainting"]: + categories = ["Edge"] + elif ( + "dlrm" in model.lower() + or "llama2" in model.lower() + or "mixtral" in model.lower() + or "llama3" in model.lower() + ): + categories = ["Datacenter"] + else: + categories = ["Edge", "Datacenter"] + + # model name + content += f"{pre_space}{model.upper()}\n\n" + + final_run_mode = "valid" if "short" not in extra_variation_tags else "test" + + for category in categories: + if category == "Edge": + scenarios = ["Offline", "SingleStream"] + if model.lower() in [ + "resnet50", "retinanet"] and not "MultiStream" in scenarios: # MultiStream was duplicating + scenarios.append("MultiStream") + if model.lower() in ["pointpainting"]: + scenarios.remove("Offline") + elif category == "Datacenter": + scenarios = ["Offline", "Server"] + if fixed_scenarios: + scenarios = [ + scenario for scenario in scenarios if scenario in fixed_scenarios] + + content += f"{pre_space}=== \"{category.lower()}\"\n\n" + + cur_space = pre_space + " " + scenarios_string = ", ".join(scenarios) + + content += f"{cur_space}### {category} category \n\n{cur_space} In the {category.lower()} category, {model} has {scenarios_string} scenarios and all the scenarios are mandatory for a closed division submission.\n\n" + + for framework in frameworks: + cur_space1 = cur_space + " " + content += f'{cur_space}=== "{framework}"\n' + content += f"{cur_space1}#### {framework} framework\n\n" + + for device in devices: + if framework.lower() == "deepsparse": + if device.lower() != "cpu": + continue + cur_space2 = cur_space1 + " " + cur_space3 = cur_space2 + " " + cur_space4 = cur_space3 + " " + + content += f"{cur_space1}=== \"{device}\"\n" + content += f"{cur_space2}##### {device} device\n\n" + + # minimum system requirements + content += get_min_system_requirements( + + cur_space2, model, implementation, device + ) + + # to select the execution environments(currently Docker and + # Native) + for execution_env in execution_envs: + if ( + device == "ROCm" or implementation == "qualcomm" + ) and execution_env == "Docker": + continue # docker not currently supported for Qualcomm implementation and ROCm device + if implementation == "nvidia" and execution_env == "Native": + continue # Nvidia implementation only supports execution through docker + content += f'{cur_space2}=== "{execution_env}"\n' + content += f"{cur_space3}###### {execution_env} Environment\n\n" + # ref to MLCFlow installation + content += 
f"{cur_space3}Please refer to the [installation page](site:inference/install/) to install MLCFlow for running the automated benchmark commands.\n\n" + test_query_count = get_test_query_count( + + model, implementation, device.lower() + ) + if ( + device.lower() == "cuda" + and execution_env.lower() == "native" + ): + content += f"\n{cur_space3}!!! tip\n\n" + content += f"{cur_space3} - It is advisable to use the commands in the Docker tab for CUDA. Run the below native command only if you are already on a CUDA setup with cuDNN and TensorRT installed.\n\n" + + if ( + "99.9" not in model + ): # not showing docker command as it is already done for the 99% variant + if implementation == "neuralmagic": + content += ( + f"{cur_space3}####### Run the Inference Server\n" + ) + content += get_inference_server_run_cmd( + spaces + 16, implementation + ) + if run_tips: + # tips regarding the running of nural magic + # server + content += f"\n{cur_space3}!!! tip\n\n" + content += f"{cur_space3} - Host and Port number of the server can be configured through `--host` and `--port` options. Otherwise, server will run on the default host `localhost` and port `8000`.\n\n" + + setup_run_cmd = mlperf_inference_run_command( + spaces + 17, + model, + implementation, + framework.lower(), + category.lower(), + "SingleStream" if model.lower() in [ + "pointpainting"] else "Offline", + device.lower(), + "test", + test_query_count, + True, + skip_test_query_count, + scenarios, + code_version, + extra_variation_tags, + extra_input_string, + extra_docker_input_string, + ) + + common_info = get_common_info( + spaces + 16, + implementation, + model.lower() + ) + + if ( + execution_env == "Native" + ): # Native implementation steps through virtual environment + content += f"{cur_space3}####### Setup a virtual environment for Python\n" + content += get_venv_command(spaces + 16) + content += f"{cur_space3}####### Performance Estimation for Offline Scenario\n" + + content += common_info + + content += setup_run_cmd.replace( + "--docker ", "") + + content += f"{cur_space3}The above command should do a test run of Offline scenario and record the estimated offline_target_qps.\n\n" + + else: # Docker implementation steps + content += f"{cur_space3}####### Docker Container Build and Performance Estimation for Offline Scenario\n" + docker_info = get_docker_info( + spaces + 16, + model, + implementation, + device, + setup_tips, + ) + + content += common_info + + content += docker_info + + content += setup_run_cmd + + if len(scenarios) == 1: + scenario_text = f"""the {scenarios[0]} scenario""" + else: + scenario_text = "each scenario" "" + content += f"{cur_space3}The above command should get you to an interactive shell inside the docker container and do a quick test run for the Offline scenario. Once inside the docker container please do the below commands to do the accuracy + performance runs for {scenario_text}.\n\n" + content += f"{cur_space3}
\n" + content += f"{cur_space3} Please click here to see more options for the docker launch \n\n" + content += f"{cur_space3}* `--docker_mlc_repo=`: to use a custom fork of cm4mlops repository inside the docker image\n\n" + content += f"{cur_space3}* `--docker_mlc_repo_branch=`: to checkout a custom branch of the cloned cm4mlops repository inside the docker image\n\n" + content += f"{cur_space3}* `--docker_cache=no`: to not use docker cache during the image build\n" + + if implementation.lower() == "nvidia": + content += f"{cur_space3}* `--gpu_name=` : The GPUs with supported configs in MLC are `orin`, `rtx_4090`, `rtx_a6000`, `rtx_6000_ada`, `l4`, `t4`and `a100`. For other GPUs, default configuration as per the GPU memory will be used.\n" + + if device.lower() not in ["cuda"]: + content += f"{cur_space3}* `--docker_os=ubuntu`: ubuntu and rhel are supported. \n" + content += f"{cur_space3}* `--docker_os_version=20.04`: [20.04, 22.04] are supported for Ubuntu and [8, 9] for RHEL\n" + + content += f"{cur_space3}
\n" + else: + content += f"{cur_space3} You can reuse the same environment as described for {model.split('.')[0]}.\n" + content += f"{cur_space3}###### Performance Estimation for Offline Scenario\n" + + content += mlperf_inference_run_command( + spaces + 17, + model, + implementation, + framework.lower(), + category.lower(), + "SingleStream" if model.lower() in [ + "pointpainting"] else "Offline", + device.lower(), + "test", + test_query_count, + True, + skip_test_query_count, + scenarios, + code_version, + ).replace("--docker ", "") + content += f"{cur_space3}The above command should do a test run of Offline scenario and record the estimated offline_target_qps.\n\n" + + run_suffix = "" + run_suffix += f"{cur_space3}
\n" + run_suffix += f"{cur_space3} Please click here to see more options for the RUN command\n\n" + run_suffix += f"{cur_space3}* Use `--division=closed` to do a closed division submission which includes compliance runs\n\n" + run_suffix += f"{cur_space3}* Use `--rerun` to do a rerun even when a valid run exists\n" + run_suffix += f"{cur_space3}* Use `--compliance` to do the compliance runs (only applicable for closed division) once the valid runs are successful\n" + + if implementation.lower() == "nvidia": + run_suffix += f"{cur_space3}* `--gpu_name=` : The GPUs with supported configs in MLC are `orin`, `rtx_4090`, `rtx_a6000`, `rtx_6000_ada`, `l4`, `t4`and `a100`. For other GPUs, default configuration as per the GPU memory will be used.\n" + run_suffix += f"{cur_space3}
\n\n" + + if ( + "resnet50" in model.lower() + and framework.lower() == "deepsparse" + ): + run_suffix += f"{cur_space3}You can use any model from [NeuralMagic sparse zoo](https://sparsezoo.neuralmagic.com/?modelSet=computer_vision&architectures=resnet_v1) (trained on Imagenet dataset) as --nm_model_zoo_stub" + if ( + "bert" in model.lower() + and framework.lower() == "deepsparse" + ): + run_suffix += "You can use any model from [NeuralMagic sparse zoo](https://sparsezoo.neuralmagic.com/?modelSet=computer_vision&architectures=resnet_v1) (trained on Imagenet dataset) as --nm_model_zoo_stub" + if ( + "bert" in model.lower() + and framework.lower() == "deepsparse" + ): + run_suffix += f"{cur_space3}
\n" + run_suffix += f"{cur_space3} Please click here to view available generic model stubs for bert deepsparse\n\n" + run_suffix += f"{cur_space3}* **obert-large-pruned95_quant-none-vnni:** zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni\n\n" + run_suffix += f"{cur_space3}* **mobilebert-none-14layer_pruned50_quant-none-vnni:** zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni\n\n" + run_suffix += f"{cur_space3}* **mobilebert-none-base_quant-none:** zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none\n\n" + run_suffix += f"{cur_space3}* **bert-base-pruned95_obs_quant-none:** zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none\n\n" + run_suffix += f"{cur_space3}* **mobilebert-none-14layer_pruned50-none-vnni:** zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni\n\n" + run_suffix += f"{cur_space3}* **obert-base-pruned90-none:** zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none\n\n" + run_suffix += f"{cur_space3}* **obert-large-pruned97_quant-none:** zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none\n\n" + run_suffix += f"{cur_space3}* **bert-base-pruned90-none:** zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none\n\n" + run_suffix += f"{cur_space3}* **bert-large-pruned80_quant-none-vnni:** zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni\n\n" + run_suffix += f"{cur_space3}* **obert-large-pruned95-none-vnni:** zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni\n\n" + run_suffix += f"{cur_space3}* **obert-large-pruned97-none:** zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none\n\n" + run_suffix += f"{cur_space3}* **bert-large-base-none:** zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none\n\n" + run_suffix += f"{cur_space3}* **obert-large-base-none:** zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none\n\n" + run_suffix += f"{cur_space3}* **mobilebert-none-base-none:** zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none\n" + run_suffix += f"{cur_space3}
\n" + + for scenario in scenarios: + content += f"{cur_space3}=== \"{scenario}\"\n{cur_space4}###### {scenario}\n\n" + run_cmd = mlperf_inference_run_command( + spaces + 21, + model, + implementation, + framework.lower(), + category.lower(), + scenario, + device.lower(), + final_run_mode, + test_query_count, + False, + skip_test_query_count, + scenarios, + code_version, + extra_variation_tags, + extra_input_string, + ) + content += run_cmd + # content += run_suffix + + if len(scenarios) > 1: + content += f"{cur_space3}=== \"All Scenarios\"\n{cur_space4}###### All Scenarios\n\n" + run_cmd = mlperf_inference_run_command( + spaces + 21, + model, + implementation, + framework.lower(), + category.lower(), + "All Scenarios", + device.lower(), + final_run_mode, + test_query_count, + False, + skip_test_query_count, + scenarios, + code_version, + extra_variation_tags, + extra_input_string, + ) + content += run_cmd + + content += run_suffix + + readme_prefix = get_readme_prefix( + spaces, model, implementation, extra_variation_tags + ) + + readme_suffix = get_readme_suffix( + spaces, model, implementation, extra_variation_tags + ) + + return readme_prefix + content + readme_suffix + + def get_test_query_count(model, implementation, device, num_devices=1): + + if model == "resnet50": + p_range = 1000 + elif model in ["retinanet", "bert-99", "bert-99.9"]: + p_range = 100 + else: + p_range = 10 + + if device == "cuda": + p_range *= 5 + p_range *= num_devices + + return p_range + + def get_min_system_requirements(spaces, model, implementation, device): + model = model.lower() + min_sys_req_content = "" + min_sys_req_content += f"{spaces}
\n" + min_sys_req_content += f"{spaces}Please click here to see the minimum system requirements for running the benchmark\n\n" + # device memory + if device.lower() == "cuda" and ( + implementation.lower() == "nvidia" or implementation.lower() == "reference" + ): + if implementation.lower() == "nvidia": + if "dlrm" in model: + device_memory = "24GB" + elif "llama2-70b" in model or "mixtral" in model: + device_memory = "80GB" + elif "sdxl" in model or "gptj" in model: + device_memory = "16GB" + else: + device_memory = "8GB" + elif implementation.lower() == "reference": + if "dlrm" in model: + device_memory = "2x80GB" + elif "llama2-70b" in model: + device_memory = "8x80GB" + elif "mixtral" in model: + device_memory = "4x80GB" + elif "sdxl" in model: + device_memory = "24GB(fp32), 16GB(fp16)" + elif "gptj" in model: + device_memory = "80GB(fp32). 40GB(fp16)" + elif "pointpainting" in model: + device_memory = "To be updated" + else: + device_memory = "8GB" + min_sys_req_content += f"{spaces}* **Device Memory**: {device_memory}\n\n" + # disk space + if "dlrm" in model: + disk_space = "500GB" + elif "llama2-70b" in model: + disk_space = "700GB" + elif "mixtral" in model: + disk_space = "100GB" + elif "retinanet" in model: + disk_space = "200GB" + elif "pointpainting" in model: + disk_space = "To be updated" + else: + disk_space = "50GB" + min_sys_req_content += f"{spaces}* **Disk Space**: {disk_space}\n\n" + # System memory + if "dlrm" in model: + system_memory = "512GB" + min_sys_req_content += ( + f"{spaces}* **System Memory(RAM+SWAP)**: {system_memory}\n\n" + ) + min_sys_req_content += f"{spaces}
\n" + return min_sys_req_content + + def get_inference_server_run_cmd(spaces, implementation): + indent = " " * spaces + " " + if implementation == "neuralmagic": + pre_space = " " * spaces + return f"""\n +{pre_space}```bash +{pre_space}mlcr run,vllm-server \\ +{indent}--model=nm-testing/Llama-2-70b-chat-hf-FP8 \\ +{indent}--vllm_model_name=nm-testing/Llama-2-70b-chat-hf-FP8 \\ +{indent}--quiet +{pre_space}```\n""" + + def get_venv_command(spaces): + pre_space = " " * spaces + return f"""\n +{pre_space}```bash +{pre_space}mlcr install,python-venv --name=mlperf +{pre_space}export MLC_SCRIPT_EXTRA_CMD=\"--adr.python.name=mlperf\" +{pre_space}```\n""" + + # contains run command information which is common to both docker and + # native runs + def get_common_info(spaces, implementation, model): + info = "" + pre_space = "" + for i in range(1, spaces): + pre_space = pre_space + " " + pre_space += " " + # pre_space = " " + info += f"\n{pre_space}!!! tip\n\n" + info += f"{pre_space} - Compliance runs can be enabled by adding `--compliance=yes`.\n\n" + if model.lower() not in ["pointpainting"]: + info += f"{pre_space} - Number of threads could be adjusted using `--threads=#`, where `#` is the desired number of threads. This option works only if the implementation in use supports threading.\n\n" + info += f"{pre_space} - Batch size could be adjusted using `--batch_size=#`, where `#` is the desired batch size. This option works only if the implementation in use is supporting the given batch size.\n\n" + elif model.lower() in ["pointpainting"]: + info += f"{pre_space} - The maximum duration for a performance run can be disabled by using `--env.MLC_MLPERF_USE_MAX_DURATION=no`.\n\n" + info += f"{pre_space} - In valid execution mode, the query count for performance mode can be adjusted using `--env.MLC_MLPERF_LOADGEN_QUERY_COUNT=`.\n\n" + + if implementation.lower() == "reference" and model.lower() not in [ + "pointpainting"]: + + info += f"{pre_space} - `_r4.1-dev` could also be given instead of `_r5.0-dev` if you want to run the benchmark with the MLPerf version being 4.1.\n\n" + if model == "rgat": + info += f"{pre_space} - Add `--env.MLC_DATASET_IGBH_PATH=` if you have already downloaded the dataset. The path will be automatically mounted when using docker run.\n\n" + info += f"{pre_space} - Add `--env.MLC_ML_MODEL_RGAT_CHECKPOINT_PATH=` if you have already downloaded the model. The path will be automatically mounted when using docker run.\n\n" + if implementation.lower() == "reference": + info += f"{pre_space} - Add `--adr.mlperf-implementation.tags=_branch.master,_repo.` if you are modifying the official MLPerf Inference implementation in a custom fork.\n\n" + info += f"{pre_space} - Add `--adr.inference-src.tags=_repo.` if you are modifying the model config accuracy script in the submission checker within a custom fork.\n\n" + info += f"{pre_space} - Add `--adr.inference-src.version=custom` if you are using the modified MLPerf Inference code or accuracy script on submission checker within a custom fork.\n\n" + + return info + + def get_docker_info(spaces, model, implementation, + device, setup_tips=True): + info = "" + pre_space = "" + for i in range(1, spaces): + pre_space = pre_space + " " + pre_space += " " + # pre_space = " " + if setup_tips: + info += f"\n{pre_space}!!! tip\n\n" + + if model == "sdxl": + info += f"{pre_space} - `--env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes` option can be used to download the model on the host so that it can be reused across different container lanuches. 
\n\n" + elif "llama3" in model.lower(): + info += f"{pre_space} - `--env.MLC_MLPERF_MODEL_LLAMA3_DOWNLOAD_TO_HOST=yes` option can be used to download the model on the host so that it can be reused across different container lanuches. \n\n" + info += f"{pre_space} - `--env.MLC_MLPERF_DATASET_LLAMA3_DOWNLOAD_TO_HOST=yes` option can be used to download the dataset on the host so that it can be reused across different container lanuches. \n\n" + if implementation.lower() == "nvidia": + info += f"{pre_space} - Default batch size is assigned based on [GPU memory](https://github.com/mlcommons/cm4mlops/blob/dd0c35856969c68945524d5c80414c615f5fe42c/script/app-mlperf-inference-nvidia/_cm.yaml#L1129) or the [specified GPU](https://github.com/mlcommons/cm4mlops/blob/dd0c35856969c68945524d5c80414c615f5fe42c/script/app-mlperf-inference-nvidia/_cm.yaml#L1370). Please click more option for *docker launch* or *run command* to see how to specify the GPU name.\n\n" + info += f"{pre_space} - When run with `--all_models=yes`, all the benchmark models of NVIDIA implementation can be executed within the same container.\n\n" + if "llama2" in model.lower(): + info += f"{pre_space} - The dataset for NVIDIA's implementation of Llama2 is not publicly available. The user must fill [this](https://docs.google.com/forms/d/e/1FAIpQLSc_8VIvRmXM3I8KQaYnKf7gy27Z63BBoI_I1u02f4lw6rBp3g/viewform?pli=1&fbzx=-8842630989397184967) form and be verified as a MLCommons member to access the dataset.\n\n" + info += f"{pre_space} - `PATH_TO_PICKE_FILE` should be replaced with path to the downloaded pickle file.\n\n" + else: + if model == "sdxl": + info += f"\n{pre_space}!!! tip\n\n" + info += f"{pre_space} - `--env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes` option can be used to download the model on the host so that it can be reused across different container lanuches. \n\n" + + # return empty string if nothing is filled inside the tip + if info == f"\n{pre_space}!!! tip\n\n": + return "" + + return info + + def get_readme_prefix(spaces, model, implementation, extra_variation_tags): + readme_prefix = "" + pre_space = " " + # for i in range(1,spaces): + # pre_space = pre_space + " " + # pre_space += " " + + return readme_prefix + + def get_readme_suffix(spaces, model, implementation, extra_variation_tags): + readme_suffix = "" + pre_space = "" + for i in range(1, spaces): + pre_space = pre_space + " " + pre_space += " " + + if implementation == "reference" and not extra_variation_tags: + if not model.endswith("-99"): + model_base_name = model.replace("-99.9", "").replace("-99", "") + readme_suffix += f"{pre_space}* If you want to download the official MLPerf model and dataset for {model} you can follow [this README](get-{model_base_name}-data.md).\n" + if model == "resnet50": + readme_suffix += f"{pre_space}* Please see [mobilenets.md](mobilenets.md) for running mobilenet models for Image Classification." + return readme_suffix + + def get_run_cmd_extra( + f_pre_space, + model, + implementation, + device, + scenario, + scenarios=[], + run_tips=True, + extra_input_string="", + ): + extra_content = "" + f_pre_space += "" + if scenario == "Server" or ( + scenario == "All Scenarios" and "Server" in scenarios + ): + extra_content += f"{f_pre_space} * `` must be determined manually. It is usually around 80% of the Offline QPS, but on some systems, it can drop below 50%. 
+    def get_run_cmd_extra(
+            f_pre_space,
+            model,
+            implementation,
+            device,
+            scenario,
+            scenarios=[],
+            run_tips=True,
+            extra_input_string="",
+    ):
+        extra_content = ""
+        f_pre_space += ""
+        if scenario == "Server" or (
+                scenario == "All Scenarios" and "Server" in scenarios
+        ):
+            extra_content += f"{f_pre_space}    * The value for `--server_target_qps` must be determined manually. It is usually around 80% of the Offline QPS, but on some systems, it can drop below 50%. If a higher value is specified, the latency constraint will not be met, and the run will be considered invalid.\n"
+        if (
+            implementation == "reference"
+            and model in ["sdxl", "gptj-99", "gptj-99.9"]
+            and device in ["cuda", "rocm"]
+            and "precision" not in extra_input_string
+        ):
+            extra_content += f"{f_pre_space}    * `--precision=float16` can help run the model on GPUs with less RAM and can also give better performance.\n"
+        if (
+            implementation == "reference"
+            and model in ["sdxl", "gptj-99", "gptj-99.9"]
+            and device in ["cpu"]
+            and "precision" not in extra_input_string
+        ):
+            extra_content += f"{f_pre_space}    * `--precision=bfloat16` can give better performance.\n"
+        if "gptj" in model and implementation == "reference":
+            extra_content += f"{f_pre_space}    * `--beam-size=1` can help run the model on GPUs with lower device memory; note that a beam size of 4 is mandatory for a closed division submission.\n"
+        if "pointpainting" in model and implementation == "reference":
+            extra_content += f"{f_pre_space}    * The `pointpainting_checkpoint_path`, `deeplab_resnet50_path` and `waymo_path` do not need to be provided inside the Docker container as they are already registered in the MLC cache.\n"
+        if extra_content:
+            extra_content = f"{f_pre_space}!!! tip\n\n" + extra_content
+
+        if run_tips:
+            return extra_content
+        else:
+            return ""
+
+    @env.macro
+    def mlperf_inference_run_command(
+        spaces,
+        model,
+        implementation,
+        framework,
+        category,
+        scenario,
+        device="cpu",
+        execution_mode="test",
+        test_query_count="20",
+        docker=False,
+        skip_test_query_count=False,
+        scenarios=[],
+        code_version="r4.1-dev",
+        extra_variation_tags="",
+        extra_input_string="",
+        extra_docker_input_string="",
+    ):
+        pre_space = ""
+        for i in range(1, spaces):
+            pre_space = pre_space + " "
+        f_pre_space = pre_space
+        pre_space += " "
+
+        if scenario == "All Scenarios":
+            scenario_variation_tag = ",_all-scenarios"
+            scenario_option = ""
+        else:
+            scenario_variation_tag = ""
+            scenario_option = f"\\\n{pre_space} --scenario={scenario}"
+
+        if scenario == "Server" or (
+            scenario == "All Scenarios" and "Server" in scenarios
+        ):
+            scenario_option += (
+                f"\\\n{pre_space} --server_target_qps="
+            )
+
+        run_cmd_extra = get_run_cmd_extra(
+            f_pre_space,
+            model,
+            implementation,
+            device,
+            scenario,
+            scenarios,
+            True,
+            extra_input_string,
+        )
+
+        if docker:
+            docker_cmd_suffix = f" \\\n{pre_space} --docker --quiet"
+            if not skip_test_query_count:
+                docker_cmd_suffix += (
+                    f" \\\n{pre_space} --test_query_count={test_query_count}"
+                )
+            if extra_docker_input_string != "" or extra_input_string != "":
+                docker_cmd_suffix += (
+                    f" \\\n{pre_space} {extra_docker_input_string} {extra_input_string}"
+                )
+            if "resnet50" in model.lower() and framework == "deepsparse":
+                docker_cmd_suffix += f"\\\n{pre_space} --nm_model_zoo_stub=zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned85_quant-none-vnni"
+            if "bert" in model.lower() and framework == "deepsparse":
+                docker_cmd_suffix += f"\\\n{pre_space} --nm_model_zoo_stub=zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none"
+            if "llama2-70b" in model.lower():
+                if implementation == "nvidia":
+                    docker_cmd_suffix += f" \\\n{pre_space} --tp_size=2"
+                    docker_cmd_suffix += f" \\\n{pre_space} --nvidia_llama2_dataset_file_path="
+                elif implementation == "neuralmagic":
+                    docker_cmd_suffix += (
+                        f" \\\n{pre_space} --api_server=http://localhost:8000"
+                    )
+                    docker_cmd_suffix += f" \\\n{pre_space} 
--vllm_model_name=nm-testing/Llama-2-70b-chat-hf-FP8" + docker_cmd_suffix += f" \\\n{pre_space} --adr.mlperf-implementation.tags=_repo.https://github.com/neuralmagic/inference,_branch.vllm" + + if "dlrm-v2" in model.lower() and implementation == "nvidia": + docker_cmd_suffix += f" \\\n{pre_space} --criteo_day23_raw_data_path=" + + if "pointpainting" in model.lower() and implementation == "reference": + docker_cmd_suffix += f" \\\n{pre_space} --pointpainting_checkpoint_path=" + docker_cmd_suffix += f" \\\n{pre_space} --deeplab_resnet50_path=" + docker_cmd_suffix += f" \\\n{pre_space} --waymo_path=" + + if "short" in extra_variation_tags: + full_ds_needed_tag = "" + else: + full_ds_needed_tag = "_full," + + docker_setup_cmd = f"""\n +{f_pre_space}```bash +{f_pre_space}mlcr run-mlperf,inference,_find-performance,{full_ds_needed_tag}_{code_version}{scenario_variation_tag}{extra_variation_tags} \\ +{pre_space} --model={model} \\ +{pre_space} --implementation={implementation} \\ +{pre_space} --framework={framework} \\ +{pre_space} --category={category} {scenario_option} \\ +{pre_space} --execution_mode=test \\ +{pre_space} --device={device} {docker_cmd_suffix} +{f_pre_space}```\n""" + + return docker_setup_cmd + run_cmd_extra + + else: + cmd_suffix = f"\\\n{pre_space} --quiet {extra_input_string}" + + if execution_mode == "test" and not skip_test_query_count: + cmd_suffix += f" \\\n {pre_space} --test_query_count={test_query_count}" + + if "resnet50" in model.lower() and framework == "deepsparse": + cmd_suffix += f"\\\n{pre_space} --nm_model_zoo_stub=zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned85_quant-none-vnni" + if "bert" in model.lower() and framework == "deepsparse": + cmd_suffix += f"\\\n{pre_space} --nm_model_zoo_stub=zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none" + + if "llama2-70b" in model.lower(): + if implementation == "nvidia": + cmd_suffix += f" \\\n{pre_space} --tp_size=" + cmd_suffix += f" \\\n{pre_space} --nvidia_llama2_dataset_file_path=" + elif implementation == "neuralmagic": + cmd_suffix += f" \\\n{pre_space} --api_server=http://localhost:8000" + cmd_suffix += f" \\\n{pre_space} --vllm_model_name=nm-testing/Llama-2-70b-chat-hf-FP8" + cmd_suffix += f" \\\n{pre_space} --adr.mlperf-implementation.tags=_repo.https://github.com/neuralmagic/inference,_branch.vllm" + + if "pointpainting" in model.lower() and implementation == "reference": + cmd_suffix += f" \\\n{pre_space} --pointpainting_checkpoint_path=" + cmd_suffix += f" \\\n{pre_space} --deeplab_resnet50_path=" + cmd_suffix += f" \\\n{pre_space} --waymo_path=" + + if "dlrm-v2" in model and implementation == "nvidia": + cmd_suffix += f" \\\n{pre_space} --criteo_day23_raw_data_path=" + + if "short" in extra_variation_tags: + full_ds_needed_tag = "" + else: + full_ds_needed_tag = "_full," + + run_cmd = f"""\n +{f_pre_space}```bash +{f_pre_space}mlcr run-mlperf,inference,{full_ds_needed_tag}_{code_version}{scenario_variation_tag}{extra_variation_tags} \\ +{pre_space} --model={model} \\ +{pre_space} --implementation={implementation} \\ +{pre_space} --framework={framework} \\ +{pre_space} --category={category} {scenario_option} \\ +{pre_space} --execution_mode={execution_mode} \\ +{pre_space} --device={device} {cmd_suffix} +{f_pre_space}```\n""" + + return run_cmd + run_cmd_extra diff --git a/mkdocs.yml b/mkdocs.yml index 3b036ece08..102511c5cb 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -38,7 +38,14 @@ nav: - MLPerf inference benchmark v5.0: - 
cmx/mlperf-inference/v5.0/index.md - Language Processing: + - Bert-Large: + - Run Commands: cmx/mlperf-inference/v5.0/benchmarks/language/bert.md + - Reproducibility: + - IndySCC24: cmx/mlperf-inference/v5.0/benchmarks/language/reproducibility/indyscc24-bert.md + - GPT-J: cmx/mlperf-inference/v5.0/benchmarks/language/gpt-j.md - LLAMA2-70B: cmx/mlperf-inference/v5.0/benchmarks/language/llama2-70b.md + - LLAMA3-405B: cmx/mlperf-inference/v5.0/benchmarks/language/llama3_1-405b.md + - MIXTRAL-8x7B: cmx/mlperf-inference/v5.0/benchmarks/language/mixtral-8x7b.md - CK Playground: https://access.cKnowledge.org - Releases: https://github.com/mlcommons/ck/releases
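For reference, the `mlperf_inference_run_command` macro added to `main.py` above is intended to be expanded by the mkdocs-macros plugin in the new benchmark pages (such as `cmx/mlperf-inference/v5.0/benchmarks/language/bert.md`), typically via a Jinja call like `{{ mlperf_inference_run_command(...) }}`. As a rough illustration only, a hypothetical call with `model="bert-99"`, `implementation="reference"`, `framework="pytorch"`, `category="edge"`, `scenario="Offline"`, `device="cpu"`, `docker=True` and `code_version="r5.0-dev"` would render a docker setup command along these lines (the argument values are illustrative and not necessarily those used by the actual pages; indentation depends on the `spaces` argument):

```bash
mlcr run-mlperf,inference,_find-performance,_full,_r5.0-dev \
   --model=bert-99 \
   --implementation=reference \
   --framework=pytorch \
   --category=edge \
   --scenario=Offline \
   --execution_mode=test \
   --device=cpu \
   --docker --quiet \
   --test_query_count=20
```

`get_run_cmd_extra` then appends any applicable `!!! tip` admonition after this command on the rendered page.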