From 419766d857bf21db03b4047b9efa077a28b7e71e Mon Sep 17 00:00:00 2001 From: Vighnesh Iyer Date: Mon, 20 Mar 2023 15:24:47 -0700 Subject: [PATCH] Merge the Cadence and Synopsys plugins into Hammer (#713) * merge synopsys and cadence plugins into hammer * get e2e tests working with asap7 on a machines * add ICV link to docs * incorporate genus retiming commit * incorporate genus ILM blackboxes commit * fix type error * add defines for sky130 post-syn sim * Update keys for sky130 at BWRC used for #740 * -top should be optional (#34) * extraneous type:ignores with local mypy * Revert "extraneous type:ignores with local mypy" This reverts commit b0f5f9559656ece62bc6ee2b64440278e1a7bb99. * openroad at bwrc moved to common location --------- Co-authored-by: Harrison Liew --- .gitmodules | 10 +- doc/CAD-Tools/ICV.md | 1 + doc/CAD-Tools/index.rst | 1 + e2e/.gitignore | 1 + e2e/Makefile | 1 + e2e/README.md | 61 +- e2e/configs/asap7-test.yml | 55 - e2e/configs/{test.yml => asap7.yml} | 21 +- e2e/configs/common.yml | 22 + e2e/configs/par-sim.yml | 1 - e2e/configs/{sky130-test.yml => sky130.yml} | 21 - e2e/configs/syn-sim.yml | 1 - e2e/configs/voltus.yml | 1 + e2e/hammer-cadence-plugins | 1 - e2e/hammer-synopsys-plugins | 1 - e2e/pdks/asap7-a.yml | 6 +- e2e/pdks/sky130-a.yml | 17 + e2e/pdks/sky130-bwrc.yml | 16 +- e2e/pyproject.toml | 2 - e2e/src/pass_tb.v | 18 +- hammer/common/cadence/__init__.py | 456 +++++++ hammer/common/synopsys/__init__.py | 76 ++ hammer/drc/icv/README.md | 15 + hammer/drc/icv/__init__.py | 192 +++ hammer/drc/icv/defaults.yml | 37 + hammer/formal/conformal/__init__.py | 290 +++++ hammer/formal/conformal/defaults.yml | 23 + hammer/generate_properties.py | 2 +- hammer/lvs/icv/__init__.py | 240 ++++ hammer/lvs/icv/defaults.yml | 40 + hammer/par/innovus/__init__.py | 1152 +++++++++++++++++ hammer/par/innovus/defaults.yml | 35 + hammer/par/innovus/dump_stackup_to_json.tcl | 69 + hammer/power/joules/__init__.py | 256 ++++ hammer/power/joules/defaults.yml | 11 + hammer/power/voltus/__init__.py | 760 +++++++++++ hammer/power/voltus/defaults.yml | 27 + hammer/sim/vcs/__init__.py | 391 ++++++ hammer/sim/vcs/defaults.yml | 19 + hammer/sim/xcelium/__init__.py | 453 +++++++ hammer/sim/xcelium/defaults.yml | 36 + hammer/sim/xcelium/defaults_types.yml | 24 + hammer/synthesis/genus/__init__.py | 416 ++++++ hammer/synthesis/genus/defaults.yml | 12 + hammer/tech/__init__.py | 52 +- hammer/tech/stackup.py | 2 +- .../nangate45/sram_compiler/__init__.py | 22 +- hammer/technology/sky130/defaults.yml | 4 + .../sky130/sram_compiler/__init__.py | 15 +- hammer/timing/tempus/__init__.py | 288 +++++ hammer/timing/tempus/defaults.yml | 16 + hammer/vlsi/hammer_vlsi_impl.py | 72 +- poetry.lock | 94 +- pyproject.toml | 2 +- tests/test_config.py | 2 +- tests/test_constraints.py | 12 +- tests/utils/stackup.py | 4 +- 57 files changed, 5554 insertions(+), 321 deletions(-) create mode 120000 doc/CAD-Tools/ICV.md create mode 100644 e2e/Makefile delete mode 100644 e2e/configs/asap7-test.yml rename e2e/configs/{test.yml => asap7.yml} (55%) create mode 100644 e2e/configs/common.yml rename e2e/configs/{sky130-test.yml => sky130.yml} (70%) delete mode 160000 e2e/hammer-cadence-plugins delete mode 160000 e2e/hammer-synopsys-plugins create mode 100644 e2e/pdks/sky130-a.yml create mode 100644 hammer/drc/icv/README.md create mode 100644 hammer/drc/icv/__init__.py create mode 100644 hammer/drc/icv/defaults.yml create mode 100644 hammer/formal/conformal/__init__.py create mode 100644 hammer/formal/conformal/defaults.yml create 
mode 100644 hammer/lvs/icv/__init__.py create mode 100644 hammer/lvs/icv/defaults.yml create mode 100644 hammer/par/innovus/__init__.py create mode 100644 hammer/par/innovus/defaults.yml create mode 100644 hammer/par/innovus/dump_stackup_to_json.tcl create mode 100644 hammer/power/joules/__init__.py create mode 100644 hammer/power/joules/defaults.yml create mode 100644 hammer/power/voltus/__init__.py create mode 100644 hammer/power/voltus/defaults.yml create mode 100644 hammer/sim/vcs/__init__.py create mode 100644 hammer/sim/vcs/defaults.yml create mode 100644 hammer/sim/xcelium/__init__.py create mode 100644 hammer/sim/xcelium/defaults.yml create mode 100644 hammer/sim/xcelium/defaults_types.yml create mode 100644 hammer/synthesis/genus/__init__.py create mode 100644 hammer/synthesis/genus/defaults.yml create mode 100644 hammer/timing/tempus/__init__.py create mode 100644 hammer/timing/tempus/defaults.yml diff --git a/.gitmodules b/.gitmodules index e8efa973a..2e9ce7305 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,12 +1,4 @@ -[submodule "e2e/hammer-cadence-plugins"] - path = e2e/hammer-cadence-plugins - url = git@github.com:ucb-bar/hammer-cadence-plugins - branch = packaging [submodule "e2e/hammer-mentor-plugins"] path = e2e/hammer-mentor-plugins url = git@github.com:ucb-bar/hammer-mentor-plugins - branch = packaging -[submodule "e2e/hammer-synopsys-plugins"] - path = e2e/hammer-synopsys-plugins - url = git@github.com:ucb-bar/hammer-synopsys-plugins - branch = packaging + branch = master diff --git a/doc/CAD-Tools/ICV.md b/doc/CAD-Tools/ICV.md new file mode 120000 index 000000000..80edb981e --- /dev/null +++ b/doc/CAD-Tools/ICV.md @@ -0,0 +1 @@ +../../hammer/drc/icv/README.md \ No newline at end of file diff --git a/doc/CAD-Tools/index.rst b/doc/CAD-Tools/index.rst index 0415ac3e4..74991b7ee 100644 --- a/doc/CAD-Tools/index.rst +++ b/doc/CAD-Tools/index.rst @@ -14,3 +14,4 @@ The package should contain an class object named 'tool' to create an instance of Hammer-CAD-Tools Tool-Plugin-Setup OpenROAD + ICV diff --git a/e2e/.gitignore b/e2e/.gitignore index 7f6392bef..155f04a56 100644 --- a/e2e/.gitignore +++ b/e2e/.gitignore @@ -1,2 +1,3 @@ poetry.lock *.log +obj_dir* diff --git a/e2e/Makefile b/e2e/Makefile new file mode 100644 index 000000000..14cfecbdc --- /dev/null +++ b/e2e/Makefile @@ -0,0 +1 @@ +include obj_dir/hammer.d diff --git a/e2e/README.md b/e2e/README.md index fa6c2c9a4..3447b7fba 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -1,31 +1,60 @@ # Hammer End-to-End Integration Tests -This folder contains an end-to-end (RTL -> GDS) smoketest flow using Hammer, using either of the OpenROAD or Cadence toolchains, and the ASAP7 or Skywater 130 PDKs. +This folder contains an end-to-end (RTL -> GDS) smoketest flow using Hammer, using the Cadence toolchain, and the ASAP7 or Skywater 130 PDKs. ## Setup -### Environment +The integration tests use Hammer as a source dependency, so create the e2e poetry environment. -This repo has environment configs (commercial CAD tool paths and license servers) for Berkeley EECS compute nodes (BWRC, Millennium, and instructional machines) in `env`. -Add a file for your specific environment in `env` modeled after the provided files. +```shell +poetry install +poetry shell +``` -### PDKs +We provide configs for Berkeley EECS compute nodes: BWRC (`-bwrc`), Millennium (`-a`), and instructional machines (`-inst`). 
-
-#### ASAP7
+- Environment configs (commercial CAD tool paths and license servers) are in `env`
+- PDK configs for ASAP7 and sky130 (pointers to PDK paths and CAD tool versions) are in `pdks`
+- The common design config (which CAD tool plugins to use and the design input files) is in `configs/common.yml`
+- The PDK-specific design config (clock, placement, and pin constraints, and which SRAM generator to use) is in `configs/{asap7,sky130}.yml`

-If you're using a Berkeley EECS compute node, find the ASAP7 install configs in `pdks/asap7-{a,bwrc,inst}.yml`.
+First, use Hammer to construct a Makefile fragment with targets for all parts of the RTL -> GDS flow.
+Specify the configs according to which PDK and environment you are using.

-If you're using another environment:
+```shell
+hammer-vlsi -e env/a-env.yml -p pdks/asap7-a.yml -p configs/common.yml -p configs/asap7.yml build
+```

-1. Clone the [asap7 repo](https://github.com/The-OpenROAD-Project/asap7)
-2. Create an ASAP7 install config modeled after the configs in `pdks/asap7{a,bwrc,inst}.yml`
+Hammer will generate a Makefile fragment in `obj_dir/hammer.d`.

-### CAD Tools
+### Run the Flow

-### Designs
+- RTL simulation
+  - `make sim-rtl HAMMER_EXTRA_ARGS="-p configs/sim.yml"`
+  - Generated waveform in `obj_dir/sim-rtl-rundir/output.fsdb`
+- Synthesis
+  - `make syn`
+  - Gate-level netlist in `obj_dir/syn-rundir/pass.mapped.v`
+- Post-Synthesis simulation
+  - `make syn-to-sim HAMMER_EXTRA_ARGS="-p configs/syn-sim.yml"`
+  - `make sim-syn HAMMER_EXTRA_ARGS="-p configs/syn-sim.yml"`
+  - Generated waveform and register-forcing ucli script in `obj_dir/sim-syn-rundir`
+- PnR
+  - `make syn-to-par`
+  - `make par`
+  - LVS netlist (`pass.lvs.v`) and GDS (`pass.gds`) in `obj_dir/par-rundir`
+- Post-PnR simulation
+  - `make par-to-sim HAMMER_EXTRA_ARGS="-p configs/par-sim.yml"`
+  - `make sim-par HAMMER_EXTRA_ARGS="-p configs/par-sim.yml"`

-## Running Hammer
+### Custom Setups

-```shell
-hammer-vlsi -e env/a-env.yml -p pdks/asap7-a.yml -p test.yml
-```
+If you're not using a Berkeley EECS compute node, you can create your own environment setup.
+
+- Create an environment config for your node to specify the location of the CAD tools, modeled after the yaml files in `env`
+- Create a PDK config for your node to specify the PDK paths and versions, modeled after the yaml files in `pdks`
+- Point to your custom configs when running `hammer-vlsi`. The rest of the flow should be identical.
+
+#### ASAP7 Install
+
+Clone the [asap7 repo](https://github.com/The-OpenROAD-Project/asap7) somewhere and reference the path in your PDK yaml config.
diff --git a/e2e/configs/asap7-test.yml b/e2e/configs/asap7-test.yml
deleted file mode 100644
index 6da9a5abd..000000000
--- a/e2e/configs/asap7-test.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-# Generate Make include to aid in flow
-vlsi.core.build_system: make
-
-vlsi.inputs.power_spec_type: "cpf"
-vlsi.inputs.power_spec_mode: "auto"
-
-synthesis.inputs:
-  top_module: "pass"
-  input_files: ["src/pass.v"]
-
-# Specify clock signals
-vlsi.inputs.clocks: [
-  {name: "clock", period: "5ns", uncertainty: "0.1ns"}
-]
-
-# Tool options.
-vlsi.core.synthesis_tool: "hammer.synthesis.genus" - -vlsi.core.par_tool: "hammer.par.innovus" -par.innovus.design_flow_effort: "standard" -par.inputs.gds_merge: true -par.innovus.floorplan_mode: "generate" -vlsi.inputs.placement_constraints: - - path: "pass" - type: toplevel - x: 0 - y: 0 - width: 10 - height: 10 - margins: - left: 0 - right: 0 - top: 0 - bottom: 0 - -vlsi.inputs.pin_mode: generated -vlsi.inputs.pin.generate_mode: semi_auto -vlsi.inputs.pin.assignments: [ - {pins: "*", layers: ["M5"], side: "bottom"} -] - -vlsi.inputs.delays: [ - {name: "in", clock: "clock", delay: "1", direction: "input"}, - {name: "out", clock: "clock", delay: "2", direction: "output"} -] - -vlsi.core.sim_tool: "hammer.sim.vcs" -vlsi.core.power_tool: "hammer.power.voltus" -vlsi.core.timing_tool: "hammer.timing.tempus" -vlsi.core.formal_tool: "hammer.formal.conformal" -vlsi.core.drc_tool: "hammer.drc.calibre" -vlsi.core.lvs_tool: "hammer.lvs.calibre" - -# SRAM Compiler compiler options -vlsi.core.sram_generator_tool: "hammer.technology.asap7.sram_compiler" diff --git a/e2e/configs/test.yml b/e2e/configs/asap7.yml similarity index 55% rename from e2e/configs/test.yml rename to e2e/configs/asap7.yml index b3e2064ee..deab023f6 100644 --- a/e2e/configs/test.yml +++ b/e2e/configs/asap7.yml @@ -1,25 +1,9 @@ -# Generate Make include to aid in tlow -vlsi.core.build_system: make - -vlsi.inputs.power_spec_type: "cpf" -vlsi.inputs.power_spec_mode: "auto" - -synthesis.inputs: - top_module: "pass" - input_files: ["src/pass.v"] - # Specify clock signals vlsi.inputs.clocks: [ {name: "clock", period: "5ns", uncertainty: "0.1ns"} ] # Tool options. -vlsi.core.synthesis_tool: "hammer.synthesis.genus" - -vlsi.core.par_tool: "hammer.par.innovus" -par.innovus.design_flow_effort: "standard" -par.inputs.gds_merge: true -par.innovus.floorplan_mode: "generate" vlsi.inputs.placement_constraints: - path: "pass" type: toplevel @@ -44,5 +28,6 @@ vlsi.inputs.delays: [ {name: "out", clock: "clock", delay: "2", direction: "output"} ] -vlsi.core.sim_tool: "hammer.sim.vcs" -vlsi.core.power_tool: "hammer.power.voltus" + +# SRAM Compiler compiler options +vlsi.core.sram_generator_tool: "hammer.technology.asap7.sram_compiler" diff --git a/e2e/configs/common.yml b/e2e/configs/common.yml new file mode 100644 index 000000000..7384b051a --- /dev/null +++ b/e2e/configs/common.yml @@ -0,0 +1,22 @@ +# Generate Make include to aid in flow +vlsi.core.build_system: make + +vlsi.inputs.power_spec_type: "cpf" +vlsi.inputs.power_spec_mode: "auto" + +synthesis.inputs: + top_module: "pass" + input_files: ["src/pass.v"] + +vlsi.core.synthesis_tool: "hammer.synthesis.genus" + +vlsi.core.par_tool: "hammer.par.innovus" +par.innovus.design_flow_effort: "standard" +par.inputs.gds_merge: true +par.innovus.floorplan_mode: "generate" + +vlsi.core.sim_tool: "hammer.sim.vcs" +vlsi.core.timing_tool: "hammer.timing.tempus" +vlsi.core.formal_tool: "hammer.formal.conformal" +vlsi.core.drc_tool: "hammer.drc.calibre" +vlsi.core.lvs_tool: "hammer.lvs.calibre" diff --git a/e2e/configs/par-sim.yml b/e2e/configs/par-sim.yml index 931af3135..68d96d4fc 100644 --- a/e2e/configs/par-sim.yml +++ b/e2e/configs/par-sim.yml @@ -10,4 +10,3 @@ sim.inputs: options: ['-timescale=1ns/10ps'] options_meta: append timing_annotated: false - diff --git a/e2e/configs/sky130-test.yml b/e2e/configs/sky130.yml similarity index 70% rename from e2e/configs/sky130-test.yml rename to e2e/configs/sky130.yml index 1ea084825..93be0dc26 100644 --- a/e2e/configs/sky130-test.yml +++ b/e2e/configs/sky130.yml 
@@ -1,26 +1,8 @@ -# Generate Make include to aid in flow -vlsi.core.build_system: make - -vlsi.inputs.power_spec_type: "cpf" -vlsi.inputs.power_spec_mode: "auto" - -synthesis.inputs: - top_module: "pass" - input_files: ["src/pass.v"] - # Specify clock signals vlsi.inputs.clocks: [ {name: "clock", period: "10ns", uncertainty: "0.1ns"} ] -# Tool options. -vlsi.core.synthesis_tool: "hammer.synthesis.genus" - -# Placement Constraints -vlsi.core.par_tool: "hammer.par.innovus" -par.innovus.design_flow_effort: "standard" -par.inputs.gds_merge: true -par.innovus.floorplan_mode: "generate" vlsi.inputs.placement_constraints: - path: "pass" type: toplevel @@ -70,8 +52,5 @@ vlsi.inputs.delays: [ {name: "out", clock: "clock", delay: "2", direction: "output"} ] -vlsi.core.sim_tool: "hammer.sim.vcs" -vlsi.core.power_tool: "hammer.power.voltus" - # SRAM Compiler compiler options vlsi.core.sram_generator_tool: "hammer.technology.sky130.sram_compiler" diff --git a/e2e/configs/syn-sim.yml b/e2e/configs/syn-sim.yml index 9fe2e01fc..797b644ae 100644 --- a/e2e/configs/syn-sim.yml +++ b/e2e/configs/syn-sim.yml @@ -10,4 +10,3 @@ sim.inputs: options: ['-timescale=1ns/10ps'] options_meta: append timing_annotated: true - diff --git a/e2e/configs/voltus.yml b/e2e/configs/voltus.yml index d2166a924..c2880a71b 100644 --- a/e2e/configs/voltus.yml +++ b/e2e/configs/voltus.yml @@ -1,5 +1,6 @@ # Voltus Power Analysis +vlsi.core.power_tool: "hammer.power.voltus" power.inputs: level: "par" tb_name: "pass_tb" diff --git a/e2e/hammer-cadence-plugins b/e2e/hammer-cadence-plugins deleted file mode 160000 index 902657b8b..000000000 --- a/e2e/hammer-cadence-plugins +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 902657b8bdba7aec12726d6d3c85a2639a4030ad diff --git a/e2e/hammer-synopsys-plugins b/e2e/hammer-synopsys-plugins deleted file mode 160000 index e53fa5c51..000000000 --- a/e2e/hammer-synopsys-plugins +++ /dev/null @@ -1 +0,0 @@ -Subproject commit e53fa5c51f37937c83df3b004693552f59015c90 diff --git a/e2e/pdks/asap7-a.yml b/e2e/pdks/asap7-a.yml index 8d87f25fd..a17a35076 100644 --- a/e2e/pdks/asap7-a.yml +++ b/e2e/pdks/asap7-a.yml @@ -1,12 +1,12 @@ -vlsi.core.technology: asap7 +vlsi.core.technology: "hammer.technology.asap7" technology.asap7.pdk_install_dir: "/nscratch/asap7/asap7-master/asap7PDK_r1p7" technology.asap7.stdcell_install_dir: "/nscratch/asap7/asap7-master/asap7sc7p5t_27" synthesis.genus.version: "211" par.innovus.version: "211" -sim.vcs.version: "S-2021.09-SP1-1" -sim.vcs.verdi_home: "/ecad/tools/synopsys/verdi/${sim.vcs.version}" +sim.vcs.version: "P-2019.06-SP1" +sim.vcs.verdi_home: "/ecad/tools/synopsys/verdi/P-2019.06-SP2-2" sim.vcs.verdi_home_meta: lazysubst lvs.calibre.version: "2022.2_24.16" diff --git a/e2e/pdks/sky130-a.yml b/e2e/pdks/sky130-a.yml new file mode 100644 index 000000000..2eff659b5 --- /dev/null +++ b/e2e/pdks/sky130-a.yml @@ -0,0 +1,17 @@ +vlsi.core.technology: "hammer.technology.sky130" +technology.sky130: + sky130A: "/nscratch/vighneshiyer/miniconda3/envs/sky130/share/pdk/sky130A" + sram22_sky130_macros: "/nscratch/sky130/sram22_sky130_macros" + # sky130_nda: + +synthesis.genus.version: "211" +par.innovus.version: "211" + +sim.vcs.version: "P-2019.06-SP1" +sim.vcs.verdi_home: "/ecad/tools/synopsys/verdi/P-2019.06-SP2-2" +sim.vcs.verdi_home_meta: lazysubst + +synthesis.yosys.yosys_bin: "/nscratch/vighneshiyer/miniconda3/envs/sky130/bin/yosys" +par.openroad.openroad_bin: "/nscratch/vighneshiyer/miniconda3/envs/sky130/bin/openroad" +drc.magic.magic_bin: 
"/nscratch/vighneshiyer/miniconda3/envs/sky130/bin/magic" +lvs.netgen.netgen_bin: "/nscratch/vighneshiyer/miniconda3/envs/sky130/bin/netgen" diff --git a/e2e/pdks/sky130-bwrc.yml b/e2e/pdks/sky130-bwrc.yml index 2b4d4bc69..3e0f8571c 100644 --- a/e2e/pdks/sky130-bwrc.yml +++ b/e2e/pdks/sky130-bwrc.yml @@ -1,6 +1,7 @@ vlsi.core.technology: "hammer.technology.sky130" technology.sky130: - sky130A: "/tools/commercial/skywater/local/sky130A" + sky130A: "/tools/commercial/skywater/local/chipyard-tutorial/.conda-sky130/share/pdk/sky130A" + sram22_sky130_macros: "/tools/commercial/skywater/local/chipyard-tutorial/sram22_sky130_macros" openram_lib: "/tools/commercial/skywater/local/sky130_sram_macros" sky130_nda: "/tools/commercial/skywater/swtech130/skywater-src-nda-20221031" @@ -11,7 +12,12 @@ sim.vcs.version: "S-2021.09-SP1-1" sim.vcs.verdi_home: "/tools/synopsys/verdi/${sim.vcs.version}" sim.vcs.verdi_home_meta: lazysubst -synthesis.yosys.yosys_bin: "/users/nayiri/conda/envs/yosys/bin/yosys" -par.openroad.openroad_bin: "/users/nayiri/conda/envs/openroad/bin/openroad" -drc.magic.magic_bin: "/tools/C/nayiri/installs/bin/magic" -lvs.netgen.netgen_bin: "/tools/B/nayiri/installs/netgen/bin/netgen" +synthesis.yosys.yosys_bin: "/tools/commercial/skywater/local/chipyard-tutorial/.conda-yosys/bin/yosys" +par.openroad.openroad_bin: "/tools/commercial/skywater/local/chipyard-tutorial/.conda-openroad/bin/openroad" +par.openroad.klayout_bin: "/tools/commercial/skywater/local/chipyard-tutorial/.conda-klayout/bin/klayout" +drc.magic.magic_bin: "/tools/commercial/skywater/local/chipyard-tutorial/.conda-signoff/bin/magic" +lvs.netgen.netgen_bin: "/tools/commercial/skywater/local/chipyard-tutorial/.conda-signoff/bin/netgen" + +# speed up tutorial runs & declutter log output +par.openroad.timing_driven: false +par.openroad.write_reports: false diff --git a/e2e/pyproject.toml b/e2e/pyproject.toml index ee258dbc6..a63016fb8 100644 --- a/e2e/pyproject.toml +++ b/e2e/pyproject.toml @@ -10,8 +10,6 @@ repository = "https://github.com/ucb-bar/hammer" [tool.poetry.dependencies] python = "^3.9" hammer-vlsi = {path = "../.", extras = ["asap7"], develop = true} -hammer-cadence-plugins = {path = "hammer-cadence-plugins", develop = true} -hammer-synopsys-plugins = {path = "hammer-synopsys-plugins", develop = true} hammer-mentor-plugins = {path = "hammer-mentor-plugins", develop = true} [build-system] diff --git a/e2e/src/pass_tb.v b/e2e/src/pass_tb.v index 4f2e4449a..a248667aa 100644 --- a/e2e/src/pass_tb.v +++ b/e2e/src/pass_tb.v @@ -3,11 +3,11 @@ module pass_tb; reg clk = 0; - always #(5) clk = ~clk; + always #(20) clk = ~clk; - reg in; + reg in; wire out; - + pass pass_0 ( .clock(clk), .in(in), @@ -21,27 +21,27 @@ module pass_tb; in = 0; repeat(3) @(posedge clk); - - #1; + + #5; in = 1; - + if (out == 1) begin $display("***Test Failed***"); end @(posedge clk); - #1; + #5; if (out == 0) begin $display("***Test Failed***"); end - #1; + #5; in = 0; @(posedge clk); - #1; + #5; if (out == 1) begin $display("***Test Failed***"); diff --git a/hammer/common/cadence/__init__.py b/hammer/common/cadence/__init__.py index e69de29bb..3fd05ccb0 100644 --- a/hammer/common/cadence/__init__.py +++ b/hammer/common/cadence/__init__.py @@ -0,0 +1,456 @@ +from functools import reduce +from typing import List, Optional, Dict, Any, Callable +import os +import json +import copy +import inspect + +from hammer.vlsi import HammerTool, HasSDCSupport, HasCPFSupport, HasUPFSupport, TCLTool, ILMStruct +from hammer.vlsi.constraints import MMMCCorner, 
MMMCCornerType +from hammer.utils import optional_map, add_dicts +import hammer.tech as hammer_tech + + +class CadenceTool(HasSDCSupport, HasCPFSupport, HasUPFSupport, TCLTool, HammerTool): + """Mix-in trait with functions useful for Cadence-based tools.""" + + @property + def env_vars(self) -> Dict[str, str]: + """ + Get the list of environment variables required for this tool. + Note to subclasses: remember to include variables from super().env_vars! + """ + # Use the base extra_env_variables and ensure that our custom variables are on top. + try: + list_of_vars = self.get_setting("cadence.extra_env_vars") # type: List[Dict[str, Any]] + assert isinstance(list_of_vars, list) + except KeyError: + list_of_vars = [] + + cadence_vars = { + "CDS_LIC_FILE": self.get_setting("cadence.CDS_LIC_FILE"), + "CADENCE_HOME": self.get_setting("cadence.cadence_home") + } + + return reduce(add_dicts, [dict(super().env_vars)] + list_of_vars + [cadence_vars], {}) + + def version_number(self, version: str) -> int: + """ + Assumes versions look like MAJOR_ISRMINOR and we will have less than 100 minor versions. + """ + main_version = int(version.split("_")[0]) # type: int + minor_version = 0 # type: int + if "_" in version: + minor_version = int(version.split("_")[1][3:]) + return main_version * 100 + minor_version + + @property + def header(self) -> str: + """ + Header for all generated Tcl scripts + """ + header_text = """ + # -------------------------------------------------------------------------------- + # This script was written and developed by HAMMER at UC Berkeley; however, the + # underlying commands and reports are copyrighted by Cadence. We thank Cadence for + # granting permission to share our research to help promote and foster the next + # generation of innovators. + # -------------------------------------------------------------------------------- + """ + return inspect.cleandoc(header_text) + + def get_timing_libs(self, corner: Optional[MMMCCorner] = None) -> str: + """ + Helper function to get the list of ASCII timing .lib files in space separated format. + Uses a preference filter to collect NLDM, ECSM, or CCS libraries. + + :param corner: Optional corner to consider. If supplied, this will use filter_for_mmmc to select libraries that + match a given corner (voltage/temperature). + :return: List of lib files separated by spaces + """ + + lib_pref = self.get_setting("vlsi.technology.timing_lib_pref") # type: str + + pre_filters = optional_map(corner, lambda c: [self.filter_for_mmmc(voltage=c.voltage, + temp=c.temp)]) # type: Optional[List[Callable[[hammer_tech.Library],bool]]] + + lib_args = self.technology.read_libs([hammer_tech.filters.get_timing_lib_with_preference(lib_pref)], + hammer_tech.HammerTechnologyUtils.to_plain_item, + extra_pre_filters=pre_filters) + return " ".join(lib_args) + + def get_mmmc_qrc(self, corner: MMMCCorner) -> str: + lib_args = self.technology.read_libs([hammer_tech.filters.qrc_tech_filter], + hammer_tech.HammerTechnologyUtils.to_plain_item, + extra_pre_filters=[ + self.filter_for_mmmc(voltage=corner.voltage, temp=corner.temp)]) + return " ".join(lib_args) + + def get_qrc_tech(self) -> str: + """ + Helper function to get the list of rc corner tech files in space separated format. 
+ + :return: List of qrc tech files separated by spaces + """ + lib_args = self.technology.read_libs([ + hammer_tech.filters.qrc_tech_filter + ], hammer_tech.HammerTechnologyUtils.to_plain_item) + return " ".join(lib_args) + + def generate_sdc_files(self) -> List[str]: + """ + Generate SDC files for use in mmmc script. + """ + sdc_files = [] # type: List[str] + + # Generate constraints + clock_constraints_fragment = os.path.join(self.run_dir, "clock_constraints_fragment.sdc") + self.write_contents_to_path(self.sdc_clock_constraints, clock_constraints_fragment) + sdc_files.append(clock_constraints_fragment) + + # Generate port constraints. + pin_constraints_fragment = os.path.join(self.run_dir, "pin_constraints_fragment.sdc") + self.write_contents_to_path(self.sdc_pin_constraints, pin_constraints_fragment) + sdc_files.append(pin_constraints_fragment) + + return sdc_files + + def generate_mmmc_script(self) -> str: + """ + Output for the mmmc.tcl script. + Innovus (init_design) requires that the timing script be placed in a separate file. + + :return: Contents of the mmmc script. + """ + mmmc_output = [] # type: List[str] + + def append_mmmc(cmd: str) -> None: + self.verbose_tcl_append(cmd, mmmc_output) + + # Create an Innovus constraint mode. + constraint_mode = "my_constraint_mode" + + sdc_files = self.generate_sdc_files() + + # Add the post-synthesis SDC, if present. + post_synth_sdc = self.post_synth_sdc + if post_synth_sdc is not None: + sdc_files.append(post_synth_sdc) + + # TODO: add floorplanning SDC + if len(sdc_files) > 0: + sdc_files_arg = "-sdc_files [list {sdc_files}]".format( + sdc_files=" ".join(sdc_files) + ) + else: + blank_sdc = os.path.join(self.run_dir, "blank.sdc") + self.run_executable(["touch", blank_sdc]) + sdc_files_arg = "-sdc_files {{ {} }}".format(blank_sdc) + append_mmmc("create_constraint_mode -name {name} {sdc_files_arg}".format( + name=constraint_mode, + sdc_files_arg=sdc_files_arg + )) + + corners = self.get_mmmc_corners() # type: List[MMMCCorner] + # In parallel, create the delay corners + if corners: + setup_view_names = [] # type: List[str] + hold_view_names = [] # type: List[str] + extra_view_names = [] # type: List[str] + for corner in corners: + # Setting up views for all defined corner types: setup, hold, extra + if corner.type is MMMCCornerType.Setup: + corner_name = "{n}.{t}".format(n=corner.name, t="setup") + setup_view_names.append("{n}_view".format(n=corner_name)) + elif corner.type is MMMCCornerType.Hold: + corner_name = "{n}.{t}".format(n=corner.name, t="hold") + hold_view_names.append("{n}_view".format(n=corner_name)) + elif corner.type is MMMCCornerType.Extra: + corner_name = "{n}.{t}".format(n=corner.name, t="extra") + extra_view_names.append("{n}_view".format(n=corner_name)) + else: + raise ValueError("Unsupported MMMCCornerType") + + # First, create Innovus library sets + append_mmmc("create_library_set -name {name}_set -timing [list {list}]".format( + name=corner_name, + list=self.get_timing_libs(corner) + )) + # Skip opconds for now + # Next, create Innovus timing conditions + append_mmmc("create_timing_condition -name {name}_cond -library_sets [list {name}_set]".format( + name=corner_name + )) + # Next, create Innovus rc corners from qrc tech files + append_mmmc("create_rc_corner -name {name}_rc -temperature {tempInCelsius} {qrc}".format( + name=corner_name, + tempInCelsius=str(corner.temp.value), + qrc="-qrc_tech {}".format(self.get_mmmc_qrc(corner)) if self.get_mmmc_qrc(corner) != '' else '' + )) + # Next, create an Innovus delay 
corner. + append_mmmc( + "create_delay_corner -name {name}_delay -timing_condition {name}_cond -rc_corner {name}_rc".format( + name=corner_name + )) + # Next, create the analysis views + append_mmmc("create_analysis_view -name {name}_view -delay_corner {name}_delay -constraint_mode {constraint}".format( + name=corner_name, + constraint=constraint_mode + )) + + # Finally, apply the analysis view. + # TODO: should not need to analyze extra views as well. Defaulting to hold for now (min. runtime impact). + append_mmmc("set_analysis_view -setup {{ {setup_views} }} -hold {{ {hold_views} {extra_views} }}".format( + setup_views=" ".join(setup_view_names), + hold_views=" ".join(hold_view_names), + extra_views=" ".join(extra_view_names) + )) + else: + # First, create an Innovus library set. + library_set_name = "my_lib_set" + append_mmmc("create_library_set -name {name} -timing [list {list}]".format( + name=library_set_name, + list=self.get_timing_libs() + )) + # Next, create an Innovus timing condition. + timing_condition_name = "my_timing_condition" + append_mmmc("create_timing_condition -name {name} -library_sets [list {list}]".format( + name=timing_condition_name, + list=library_set_name + )) + # extra junk: -opcond ... + rc_corner_name = "rc_cond" + append_mmmc("create_rc_corner -name {name} {qrc}".format( + name=rc_corner_name, + qrc="-qrc_tech {}".format(self.get_qrc_tech()) if self.get_qrc_tech() != '' else '' + )) + # Next, create an Innovus delay corner. + delay_corner_name = "my_delay_corner" + append_mmmc( + "create_delay_corner -name {name} -timing_condition {timing_cond} -rc_corner {rc}".format( + name=delay_corner_name, + timing_cond=timing_condition_name, + rc=rc_corner_name + )) + # extra junk: -rc_corner my_rc_corner_maybe_worst + # Next, create an Innovus analysis view. + analysis_view_name = "my_view" + append_mmmc("create_analysis_view -name {name} -delay_corner {corner} -constraint_mode {constraint}".format( + name=analysis_view_name, corner=delay_corner_name, constraint=constraint_mode)) + # Finally, apply the analysis view. + # TODO: introduce different views of setup/hold and true multi-corner + append_mmmc("set_analysis_view -setup {{ {setup_view} }} -hold {{ {hold_view} }}".format( + setup_view=analysis_view_name, + hold_view=analysis_view_name + )) + + return "\n".join(mmmc_output) + + def generate_dont_use_commands(self) -> List[str]: + """ + Generate a list of dont_use commands for Cadence tools. + """ + + def map_cell(in_cell: str) -> str: + # "*/" is needed for "get_db lib_cells " + if in_cell.startswith("*/"): + mapped_cell = in_cell # type: str + else: + mapped_cell = "*/" + in_cell + + # Check for cell existence first to avoid Genus erroring out. + get_db_str = "[get_db lib_cells {mapped_cell}]".format(mapped_cell=mapped_cell) + # Escaped version for puts. + get_db_str_escaped = get_db_str.replace('[', '\[').replace(']', '\]') + return """ +puts "set_dont_use {get_db_str_escaped}" +if {{ {get_db_str} ne "" }} {{ + set_dont_use {get_db_str} +}} else {{ + puts "WARNING: cell {mapped_cell} was not found for set_dont_use" +}} + """.format(get_db_str=get_db_str, get_db_str_escaped=get_db_str_escaped, mapped_cell=mapped_cell) + + return list(map(map_cell, self.get_dont_use_list())) + + def map_power_spec_name(self) -> str: + """ + Return the CPF or UPF flag name for Cadence tools. 
+ """ + + power_spec_type = str(self.get_setting("vlsi.inputs.power_spec_type")) # type: str + power_spec_arg = "" # type: str + if power_spec_type == "cpf": + power_spec_arg = "cpf" + elif power_spec_type == "upf": + power_spec_arg = "1801" + else: + self.logger.error( + "Invalid power specification type '{tpe}'; only 'cpf' or 'upf' supported".format(tpe=power_spec_type)) + return "" + return power_spec_arg + + def create_power_spec(self) -> str: + """ + Generate a power specification file for Cadence tools. + """ + + power_spec_type = str(self.get_setting("vlsi.inputs.power_spec_type")) # type: str + power_spec_contents = "" # type: str + power_spec_mode = str(self.get_setting("vlsi.inputs.power_spec_mode")) # type: str + if power_spec_mode == "empty": + return "" + elif power_spec_mode == "auto": + if power_spec_type == "cpf": + power_spec_contents = self.cpf_power_specification + elif power_spec_type == "upf": + power_spec_contents = self.upf_power_specification + elif power_spec_mode == "manual": + power_spec_contents = str(self.get_setting("vlsi.inputs.power_spec_contents")) + else: + self.logger.error("Invalid power specification mode '{mode}'; using 'empty'.".format(mode=power_spec_mode)) + return "" + + # Write the power spec contents to file and include it + power_spec_file = os.path.join(self.run_dir, "power_spec.{tpe}".format(tpe=power_spec_type)) + self.write_contents_to_path(power_spec_contents, power_spec_file) + + return power_spec_file + + def generate_power_spec_commands(self) -> List[str]: + """ + Generate commands to load a power specification for Cadence tools. + """ + + power_spec_file = self.create_power_spec() + power_spec_arg = self.map_power_spec_name() + + return ["read_power_intent -{arg} {path}".format(arg=power_spec_arg, path=power_spec_file), + "commit_power_intent"] + + def child_modules_tcl(self) -> str: + """ + Dumps a list of child instance paths and their ilm directories. + Should only be called when self.hierarchical_mode.is_nonleaf_hierarchical() + """ + if self.get_setting("vlsi.inputs.hierarchical.config_source") != "manual": + self.logger.warning(''' + Hierarchical write_regs requires having vlsi.inputs.hierarchical.manual_modules specified. + You may have problems with register forcing in gate-level sim. 
+ ''') + return ''' + set child_modules_ir "./find_child_modules.json" + set child_modules_ir [open $child_modules_ir "w"] + puts $child_modules_ir "\{\}" + close $child_modules_ir + ''' + else: + # Write out the paths to all child find_regs_paths.json files + child_modules = list(next(d for i,d in enumerate(self.get_setting("vlsi.inputs.hierarchical.manual_modules")) if self.top_module in d).values())[0] + + # Get all paths to the child module instances + # For P&R, this only works in the flattened ILM state + return ''' + set child_modules_ir "./find_child_modules.json" + set child_modules_ir [open $child_modules_ir "w"] + puts $child_modules_ir "\{{" + + set cells {{ {CELLS} }} + set numcells [llength $cells] + + for {{set i 0}} {{$i < $numcells}} {{incr i}} {{ + set cell [lindex $cells $i] + set inst_paths [get_db [get_db modules -if {{.name==$cell}}] .hinsts.name] + set inst_paths [join $inst_paths "\\", \\""] + if {{$i == $numcells - 1}} {{ + puts $child_modules_ir " \\"$cell\\": \\[\\"$inst_paths\\"\\]" + }} else {{ + puts $child_modules_ir " \\"$cell\\": \\[\\"$inst_paths\\"\\]," + }} + }} + + puts $child_modules_ir "\}}" + + close $child_modules_ir + '''.format(CELLS=" ".join(child_modules)) + + def write_regs_tcl(self) -> str: + return ''' + set write_cells_ir "./find_regs_cells.json" + set write_cells_ir [open $write_cells_ir "w"] + puts $write_cells_ir "\[" + + set refs [get_db [get_db lib_cells -if .is_sequential==true] .base_name] + + set len [llength $refs] + + for {set i 0} {$i < [llength $refs]} {incr i} { + if {$i == $len - 1} { + puts $write_cells_ir " \\"[lindex $refs $i]\\"" + } else { + puts $write_cells_ir " \\"[lindex $refs $i]\\"," + } + } + + puts $write_cells_ir "\]" + close $write_cells_ir + set write_regs_ir "./find_regs_paths.json" + set write_regs_ir [open $write_regs_ir "w"] + puts $write_regs_ir "\[" + + set regs [get_db [get_db [all_registers -edge_triggered -output_pins] -if .direction==out] .name] + + set len [llength $regs] + + for {set i 0} {$i < [llength $regs]} {incr i} { + #regsub -all {/} [lindex $regs $i] . myreg + set myreg [lindex $regs $i] + if {$i == $len - 1} { + puts $write_regs_ir " \\"$myreg\\"" + } else { + puts $write_regs_ir " \\"$myreg\\"," + } + } + + puts $write_regs_ir "\]" + + close $write_regs_ir + ''' + + def process_reg_paths(self, path: str) -> bool: + # Post-process the all_regs list here to avoid having too much logic in TCL + with open(path, "r+") as f: + reg_paths = json.load(f) + output_paths = [] # type: List[Dict[str,str]] + assert isinstance(reg_paths, List), "Output find_regs_paths.json should be a json list of strings" + for i in range(len(reg_paths)): + split = reg_paths[i].split("/") + # If the net is part of a generate block, the generated names have a "." in them and the whole name + # needs to be escaped. + for index, node in enumerate(split): + if "." 
in node: + split[index] = "\\" + node + "\\" + # If the last net is part of a bus, it needs to be escaped + if split[-2][-1] == "]": + split[-2] = "\\" + split[-2] + reg_paths[i] = {"path" : '/'.join(split[0:len(split)-1]), "pin" : split[-1]} + else: + reg_paths[i] = {"path" : '/'.join(split[0:len(split)-1]), "pin" : split[-1]} + + # For parent hierarchical modules, append all child instance regs + if self.hierarchical_mode.is_nonleaf_hierarchical(): + with open(os.path.join(os.path.dirname(path), "find_child_modules.json"), "r") as cmf: + mod_paths = json.load(cmf) + for mod_path in mod_paths.items(): + ilm = next(i for i in self.get_input_ilms() if i.module == mod_path[0]) # type: ILMStruct + with open(os.path.join(os.path.dirname(ilm.dir), "find_regs_paths.json"), "r") as crf: + child_regs = json.load(crf) + for inst_path in mod_path[1]: + prefixed_regs = copy.deepcopy(child_regs) + for reg in prefixed_regs: + reg.update({'path': os.path.join(inst_path, reg['path'])}) + reg_paths.extend(prefixed_regs) + + f.seek(0) # Move to beginning to rewrite file + json.dump(reg_paths, f, indent=2) # Elide the truncation because we are always increasing file size + return True diff --git a/hammer/common/synopsys/__init__.py b/hammer/common/synopsys/__init__.py index e69de29bb..c77622f70 100644 --- a/hammer/common/synopsys/__init__.py +++ b/hammer/common/synopsys/__init__.py @@ -0,0 +1,76 @@ +import datetime +import inspect +import os +from typing import Optional, Dict + +from hammer.vlsi import HasSDCSupport, TCLTool, HammerTool + + +class SynopsysTool(HasSDCSupport, TCLTool, HammerTool): + """Mix-in trait with functions useful for Synopsys-based tools.""" + + ## FIXME: not used by any Synopsys tool + @property + def post_synth_sdc(self) -> Optional[str]: + return None + + @property + def env_vars(self) -> Dict[str, str]: + """ + Get the list of environment variables required for this tool. + Note to subclasses: remember to include variables from super().env_vars! + """ + result = dict(super().env_vars) + result.update({ + "SNPSLMD_LICENSE_FILE": self.get_setting("synopsys.SNPSLMD_LICENSE_FILE"), + # TODO: this is actually a Mentor Graphics licence, not sure why the old dc scripts depend on it. + "MGLS_LICENSE_FILE": self.get_setting("synopsys.MGLS_LICENSE_FILE") + }) + return result + + def version_number(self, version: str) -> int: + """ + Assumes versions look like NAME-YYYY.MM-SPMINOR. + Assumes less than 100 minor versions. + """ + date = "-".join(version.split("-")[1:]) # type: str + year = int(date.split(".")[0]) # type: int + month = int(date.split(".")[1][:2]) # type: int + minor_version = 0 # type: int + if "-" in date: + minor_version = int(date.split("-")[1][2:]) + return (year * 100 + month) * 100 + minor_version + + @property + def header(self) -> str: + """ + Header for all generated Tcl scripts + """ + header_text = f""" + # --------------------------------------------------------------------------------- + # Portions Copyright ©{datetime.date.today().year} Synopsys, Inc. All rights reserved. Portions of + # these TCL scripts are proprietary to and owned by Synopsys, Inc. and may only be + # used for internal use by educational institutions (including United States + # government labs, research institutes and federally funded research and + # development centers) on Synopsys tools for non-profit research, development, + # instruction, and other non-commercial uses or as otherwise specifically set forth + # by written agreement with Synopsys. 
All other use, reproduction, modification, or
+        # distribution of these TCL scripts is strictly prohibited.
+        # ---------------------------------------------------------------------------------
+        """
+        return inspect.cleandoc(header_text)
+
+    def get_synopsys_rm_tarball(self, product: str, settings_key: str = "") -> str:
+        """Locate reference methodology tarball.
+
+        :param product: Either "DC" or "ICC"
+        :param settings_key: Key to retrieve the version for the product. Leave blank for DC and ICC.
+        """
+        key = self.tool_config_prefix() + "." + "version"  # type: str
+
+        synopsys_rm_tarball = os.path.join(self.get_setting("synopsys.rm_dir"), "%s-RM_%s.tar" % (product, self.get_setting(key)))
+        if not os.path.exists(synopsys_rm_tarball):
+            # TODO: convert these to logger calls
+            raise FileNotFoundError("Expected reference methodology tarball not found at %s. Use the Synopsys RM generator to generate a DC reference methodology. If these tarballs have been pre-downloaded, you can set synopsys.rm_dir instead of generating them yourself." % (synopsys_rm_tarball))
+        else:
+            return synopsys_rm_tarball
\ No newline at end of file
diff --git a/hammer/drc/icv/README.md b/hammer/drc/icv/README.md
new file mode 100644
index 000000000..4a142a93e
--- /dev/null
+++ b/hammer/drc/icv/README.md
@@ -0,0 +1,15 @@
+# DRC/LVS with IC Validator
+
+IC Validator is very command-line driven. Here are some usage notes:
+
+* Many PDK decks will use variables to control switches. These are defined on the command line with `-D` and can be defined in Hammer config using the `<drc|lvs>.icv.defines` key (type: List[Dict[str, str]]).
+* Any deck directories that need to be included are defined on the command line with `-I` and can be defined in Hammer config using the `<drc|lvs>.icv.include_dirs` key (type: List[str]).
+* Extensibility is enabled by passing a file to the icv command with `-clf`. This file contains additional command line arguments and is generated in the `generate_<drc|lvs>_args_file` step (can be overridden).
+* Decks are included using the `generate_<drc|lvs>_run_file` step (can be overridden with additional ICV method calls).
+* Results/violations are generated in a format readable by VUE (interactive violation browser) using the `-vue` option.
+* Layout is viewed using IC Validator Workbench (ICVWB). It can communicate with VUE to generate violation markers by opening up a socket to ICV. The socket number can range between 1000 and 65535 (selectable by `<drc|lvs>.icv.icvwb_port`). Running the `generated_scripts/view_<drc|lvs>` script will handle this automatically, by starting ICVWB, opening the port, waiting for it to be listening, and then starting VUE.
+* ICVWB layer mapping can be specified with the `synopsys.layerprops` key.
+
+Tested with:
+
+* hammer-intech22-plugin
diff --git a/hammer/drc/icv/__init__.py b/hammer/drc/icv/__init__.py
new file mode 100644
index 000000000..7eafd6b49
--- /dev/null
+++ b/hammer/drc/icv/__init__.py
@@ -0,0 +1,192 @@
+# hammer-vlsi plugin for Synopsys IC Validator.
+#
+# See LICENSE for licence details.
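+#
+# A minimal project-config sketch for enabling this plugin. The key names come
+# from this plugin's defaults.yml and the module path appears in this patch;
+# the deck variable and include directory below are illustrative placeholders,
+# not real PDK values:
+#
+#   vlsi.core.drc_tool: "hammer.drc.icv"
+#   drc.icv.defines: [{"EXAMPLE_SWITCH": "1"}]    # passed to icv as: -D EXAMPLE_SWITCH=1
+#   drc.icv.include_dirs: ["/path/to/pdk/decks"]  # passed to icv as: -I /path/to/pdk/decks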
+ +from hammer.vlsi import HammerToolStep +from hammer.vlsi import HammerDRCTool +from hammer.common.synopsys import SynopsysTool +from hammer.logging import HammerVLSILogging + +from typing import Dict, List, Optional + +import os +import textwrap + + +class ICVDRC(HammerDRCTool, SynopsysTool): + + def tool_config_prefix(self) -> str: + return "drc.icv" + + def drc_results_pre_waived(self) -> Dict[str, int]: + return {} + + def fill_outputs(self) -> bool: + return True + + # TODO: placeholder empty step + def empty_step(self) -> bool: + return True + + @property + def steps(self) -> List[HammerToolStep]: + steps = [self.generate_drc_run_file, self.generate_drc_args_file] # TODO: DRC steps require multiple runs of the tool how do we support this? + return self.make_steps_from_methods(steps) + + def do_post_steps(self) -> bool: + assert super().do_post_steps() + return self.run_icv() + + def globally_waived_drc_rules(self) -> List[str]: + return [] + + def run_icv(self) -> bool: + self.create_enter_script() + + # set the command arguments + args = [ + self.get_setting("drc.icv.icv_drc_bin"), + "-64"] # always want to be in 64-bit mode + if self.version() >= self.version_number("R-2020.09"): + args.extend(["-host_init", str(self.get_setting("vlsi.core.max_threads"))]) + else: + args.append("-dp{}".format(self.get_setting("vlsi.core.max_threads"))) + args.extend([ + "-clf", + self.drc_args_file, + "-vue", # needed to view results in VUE + "-verbose" # get more than % complete + ]) + args.append(self.drc_run_file) + + HammerVLSILogging.enable_colour = False + HammerVLSILogging.enable_tag = False + self.run_executable(args, cwd=self.run_dir) # TODO: check for errors and deal with them + HammerVLSILogging.enable_colour = True + HammerVLSILogging.enable_tag = True + + # TODO: check that drc run was successful + + # Create view_drc script & icvwb macro script file + # See the README for how this works + os.makedirs(self.generated_scripts_dir, exist_ok=True) + + with open(self.icvwb_macrofile, "w") as f: + # Open socket + f.write("user_socket open 0\n") + # Layer mapping + layerprops_file = self.get_setting("synopsys.layerprops") + if layerprops_file is not None: + f.writelines(l for l in open(layerprops_file)) + + with open(self.view_drc_script, "w") as f: + f.write(""" + cd {run_dir} + source enter + # Start Synopsys IC Validator WorkBench and wait for port to open before starting VUE + {icvwb} -socket {port} -run {macrofile} {gds} & + while ! 
nc -z localhost {port}; do
+                sleep 0.1
+            done
+            {icv_vue} -64 -load {results} -lay icwb -layArgs Port {port}
+            """.format(
+                run_dir=self.run_dir,
+                icvwb=self.get_setting("drc.icv.icvwb_bin"),
+                port=self.get_setting("drc.icv.icvwb_port"),
+                macrofile=self.icvwb_macrofile,
+                gds=self.layout_file,
+                icv_vue=self.get_setting("drc.icv.icv_vue_bin"),
+                results=self.drc_results_db
+            ))
+        os.chmod(self.view_drc_script, 0o755)
+
+        return True
+
+    def generate_drc_run_file(self) -> bool:
+        """ Generate the DRC run file self.drc_run_file and fill its contents """
+        with open(self.drc_run_file, "w") as f:
+            f.write(self.header.replace("#","//") + "\n\n")
+            f.write(self.get_additional_drc_text())
+            # Include paths to all supplied decks
+            for rule in self.get_drc_decks():
+                f.write("#include <{}>\n".format(rule.path))
+        return True
+
+    def generate_drc_args_file(self) -> bool:
+        """ Generate the DRC args file self.drc_args_file and fill its contents """
+        with open(self.drc_args_file, "w") as f:
+            f.write(textwrap.dedent("""
+                # Generated by HAMMER
+                -i {gds}\
+                -c {top}\
+                -f GDSII\
+                """).format(
+                    gds=self.layout_file,
+                    top=self.top_module
+                )
+            )
+            # Symbolic variables to set via command. Can also use #define in additional_drc_text.
+            # TODO: change if other technologies don't use -D switches to select checks.
+            if len(self.drc_rules_to_run()) > 0:
+                f.write(" -D " + " -D ".join(self.drc_rules_to_run()))
+            defines = self.get_setting("drc.icv.defines")  # type: List[Dict[str, str]]
+            assert isinstance(defines, list)
+            if len(defines) > 0:
+                # Most comprehensive way of covering all List[Dict] possibilities
+                f.write(" -D " + " -D ".join(map(lambda x: " -D ".join("=".join(_) for _ in x.items()), defines)))
+
+            # Preprocessor directories to include
+            include_dirs = self.get_setting("drc.icv.include_dirs")  # type: List[str]
+            assert isinstance(include_dirs, list)
+            if len(include_dirs) > 0:
+                f.write(" -I " + " -I ".join(include_dirs))
+
+            # Config runset file
+            config_rs = self.get_setting("drc.icv.config_runset")  # type: Optional[str]
+            if config_rs is not None:
+                f.write(" -config_runset " + config_rs)
+        return True
+
+    @property
+    def generated_scripts_dir(self) -> str:
+        return os.path.join(self.run_dir, "generated-scripts")
+
+    @property
+    def view_drc_script(self) -> str:
+        return os.path.join(self.generated_scripts_dir, "view_drc")
+
+    @property
+    def icvwb_macrofile(self) -> str:
+        return os.path.join(self.generated_scripts_dir, "icvwb_macrofile")
+
+    @property
+    def drc_run_file(self) -> str:
+        return os.path.join(self.run_dir, "drc_run_file")
+
+    @property
+    def drc_args_file(self) -> str:
+        return os.path.join(self.run_dir, "drc_args_file")
+
+    @property
+    def drc_results_db(self) -> str:
+        return os.path.join(self.run_dir, self.top_module + ".vue")
+
+    @property
+    def drc_results_file(self) -> str:
+        return os.path.join(self.run_dir, self.top_module + ".LAYOUT_ERRORS")
+
+    @property
+    def env_vars(self) -> Dict[str, str]:
+        """
+        Get the list of environment variables required for this tool.
+        Note to subclasses: remember to include variables from super().env_vars!
+ """ + result = dict(super().env_vars) + result.update({ + "ICV_HOME_DIR": self.get_setting("drc.icv.ICV_HOME_DIR"), + "PATH": "{path}:{icv_home}/bin/LINUX.64".format(path=os.environ.copy()["PATH"], icv_home=self.get_setting("drc.icv.ICV_HOME_DIR")) + }) + return result + + +tool = ICVDRC diff --git a/hammer/drc/icv/defaults.yml b/hammer/drc/icv/defaults.yml new file mode 100644 index 000000000..bc43cfe5e --- /dev/null +++ b/hammer/drc/icv/defaults.yml @@ -0,0 +1,37 @@ +# Default settings for ICV DRC, for project/technology configuration and overriding. +drc.icv: + # Location of the binary + icv_drc_bin: "${synopsys.synopsys_home}/icv/${drc.icv.version}/bin/LINUX.64/icv" + icv_drc_bin_meta: lazysubst + + icv_vue_bin: "${synopsys.synopsys_home}/icv/${drc.icv.version}/bin/LINUX.64/icv_vue" + icv_vue_bin_meta: lazysubst + + icvwb_bin: "${synopsys.synopsys_home}/icvwb/${drc.icv.icvwb_version}/bin/icvwb" + icvwb_bin_meta: lazysubst + + ICV_HOME_DIR: "${synopsys.synopsys_home}/icv/${drc.icv.version}" + ICV_HOME_DIR_meta: lazysubst + + # type: str + version: "S-2021.06-SP3-2" + # type: str + icvwb_version: "S-2021.06-SP2" + # Port for VUE (violation browser) to communicate with ICVWB (layout browser) + # Any open port 1000 to 65536 allowed + # type: int + icvwb_port: 1234 + + # Symbolic variables passed as -D flags to ICV command. + # Alternatively, #define can be appended as additional_drc_text. + # type: List[Dict[str, str]] + defines: [] + + # Preprocessor include directories passed as -I to the ICV command. + # type: List[str] + include_dirs: [] + + # Config runset file passed in as -config_runset to the ICV command. + # Generally used for waivers. + # type: Optional[str] + config_runset: null diff --git a/hammer/formal/conformal/__init__.py b/hammer/formal/conformal/__init__.py new file mode 100644 index 000000000..8ca00db6a --- /dev/null +++ b/hammer/formal/conformal/__init__.py @@ -0,0 +1,290 @@ +# hammer-vlsi plugin for Cadence Conformal. +# +# See LICENSE for licence details. + +from typing import List, Dict, Tuple + +import os +import errno + +from hammer.vlsi import HammerFormalTool, HammerToolStep +from hammer.logging import HammerVLSILogging +import hammer.tech as hammer_tech +from hammer.common.cadence import CadenceTool + +# Notes: Tcl mode is enabled for harmonization with other Cadence tools and additional Tcl functionality. +# There is a minor performance hit with database operations vs. native language. + +class Conformal(HammerFormalTool, CadenceTool): + + def tool_config_prefix(self) -> str: + return "formal.conformal" + + @property + def env_vars(self) -> Dict[str, str]: + v = dict(super().env_vars) + if self.check in ["constraint", "cdc"]: + v["CONFORMAL_BIN"] = self.get_setting("formal.conformal.conformal_ccd_bin") + else: + v["CONFORMAL_BIN"] = self.get_setting("formal.conformal.conformal_lec_bin") + return v + + @property + def start_cmd(self) -> List[str]: + """ Generate required startup command based on the requested check and license level """ + lec_bin = self.get_setting("formal.conformal.conformal_lec_bin") + ccd_bin = self.get_setting("formal.conformal.conformal_ccd_bin") + license = self.get_setting("formal.conformal.license") + cmd = ["", ""] + if not license in ["L", "XL", "GXL"]: + self.logger.error("License must be L, XL, or GXL. 
For CCD, -MCC is equivalent to GXL here.") + + if self.check == "lec": + cmd = [lec_bin, f"-{license}"] + elif self.check == "power": + if license == "L": + self.logger.error("power not supported with L license") + else: + cmd = [lec_bin, f"-LP{license}"] + elif self.check == "eco": + if license == "L": + self.logger.error("eco not supported with L license") + elif license == "XL": + cmd = [lec_bin, "-ECO"] + else: + cmd = [lec_bin, "-ECOGXL"] + elif self.check == "property": + return [lec_bin, "-VERIFY"] + elif self.check in ["constraint", "cdc"]: + if license == "GXL": + cmd = [ccd_bin, "-MCC"] + else: + cmd = [ccd_bin, f"-{license}"] + else: + self.logger.error("Unsupported check type") + + return cmd + + def check_reference_files(self, extensions: List[str]) -> bool: + """ + Verify that reference files exist and have the specified extensions. + Analogous to check_input_files in HammerTool. + + :param extensions: List of extensions e.g. [".v", ".sv"] + :return: True if all files exist and have the specified extensions. + """ + refs = self.reference_files + error = False + for r in refs: + if not r.endswith(tuple(extensions)): + self.logger.error(f"Input of unsupported type {r} detected!") + error = True + if not os.path.isfile(r): + self.logger.error(f"Input file {r} does not exist!") + error = True + return not error + + @property + def restart_checkpoint(self) -> str: + """ Name of checkpoint to be restarted from (set by do_pre_steps) """ + return self.attr_getter("_restart_checkpoint", "") + + @restart_checkpoint.setter + def restart_checkpoint(self, val: str) -> None: + self.attr_setter("_restart_checkpoint", val) + + @property + def _step_transitions(self) -> List[Tuple[str, str]]: + """ + Private helper property to keep track of which steps we ran so that we + can create symlinks. + This is a list of (pre, post) steps + """ + return self.attr_getter("__step_transitions", []) + + @_step_transitions.setter + def _step_transitions(self, value: List[Tuple[str, str]]) -> None: + self.attr_setter("__step_transitions", value) + + + def do_pre_steps(self, first_step: HammerToolStep) -> bool: + assert super().do_pre_steps(first_step) + # Restart from the last checkpoint if we're not starting over. + # Not in the dofile, must be a command-line option + if first_step != self.first_step: + self.restart_checkpoint = f"pre_{first_step.name}" + return True + + def do_between_steps(self, prev: HammerToolStep, next: HammerToolStep) -> bool: + assert super().do_between_steps(prev, next) + # Write a checkpoint to disk. + self.append(f"checkpoint pre_{next.name} -replace") + # Symlink the checkpoint to latest for open_checkpoint script later. + self.append(f"ln -sfn pre_{next.name} latest") + self._step_transitions = self._step_transitions + [(prev.name, next.name)] + return True + + def do_post_steps(self) -> bool: + assert super().do_post_steps() + # Create symlinks for post_ to pre_ to improve usability. + try: + for prev, next in self._step_transitions: + os.symlink( + os.path.join(self.run_dir, f"pre_{next}"), # src + os.path.join(self.run_dir, f"post_{prev}") # dst + ) + except OSError as e: + if e.errno != errno.EEXIST: + self.logger.warning("Failed to create post_* symlinks: " + str(e)) + + # Create checkpoint post_ + # TODO: this doesn't work if you're only running the very last step + if len(self._step_transitions) > 0: + last = f"post_{self._step_transitions[-1][1]}" + self.append(f"checkpoint {last} -replace") + # Symlink the database to latest for open_checkpoint script later. 
+            self.append(f"ln -sfn {last} latest")
+
+        return self.generate_open_checkpoint() and self.run_conformal()
+
+    @property
+    def steps(self) -> List[HammerToolStep]:
+        if self.check != "lec":
+            self.logger.error(f"Check type {self.check} not yet supported!")
+        steps = [
+            self.setup_designs,
+            self.compare_designs
+        ]
+        return self.make_steps_from_methods(steps)
+
+    def setup_designs(self) -> bool:
+        """ Setup the designs """
+        append = self.append
+
+        # Exit on dofile error
+        append("set_dofile_abort exit")
+
+        # Multithreading (max 16 allowed by tool)
+        max_threads = min(self.get_setting("vlsi.core.max_threads"), 16)
+        append(f"set_parallel_option -threads 1,{max_threads}")
+
+        # Read libraries (macros, stdcells)
+        # TODO: support VHDL + Liberty. For now, -sva = SystemVerilog w/ assertion support.
+        lib_v_files = self.technology.read_libs(
+            [hammer_tech.filters.verilog_synth_filter],
+            hammer_tech.HammerTechnologyUtils.to_plain_item)
+        lib_v_files.extend(self.technology.read_libs(
+            [hammer_tech.filters.verilog_sim_filter],
+            hammer_tech.HammerTechnologyUtils.to_plain_item))
+        append(f"read_library {' '.join(lib_v_files)} -sva -bboxsolver -both")
+
+        # Read designs
+        valid_exts = [".v", ".v.gz", ".sv", ".sv.gz"]
+        if not self.check_input_files(valid_exts) or not self.check_reference_files(valid_exts):
+            return False
+        golden_files = list(map(lambda name: os.path.join(os.getcwd(), name), self.reference_files))
+        append(f"read_design {' '.join(golden_files)} -sva -golden")
+        revised_files = list(map(lambda name: os.path.join(os.getcwd(), name), self.input_files))
+        append(f"read_design {' '.join(revised_files)} -sva -revised")
+
+        # Set top module
+        append(f"set_root_module {self.top_module} -both")
+
+        # Auto setup analysis optimizations
+        if self.get_setting("formal.conformal.license") != "L":
+            append("set_analyze_option -auto")
+
+        # Setup reports
+        append("report_design_data")
+
+        return True
+
+    def compare_designs(self) -> bool:
+        """ Depending on license, performs flat or hierarchical comparison """
+        append = self.append
+
+        if self.get_setting("formal.conformal.license") == "L":
+            append("report_black_box")
+            append("set_system_mode lec")
+            append("add_compare_point -all")
+            append("compare")
+            append("report_compare_data")
+        else:
+            # TODO: need resource file for DC-mapped netlists
+            append('write_hier_compare_dofile hier_compare.tcl -replace '\
+                   '-prepend_string "analyze_datapath -module; analyze_datapath"')
+            append("run_hier_compare hier_compare.tcl")
+            append("set_system_mode lec")
+
+        append("report_statistics")
+
+        return True
+
+    def generate_open_checkpoint(self) -> bool:
+        # Make sure that generated-scripts exists.
+ generated_scripts_dir = os.path.join(self.run_dir, "generated-scripts") + os.makedirs(generated_scripts_dir, exist_ok=True) + + # Script to open results checkpoint + self.create_enter_script() + open_checkpoint_tcl = os.path.join(generated_scripts_dir, "open_checkpoint.tcl") + with open(open_checkpoint_tcl, "w") as f: + f.write("set_gui -mapping") + open_checkpoint_script = os.path.join(generated_scripts_dir, "open_checkpoint") + with open(open_checkpoint_script, "w") as f: + assert super().do_pre_steps(self.first_step) + args = self.start_cmd + args.extend(["-gui", "-restart_checkpoint", "latest"]) + f.write("#!/bin/bash\n") + f.write(f"cd {self.run_dir}\n") + f.write("source enter\n") + f.write(f"$CONFORMAL_BIN -restart_checkpoint latest -dofile {open_checkpoint_tcl}") + os.chmod(open_checkpoint_script, 0o755) + + return True + + def run_conformal(self) -> bool: + # Quit + self.append("exit") + + # Write main dofile + dofile = os.path.join(self.run_dir, f"{self.check}.tcl") + self.write_contents_to_path("\n".join(self.output), dofile) + + # Build args + args = self.start_cmd + args.extend([ + "-nogui", + "-color", + "-tclmode", + "-dofile", dofile + ]) + if self.restart_checkpoint != "": + args.extend([ + "-restart_checkpoint", + self.restart_checkpoint + ]) + + # Temporarily disable colours/tag to make run output more readable. + # TODO: think of a more elegant way to do this? + HammerVLSILogging.enable_colour = False + HammerVLSILogging.enable_tag = False + self.run_executable(args, cwd=self.run_dir) + # TODO: check for errors and deal with them + # According to user guide: + # Bit Condition + # 0 Internal error + # 1 Exit status before comparison + # 2 Command error + # 3 Unmapped points + # 4 Non-equivalent points + # 5 Abort or uncompared points exist during any comparison + # 6 Abort or uncompared points exist during last comparison + HammerVLSILogging.enable_colour = True + HammerVLSILogging.enable_tag = True + + # TODO: check that formal run was successful + + return True + +tool = Conformal diff --git a/hammer/formal/conformal/defaults.yml b/hammer/formal/conformal/defaults.yml new file mode 100644 index 000000000..486655439 --- /dev/null +++ b/hammer/formal/conformal/defaults.yml @@ -0,0 +1,23 @@ +# Configuration options and defaults for Conformal. +# The values specified in this file are the defaults. + +formal.conformal: + # Location of the binary + conformal_lec_bin: "${cadence.cadence_home}/CONFRML/CONFRML${formal.conformal.version}/bin/lec" + conformal_lec_bin_meta: lazysubst # we want later overrides to be able to affect this + conformal_ccd_bin: "${cadence.cadence_home}/CONFRML/CONFRML${formal.conformal.version}/bin/ccd" + conformal_ccd_bin_meta: lazysubst # we want later overrides to be able to affect this + + # Conformal version to use + # Used to locate the binary - e.g. the '212' in ${cadence.cadence_home}/CONFRML/CONFRML212/bin/lec + version: "212" + + # Highest installed license level + # Valid options: L, XL, GXL + license: "GXL" + +formal.inputs: + # Check type to run. Supported types in this plugin: + # - lec: logical equivalence checking + # Note: select features will require a XL/GXL license (see license_level key). 
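Conformal reports its result as a bit mask in the exit status (the table in run_conformal above). A minimal sketch, not part of the plugin, of how the TODO return-code check could decode that mask:

    # Decode Conformal's bit-mask exit status into readable conditions.
    CONFORMAL_EXIT_BITS = {
        0: "Internal error",
        1: "Exit status before comparison",
        2: "Command error",
        3: "Unmapped points",
        4: "Non-equivalent points",
        5: "Abort or uncompared points exist during any comparison",
        6: "Abort or uncompared points exist during last comparison",
    }

    def decode_conformal_status(code: int) -> list:
        """Return the list of conditions whose bits are set in the exit code."""
        return [msg for bit, msg in CONFORMAL_EXIT_BITS.items() if code & (1 << bit)]

    # decode_conformal_status(0b10100) -> ["Command error", "Non-equivalent points"]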
+ check: "lec" diff --git a/hammer/generate_properties.py b/hammer/generate_properties.py index 50db3d9f3..ccd769ccc 100755 --- a/hammer/generate_properties.py +++ b/hammer/generate_properties.py @@ -294,7 +294,7 @@ def main(args) -> int: if dry_run: print(contents, end='') else: - with open(filename, "w") as f: + with open(filename, "w") as f: # type: ignore f.write(contents) return 0 diff --git a/hammer/lvs/icv/__init__.py b/hammer/lvs/icv/__init__.py new file mode 100644 index 000000000..12d7bccfa --- /dev/null +++ b/hammer/lvs/icv/__init__.py @@ -0,0 +1,240 @@ +# hammer-vlsi plugin for Synopsys IC Validator. +# +# See LICENSE for licence details. + +from hammer.vlsi import HammerToolStep +from hammer.vlsi import HammerLVSTool +from hammer.common.synopsys import SynopsysTool +from hammer.logging import HammerVLSILogging +from hammer.utils import HammerFiletype, get_filetype +import hammer.tech as hammer_tech + +from typing import Dict, List, Optional + +import os +import textwrap + + +class ICVLVS(HammerLVSTool, SynopsysTool): + + def tool_config_prefix(self) -> str: + return "lvs.icv" + + def erc_results_pre_waived(self) -> Dict[str, int]: + return {} + + def lvs_results(self) -> List[str]: + return [] + + def fill_outputs(self) -> bool: + return True + + # TODO: placeholder empty step + def empty_step(self) -> bool: + return True + + @property + def steps(self) -> List[HammerToolStep]: + steps = [self.generate_lvs_run_file, self.generate_lvs_args_file] # TODO: LVS steps require multiple runs of the tool how do we support this? + return self.make_steps_from_methods(steps) + + def do_post_steps(self) -> bool: + assert super().do_post_steps() + return self.run_icv() + + def globally_waived_erc_rules(self) -> List[str]: + return [] + + def run_icv(self) -> bool: + self.create_enter_script() + + # translate all spice & verilog netlists with vue_nettran + self.generate_top_icv_file() + # generate the hcells file if needed + self.generate_hcells_file() + + # set the command arguments + args = [ + self.get_setting("lvs.icv.icv_lvs_bin"), + "-64"] # always want to be in 64-bit mode + if self.version() >= self.version_number("R-2020.09"): + args.extend(["-host_init", str(self.get_setting("vlsi.core.max_threads"))]) + else: + args.append("-dp{}".format(self.get_setting("vlsi.core.max_threads"))) + args.extend([ + "-clf", + self.lvs_args_file, + "-vue", # needed to view results in VUE + "-verbose" # get more than % complete + ]) + args.append(self.lvs_run_file) + + HammerVLSILogging.enable_colour = False + HammerVLSILogging.enable_tag = False + self.run_executable(args, cwd=self.run_dir) # TODO: check for errors and deal with them + HammerVLSILogging.enable_colour = True + HammerVLSILogging.enable_tag = True + + # TODO: check that lvs run was successful + + # Create view_lvs script & icvwb macro script file + # See the README for how this works + os.makedirs(self.generated_scripts_dir, exist_ok=True) + + with open(self.icvwb_macrofile, "w") as f: + # Open socket + f.write("user_socket open 0\n") + # Layer mapping + layerprops_file = self.get_setting("synopsys.layerprops") + if layerprops_file is not None: + f.writelines(l for l in open(layerprops_file)) + + with open(self.view_lvs_script, "w") as f: + f.write(""" + cd {run_dir} + source enter + # Start Synopsys IC Validator WorkBench and wait for port to open before starting VUE + {icvwb} -socket {port} -run {macrofile} {gds} & + while ! 
nc -z localhost {port}; do + sleep 0.1 + done + {icv_vue} -64 -load {results} -lay icwb -layArgs Port {port} + """.format( + run_dir=self.run_dir, + icvwb=self.get_setting("lvs.icv.icvwb_bin"), + port=self.get_setting("lvs.icv.icvwb_port"), + macrofile=self.icvwb_macrofile, + gds=self.layout_file, + icv_vue=self.get_setting("lvs.icv.icv_vue_bin"), + results=self.lvs_results_db + )) + os.chmod(self.view_lvs_script, 0o755) + + return True + + @property + def hcells_file(self) -> str: + return os.path.join(self.run_dir, "hcells") + + def generate_hcells_file(self) -> None: + with open(self.hcells_file, "w") as f: + f.write("") + # TODO + + def generate_top_icv_file(self) -> None: + library_spice_files = self.technology.read_libs([hammer_tech.filters.spice_filter], hammer_tech.HammerTechnologyUtils.to_plain_item) + ilms = list(map(lambda x: x.netlist, self.ilms)) # type: List[str] + + all_files = library_spice_files + self.schematic_files + ilms + spice_files = list(filter(lambda x: get_filetype(x) is HammerFiletype.SPICE, all_files)) + verilog_files = list(filter(lambda x: get_filetype(x) is HammerFiletype.VERILOG, all_files)) + unmatched = set(all_files).symmetric_difference(set(spice_files + verilog_files)) + if unmatched: + raise NotImplementedError("Unsupported netlist type for files: " + str(unmatched)) + + args = [self.get_setting("lvs.icv.icv_nettran_bin"), "-sp-autoDetectBusdelimiter", "FIRST", "-sp"] + args.extend(spice_files) + args.extend(["-verilog"] + verilog_files) + args.extend(["-outName", self.converted_icv_file]) + HammerVLSILogging.enable_colour = False + HammerVLSILogging.enable_tag = False + self.run_executable(args, cwd=self.run_dir) # TODO: check for errors and deal with them + HammerVLSILogging.enable_colour = True + HammerVLSILogging.enable_tag = True + + def generate_lvs_run_file(self) -> bool: + """ Generate the LVS run file self.lvs_run_file and fill its contents """ + with open(self.lvs_run_file, "w") as f: + f.write(self.header.replace("#","//") + "\n\n") + f.write(self.get_additional_lvs_text()) + # Include paths to all supplied decks + for rule in self.get_lvs_decks(): + f.write("#include <{}>\n".format(rule.path)) + return True + + def generate_lvs_args_file(self) -> bool: + """ Generate the LVS args file self.lvs_args_file and fill its contents """ + with open(self.lvs_args_file, "w") as f: + f.write(textwrap.dedent(""" + # Generated by HAMMER + -i {gds}\ + -c {top}\ + -f GDSII\ + -s {sch}\ + -sf ICV + """).format( + gds=self.layout_file, + top=self.top_module, + sch=self.converted_icv_file + ) + ) + # Symbolic variables to set via command. Can also use #define in additional_lvs_text. 
+ defines = self.get_setting("lvs.icv.defines") # type: List[Dict[str, str]] + assert isinstance(defines, list) + if len(defines) > 0: + # Most comprehensive way of covering all List[Dict] possibilities + f.write(" -D " + " -D ".join(map(lambda x: " -D ".join("=".join(_) for _ in x.items()), defines))) + + # Preprocessor directories to include + include_dirs = self.get_setting("lvs.icv.include_dirs") # type: List[str] + assert isinstance(include_dirs, list) + if len(include_dirs) > 0: + f.write(" -I " + " -I ".join(include_dirs)) + + # Config runset file + config_rs = self.get_setting("drc.icv.config_runset") # type: Optional[str] + if config_rs is not None: + f.write(" -config_runset" + config_rs) + return True + + @property + def generated_scripts_dir(self) -> str: + return os.path.join(self.run_dir, "generated-scripts") + + @property + def view_lvs_script(self) -> str: + return os.path.join(self.generated_scripts_dir, "view_lvs") + + @property + def icvwb_macrofile(self) -> str: + return os.path.join(self.generated_scripts_dir, "icvwb_macrofile") + + @property + def lvs_run_file(self) -> str: + return os.path.join(self.run_dir, "lvs_run_file") + + @property + def lvs_args_file(self) -> str: + return os.path.join(self.run_dir, "lvs_args_file") + + @property + def erc_results_file(self) -> str: + return os.path.join(self.run_dir, self.top_module + ".LAYOUT_ERRORS") + + @property + def lvs_results_db(self) -> str: + return os.path.join(self.run_dir, self.top_module + ".vue") + + @property + def lvs_results_file(self) -> str: + return os.path.join(self.run_dir, self.top_module + ".LVS_ERRORS") + + @property + def converted_icv_file(self) -> str: + return os.path.join(self.run_dir, "{top}.icv".format(top=self.top_module)) + + @property + def env_vars(self) -> Dict[str, str]: + """ + Get the list of environment variables required for this tool. + Note to subclasses: remember to include variables from super().env_vars! + """ + result = dict(super().env_vars) + result.update({ + "ICV_HOME_DIR": self.get_setting("lvs.icv.ICV_HOME_DIR"), + "PATH": "{path}:{icv_home}/bin/LINUX.64".format(path=os.environ.copy()["PATH"], icv_home=self.get_setting("lvs.icv.ICV_HOME_DIR")) + }) + return result + + +tool = ICVLVS diff --git a/hammer/lvs/icv/defaults.yml b/hammer/lvs/icv/defaults.yml new file mode 100644 index 000000000..196dfa50a --- /dev/null +++ b/hammer/lvs/icv/defaults.yml @@ -0,0 +1,40 @@ +# Default settings for ICV lvs, for project/technology configuration and overriding. +lvs.icv: + # Location of the binary + icv_lvs_bin: "${synopsys.synopsys_home}/icv/${lvs.icv.version}/bin/LINUX.64/icv" + icv_lvs_bin_meta: lazysubst + + icv_nettran_bin: "${synopsys.synopsys_home}/icv/${lvs.icv.version}/bin/LINUX.64/icv_nettran" + icv_nettran_bin_meta: lazysubst + + icv_vue_bin: "${synopsys.synopsys_home}/icv/${lvs.icv.version}/bin/LINUX.64/icv_vue" + icv_vue_bin_meta: lazysubst + + icvwb_bin: "${synopsys.synopsys_home}/icvwb/${lvs.icv.icvwb_version}/bin/icvwb" + icvwb_bin_meta: lazysubst + + ICV_HOME_DIR: "${synopsys.synopsys_home}/icv/${lvs.icv.version}" + ICV_HOME_DIR_meta: lazysubst + + # type: str + version: "S-2021.06-SP3-2" + # type: str + icvwb_version: "S-2021.06-SP2" + # Port for VUE (violation browser) to communicate with ICVWB (layout browser) + # Any open port 1000 to 65536 allowed + # type: int + icvwb_port: 1234 + + # Symbolic variables passed as -D flags to ICV command. + # Alternatively, #define can be appended as additional_lvs_text. 
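The one-liner in generate_lvs_args_file above that flattens `lvs.icv.defines` into `-D` switches is dense. A standalone sketch of what it produces (sample values invented):

    # Flatten a List[Dict[str, str]] into ICV -D command-line switches,
    # exactly mirroring the join logic used in generate_lvs_args_file.
    defines = [{"LAYOUT_TOP": "ChipTop"}, {"POWER_NET": "VDD", "GROUND_NET": "VSS"}]
    flags = " -D " + " -D ".join(" -D ".join("=".join(kv) for kv in d.items()) for d in defines)
    print(flags)  # " -D LAYOUT_TOP=ChipTop -D POWER_NET=VDD -D GROUND_NET=VSS"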
diff --git a/hammer/lvs/icv/defaults.yml b/hammer/lvs/icv/defaults.yml
new file mode 100644
index 000000000..196dfa50a
--- /dev/null
+++ b/hammer/lvs/icv/defaults.yml
@@ -0,0 +1,40 @@
+# Default settings for ICV LVS, for project/technology configuration and overriding.
+lvs.icv:
+  # Location of the binary
+  icv_lvs_bin: "${synopsys.synopsys_home}/icv/${lvs.icv.version}/bin/LINUX.64/icv"
+  icv_lvs_bin_meta: lazysubst
+
+  icv_nettran_bin: "${synopsys.synopsys_home}/icv/${lvs.icv.version}/bin/LINUX.64/icv_nettran"
+  icv_nettran_bin_meta: lazysubst
+
+  icv_vue_bin: "${synopsys.synopsys_home}/icv/${lvs.icv.version}/bin/LINUX.64/icv_vue"
+  icv_vue_bin_meta: lazysubst
+
+  icvwb_bin: "${synopsys.synopsys_home}/icvwb/${lvs.icv.icvwb_version}/bin/icvwb"
+  icvwb_bin_meta: lazysubst
+
+  ICV_HOME_DIR: "${synopsys.synopsys_home}/icv/${lvs.icv.version}"
+  ICV_HOME_DIR_meta: lazysubst
+
+  # type: str
+  version: "S-2021.06-SP3-2"
+  # type: str
+  icvwb_version: "S-2021.06-SP2"
+  # Port for VUE (violation browser) to communicate with ICVWB (layout browser).
+  # Any open port from 1024 to 65535 is allowed.
+  # type: int
+  icvwb_port: 1234
+
+  # Symbolic variables passed as -D flags to the ICV command.
+  # Alternatively, #define can be appended as additional_lvs_text.
+  # type: List[Dict[str, str]]
+  defines: []
+
+  # Preprocessor include directories passed as -I to the ICV command.
+  # type: List[str]
+  include_dirs: []
+
+  # Config runset file passed in as -config_runset to the ICV command.
+  # Generally used to modify compare behavior.
+  # type: Optional[str]
+  config_runset: null
diff --git a/hammer/par/innovus/__init__.py b/hammer/par/innovus/__init__.py
new file mode 100644
index 000000000..21365e014
--- /dev/null
+++ b/hammer/par/innovus/__init__.py
@@ -0,0 +1,1152 @@
+# hammer-vlsi plugin for Cadence Innovus.
+#
+# See LICENSE for licence details.
+
+import shutil
+from typing import List, Dict, Optional, Callable, Tuple, Any, cast
+
+import os
+import errno
+
+from hammer.utils import get_or_else, optional_map
+from hammer.vlsi import HammerTool, HammerPlaceAndRouteTool, HammerToolStep, HammerToolHookAction, \
+    PlacementConstraintType, HierarchicalMode, ILMStruct, ObstructionType, Margins, Supply, PlacementConstraint, MMMCCornerType
+from hammer.vlsi.units import CapacitanceValue
+from hammer.logging import HammerVLSILogging
+import hammer.tech as hammer_tech
+from hammer.tech import RoutingDirection
+from hammer.tech.specialcells import CellType
+from decimal import Decimal
+from hammer.common.cadence import CadenceTool
+
+# Notes: camelCase commands are the old syntax (deprecated);
+# snake_case commands are the new/common UI syntax.
+# This plugin should only use snake_case commands.
+
+
+class Innovus(HammerPlaceAndRouteTool, CadenceTool):
+
+    def export_config_outputs(self) -> Dict[str, Any]:
+        outputs = dict(super().export_config_outputs())
+        # TODO(edwardw): find a "safer" way of passing around these settings keys.
+        outputs["par.outputs.seq_cells"] = self.output_seq_cells
+        outputs["par.outputs.all_regs"] = self.output_all_regs
+        outputs["par.outputs.sdf_file"] = self.output_sdf_path
+        outputs["par.outputs.spefs"] = self.output_spef_paths
+        return outputs
+
+    def fill_outputs(self) -> bool:
+        if self.ran_write_ilm:
+            # Check that the ILMs got written.
+            ilm_data_dir = "{ilm_dir_name}/mmmc/ilm_data/{top}".format(ilm_dir_name=self.ilm_dir_name,
+                                                                       top=self.top_module)
+            postRoute_v_gz = os.path.join(ilm_data_dir, "{top}_postRoute.v.gz".format(top=self.top_module))
+
+            if not os.path.isfile(postRoute_v_gz):
+                raise ValueError("ILM output postRoute.v.gz %s not found" % (postRoute_v_gz))
+
+            # Copy postRoute.v.gz to postRoute.ilm.v.gz since that's what Genus seems to expect.
+            postRoute_ilm_v_gz = os.path.join(ilm_data_dir, "{top}_postRoute.ilm.v.gz".format(top=self.top_module))
+            shutil.copyfile(postRoute_v_gz, postRoute_ilm_v_gz)
+
+            # Write output_ilms.
+            self.output_ilms = [
+                ILMStruct(dir=self.ilm_dir_name, data_dir=ilm_data_dir, module=self.top_module,
+                          lef=os.path.join(self.run_dir, "{top}ILM.lef".format(top=self.top_module)),
+                          gds=self.output_gds_filename,
+                          netlist=self.output_netlist_filename,
+                          sim_netlist=self.output_sim_netlist_filename)
+            ]
+        else:
+            self.output_ilms = []
+
+        # Check that the regs paths were written properly if the write_regs step was run
+        self.output_seq_cells = self.all_cells_path
+        self.output_all_regs = self.all_regs_path
+        if self.ran_write_regs:
+            if not os.path.isfile(self.all_cells_path):
+                raise ValueError("Output find_regs_cells.json %s not found" % (self.all_cells_path))
+
+            if not os.path.isfile(self.all_regs_path):
+                raise ValueError("Output find_regs_paths.json %s not found" % (self.all_regs_path))
+
+            if not self.process_reg_paths(self.all_regs_path):
+                self.logger.error("Failed to process all register paths")
+        else:
+            self.logger.info("Did not run write_regs")
+
+        # Check that the par outputs exist if the par run was successful
+        self.output_gds = self.output_gds_filename
+        self.output_netlist = self.output_netlist_filename
+        self.output_sim_netlist = self.output_sim_netlist_filename
+        self.hcells_list = []
+        self.sdf_file = self.output_sdf_path
+        self.spef_files = self.output_spef_paths
+
+        if self.ran_write_design:
+            if not os.path.isfile(self.output_gds_filename):
+                raise ValueError("Output GDS %s not found" % (self.output_gds_filename))
+
+            if not os.path.isfile(self.output_netlist_filename):
+                raise ValueError("Output netlist %s not found" % (self.output_netlist_filename))
+
+            if not os.path.isfile(self.output_sim_netlist_filename):
+                raise ValueError("Output sim netlist %s not found" % (self.output_sim_netlist_filename))
+
+            if not os.path.isfile(self.output_sdf_path):
+                raise ValueError("Output SDF %s not found" % (self.output_sdf_path))
+
+            for spef_path in self.output_spef_paths:
+                if not os.path.isfile(spef_path):
+                    raise ValueError("Output SPEF %s not found" % (spef_path))
+        else:
+            self.logger.info("Did not run write_design")
+
+        return True
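The postRoute-to-postRoute.ilm copy in fill_outputs above exists only to match the netlist name Genus expects inside an ILM data directory. A small sketch of that naming contract (module name invented):

    import os

    def ilm_netlist_names(ilm_data_dir: str, module: str):
        """Innovus writes <module>_postRoute.v.gz; Genus looks for <module>_postRoute.ilm.v.gz."""
        written = os.path.join(ilm_data_dir, f"{module}_postRoute.v.gz")
        expected = os.path.join(ilm_data_dir, f"{module}_postRoute.ilm.v.gz")
        return written, expected

    # ilm_netlist_names("SubBlockILMDir/mmmc/ilm_data/SubBlock", "SubBlock")
    # -> (".../SubBlock_postRoute.v.gz", ".../SubBlock_postRoute.ilm.v.gz")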
+
+    @property
+    def output_gds_filename(self) -> str:
+        return os.path.join(self.run_dir, "{top}.gds".format(top=self.top_module))
+
+    @property
+    def output_netlist_filename(self) -> str:
+        return os.path.join(self.run_dir, "{top}.lvs.v".format(top=self.top_module))
+
+    @property
+    def output_sim_netlist_filename(self) -> str:
+        return os.path.join(self.run_dir, "{top}.sim.v".format(top=self.top_module))
+
+    @property
+    def all_regs_path(self) -> str:
+        return os.path.join(self.run_dir, "find_regs_paths.json")
+
+    @property
+    def all_cells_path(self) -> str:
+        return os.path.join(self.run_dir, "find_regs_cells.json")
+
+    @property
+    def output_sdf_path(self) -> str:
+        return os.path.join(self.run_dir, "{top}.par.sdf".format(top=self.top_module))
+
+    @property
+    def output_spef_paths(self) -> List[str]:
+        corners = self.get_mmmc_corners()
+        if corners:
+            # Order matters to the tool consuming the SPEFs (and is ensured here by get_mmmc_corners())!
+            return list(map(lambda c: os.path.join(self.run_dir, "{top}.{corner}.par.spef".format(top=self.top_module, corner=c.name)), corners))
+        else:
+            return [os.path.join(self.run_dir, "{top}.par.spef".format(top=self.top_module))]
+
+    @property
+    def env_vars(self) -> Dict[str, str]:
+        v = dict(super().env_vars)
+        v["INNOVUS_BIN"] = self.get_setting("par.innovus.innovus_bin")
+        return v
+
+    @property
+    def _step_transitions(self) -> List[Tuple[str, str]]:
+        """
+        Private helper property to keep track of which steps we ran so that we
+        can create symlinks.
+        This is a list of (pre, post) steps.
+        """
+        return self.attr_getter("__step_transitions", [])
+
+    @_step_transitions.setter
+    def _step_transitions(self, value: List[Tuple[str, str]]) -> None:
+        self.attr_setter("__step_transitions", value)
+
+    def do_pre_steps(self, first_step: HammerToolStep) -> bool:
+        assert super().do_pre_steps(first_step)
+        # Restore from the last checkpoint if we're not starting over.
+        if first_step != self.first_step:
+            self.verbose_append("read_db pre_{step}".format(step=first_step.name))
+        return True
+
+    def do_between_steps(self, prev: HammerToolStep, next: HammerToolStep) -> bool:
+        assert super().do_between_steps(prev, next)
+        # Write a checkpoint to disk.
+        self.verbose_append("write_db pre_{step}".format(step=next.name))
+        # Symlink the database to latest for the open_chip script later.
+        self.verbose_append("ln -sfn pre_{step} latest".format(step=next.name))
+        self._step_transitions = self._step_transitions + [(prev.name, next.name)]
+        return True
+
+    def do_post_steps(self) -> bool:
+        assert super().do_post_steps()
+        # Create symlinks for post_ to pre_ to improve usability.
+        try:
+            for prev, next in self._step_transitions:
+                os.symlink(
+                    os.path.join(self.run_dir, "pre_{next}".format(next=next)),  # src
+                    os.path.join(self.run_dir, "post_{prev}".format(prev=prev))  # dst
+                )
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                self.logger.warning("Failed to create post_* symlinks: " + str(e))
+
+        # Create db post_<last step>.
+        # TODO: this doesn't work if you're only running the very last step
+        if len(self._step_transitions) > 0:
+            last = "post_{step}".format(step=self._step_transitions[-1][1])
+            self.verbose_append("write_db {last}".format(last=last))
+            # Symlink the database to latest for the open_chip script later.
+            self.verbose_append("ln -sfn {last} latest".format(last=last))
+
+        return self.run_innovus()
+
+    def get_tool_hooks(self) -> List[HammerToolHookAction]:
+        return [self.make_persistent_hook(innovus_global_settings)]
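The checkpoint bookkeeping above leaves a predictable set of Innovus databases and symlinks in run_dir. A sketch, not part of the plugin, for an invented three-step run:

    # _step_transitions after running init_design -> floorplan_design -> place_opt_design:
    transitions = [("init_design", "floorplan_design"), ("floorplan_design", "place_opt_design")]

    # Databases written by do_between_steps / do_post_steps:
    dbs = [f"pre_{nxt}" for _, nxt in transitions] + [f"post_{transitions[-1][1]}"]
    # ['pre_floorplan_design', 'pre_place_opt_design', 'post_place_opt_design']

    # Symlinks created afterwards so either side of a step can be opened:
    links = {f"post_{prev}": f"pre_{nxt}" for prev, nxt in transitions}
    # {'post_init_design': 'pre_floorplan_design', 'post_floorplan_design': 'pre_place_opt_design'}
    # plus 'latest' pointing at the most recently written database, used by open_chip.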
+
+    @property
+    def steps(self) -> List[HammerToolStep]:
+        steps = [
+            self.init_design,
+            self.floorplan_design,
+            self.place_bumps,
+            self.place_tap_cells,
+            self.power_straps,
+            self.place_pins,
+            self.place_opt_design,
+            self.clock_tree,
+            self.add_fillers,
+            self.route_design,
+            self.opt_design
+        ]
+        write_design_step = [
+            self.write_regs,
+            self.write_design
+        ]  # type: List[Callable[[], bool]]
+        if self.hierarchical_mode == HierarchicalMode.Flat:
+            # Nothing to do
+            pass
+        elif self.hierarchical_mode == HierarchicalMode.Leaf:
+            # All modules in hierarchical must write an ILM
+            write_design_step += [self.write_ilm]
+        elif self.hierarchical_mode == HierarchicalMode.Hierarchical:
+            # All modules in hierarchical must write an ILM
+            write_design_step += [self.write_ilm]
+        elif self.hierarchical_mode == HierarchicalMode.Top:
+            # No need to write an ILM at the top.
+            # Top needs assemble_design instead.
+            steps += [self.assemble_design]
+        else:
+            raise NotImplementedError("HierarchicalMode not implemented: " + str(self.hierarchical_mode))
+        return self.make_steps_from_methods(steps + write_design_step)
+
+    def tool_config_prefix(self) -> str:
+        return "par.innovus"
+
+    def init_design(self) -> bool:
+        """Initialize the design."""
+        verbose_append = self.verbose_append
+
+        # Perform common path pessimism removal in setup and hold mode
+        verbose_append("set_db timing_analysis_cppr both")
+        # Use OCV mode for timing analysis by default
+        verbose_append("set_db timing_analysis_type ocv")
+        # Match SDC time units to timing libraries
+        verbose_append("set_library_unit -time 1{}".format(self.get_time_unit().value_prefix + self.get_time_unit().unit))
+
+        # Read LEF layouts.
+        lef_files = self.technology.read_libs([
+            hammer_tech.filters.lef_filter
+        ], hammer_tech.HammerTechnologyUtils.to_plain_item)
+        if self.hierarchical_mode.is_nonleaf_hierarchical():
+            ilm_lefs = list(map(lambda ilm: ilm.lef, self.get_input_ilms()))
+            lef_files.extend(ilm_lefs)
+        verbose_append("read_physical -lef {{ {files} }}".format(
+            files=" ".join(lef_files)
+        ))
+
+        # Read timing libraries.
+        mmmc_path = os.path.join(self.run_dir, "mmmc.tcl")
+        self.write_contents_to_path(self.generate_mmmc_script(), mmmc_path)
+        verbose_append("read_mmmc {mmmc_path}".format(mmmc_path=mmmc_path))
+
+        # Read netlist.
+        # Innovus only supports structural Verilog for the netlist; the Verilog can optionally be compressed.
+        if not self.check_input_files([".v", ".v.gz"]):
+            return False
+
+        # We are switching working directories and we still need to find paths.
+        abspath_input_files = list(map(lambda name: os.path.join(os.getcwd(), name), self.input_files))
+        verbose_append("read_netlist {{ {files} }} -top {top}".format(
+            files=" ".join(abspath_input_files),
+            top=self.top_module
+        ))
+
+        if self.hierarchical_mode.is_nonleaf_hierarchical():
+            # Read ILMs.
+            for ilm in self.get_input_ilms():
+                # Assumes that the ILM was created by Innovus (or at least uses the same file/folder structure).
+                verbose_append("read_ilm -cell {module} -directory {dir}".format(dir=ilm.dir, module=ilm.module))
+
+        # Emit init_power_nets and init_ground_nets in case CPF/UPF is not used;
+        # commit_power_intent does not override power nets defined in "init_power_nets".
+        spec_mode = self.get_setting("vlsi.inputs.power_spec_mode")  # type: str
+        if spec_mode == "empty":
+            power_supplies = self.get_independent_power_nets()  # type: List[Supply]
+            power_nets = " ".join(map(lambda s: s.name, power_supplies))
+            ground_supplies = self.get_independent_ground_nets()  # type: List[Supply]
+            ground_nets = " ".join(map(lambda s: s.name, ground_supplies))
+            verbose_append("set_db init_power_nets {{{n}}}".format(n=power_nets))
+            verbose_append("set_db init_ground_nets {{{n}}}".format(n=ground_nets))
+
+        # Run init_design to validate data and start the Cadence place-and-route workflow.
+        verbose_append("init_design")
+
+        # Set up power settings from CPF/UPF.
+        for l in self.generate_power_spec_commands():
+            verbose_append(l)
+
+        # Set design effort.
+        verbose_append("set_db design_flow_effort {}".format(self.get_setting("par.innovus.design_flow_effort")))
+
+        # Set "don't use" cells.
+        for l in self.generate_dont_use_commands():
+            self.append(l)
+
+        return True
+
+    def floorplan_design(self) -> bool:
+        floorplan_tcl = os.path.join(self.run_dir, "floorplan.tcl")
+        self.write_contents_to_path("\n".join(self.create_floorplan_tcl()), floorplan_tcl)
+        self.verbose_append("source -echo -verbose {}".format(floorplan_tcl))
+        return True
+
+    def place_bumps(self) -> bool:
+        bumps = self.get_bumps()
+        if bumps is not None:
+            bump_array_width = Decimal(str((bumps.x - 1) * bumps.pitch_x))
+            bump_array_height = Decimal(str((bumps.y - 1) * bumps.pitch_y))
+            fp_consts = self.get_placement_constraints()
+            fp_width = Decimal(0)
+            fp_height = Decimal(0)
+            for const in fp_consts:
+                if const.type == PlacementConstraintType.TopLevel:
+                    fp_width = const.width
+                    fp_height = const.height
+            if fp_width == 0 or fp_height == 0:
+                raise ValueError("Floorplan does not specify a TopLevel constraint or it has no dimensions")
+            # Center the bump array in the middle of the floorplan
+            bump_offset_x = (Decimal(str(fp_width)) - bump_array_width) / 2 + bumps.global_x_offset
+            bump_offset_y = (Decimal(str(fp_height)) - bump_array_height) / 2 + bumps.global_y_offset
+            power_ground_nets = list(map(lambda x: x.name, self.get_independent_power_nets() + self.get_independent_ground_nets()))
+            # TODO: Fix this once the stackup supports vias ucb-bar/hammer#354
+            block_layer = self.get_setting("vlsi.technology.bump_block_cut_layer")  # type: str
+            for bump in bumps.assignments:
+                self.append("create_bump -cell {cell} -location_type cell_center -name_format \"Bump_{c}.{r}\" -orient r0 -location \"{x} {y}\"".format(
+                    cell=bump.custom_cell if bump.custom_cell is not None else bumps.cell,
+                    c=bump.x,
+                    r=bump.y,
+                    x=bump_offset_x + Decimal(str(bump.x - 1)) * Decimal(str(bumps.pitch_x)),
+                    y=bump_offset_y + Decimal(str(bump.y - 1)) * Decimal(str(bumps.pitch_y))))
+                if not bump.no_connect:
+                    if bump.name in power_ground_nets:
+                        self.append("select_bumps -bumps \"Bump_{x}.{y}\"".format(x=bump.x, y=bump.y))
+                        self.append("assign_pg_bumps -selected -nets {n}".format(n=bump.name))
+                        self.append("deselect_bumps")
+                    else:
+                        self.append("assign_signal_to_bump -bumps \"Bump_{x}.{y}\" -net {n}".format(x=bump.x, y=bump.y, n=bump.name))
+                self.append("create_route_blockage {layer_options} \"{llx} {lly} {urx} {ury}\"".format(
+                    layer_options="-layers {{{l}}} -rects".format(l=block_layer) if (self.version() >= self.version_number("181")) else "-cut_layers {{{l}}} -area".format(l=block_layer),
+                    llx="[get_db bump:Bump_{x}.{y} .bbox.ll.x]".format(x=bump.x, y=bump.y),
+                    lly="[get_db bump:Bump_{x}.{y} .bbox.ll.y]".format(x=bump.x, y=bump.y),
+                    urx="[get_db bump:Bump_{x}.{y} .bbox.ur.x]".format(x=bump.x, y=bump.y),
+                    ury="[get_db bump:Bump_{x}.{y} .bbox.ur.y]".format(x=bump.x, y=bump.y)))
+        return True
+
+    def place_tap_cells(self) -> bool:
+        tap_cells = self.technology.get_special_cell_by_type(CellType.TapCell)
+
+        if len(tap_cells) == 0:
+            self.logger.warning("Tap cells are improperly defined in the tech plugin and will not be added. This step should be overridden with a user hook, or a tapcell special cell should be added to the tech.json.")
+            return True
+
+        tap_cell = tap_cells[0].name[0]
+
+        try:
+            interval = self.get_setting("vlsi.technology.tap_cell_interval")
+            offset = self.get_setting("vlsi.technology.tap_cell_offset")
+            self.append("set_db add_well_taps_cell {TAP_CELL}".format(TAP_CELL=tap_cell))
+            self.append("add_well_taps -cell_interval {INTERVAL} -in_row_offset {OFFSET}".format(INTERVAL=interval, OFFSET=offset))
+        except KeyError:
+            pass
+        return True
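The bump-array centering arithmetic in place_bumps above is easiest to check with numbers. A worked sketch (all values invented):

    from decimal import Decimal

    # 4 x 3 bump array, 100um pitch, in a 1000um x 800um floorplan
    bumps_x, bumps_y, pitch = 4, 3, Decimal("100")
    fp_width, fp_height = Decimal("1000"), Decimal("800")
    array_w = (bumps_x - 1) * pitch    # 300
    array_h = (bumps_y - 1) * pitch    # 200
    offset_x = (fp_width - array_w) / 2   # 350: bump (1,1) lands at x=350
    offset_y = (fp_height - array_h) / 2  # 300: bump (1,1) lands at y=300
    # Bump (c, r) is placed at (offset_x + (c-1)*pitch, offset_y + (r-1)*pitch),
    # before any global_x_offset/global_y_offset is applied.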
+
+    def place_pins(self) -> bool:
+        fp_consts = self.get_placement_constraints()
+        topconst = None  # type: Optional[PlacementConstraint]
+        for const in fp_consts:
+            if const.type == PlacementConstraintType.TopLevel:
+                topconst = const
+        if topconst is None:
+            self.logger.fatal("Cannot find top-level constraints to place pins")
+            return False
+
+        const = cast(PlacementConstraint, topconst)
+        assert isinstance(const.margins, Margins), "Margins must be defined for the top level"
+        fp_llx = const.margins.left
+        fp_lly = const.margins.bottom
+        fp_urx = const.width - const.margins.right
+        fp_ury = const.height - const.margins.top
+
+        pin_assignments = self.get_pin_assignments()
+        self.verbose_append("set_db assign_pins_edit_in_batch true")
+
+        promoted_pins = []  # type: List[str]
+        for pin in pin_assignments:
+            if pin.preplaced:
+                # First set promoted pins
+                self.verbose_append("set_promoted_macro_pin -pins {{ {p} }}".format(p=pin.pins))
+                promoted_pins.extend(pin.pins.split())
+            else:
+                # TODO: Do we need pin blockages for our layers?
+                # It seems we will only need special pin blockages if the vias are larger than the straps.
+                cadence_side = None  # type: Optional[str]
+                if pin.side is not None:
+                    if pin.side == "internal":
+                        cadence_side = "inside"
+                    else:
+                        cadence_side = pin.side
+                side_arg = get_or_else(optional_map(cadence_side, lambda s: "-side " + s), "")
+
+                start_arg = ""
+                end_arg = ""
+                assign_arg = ""
+                pattern_arg = ""
+
+                if pin.location is None:
+                    start_arg = "-{start} {{ {sx} {sy} }}".format(
+                        start="start" if pin.side == "bottom" or pin.side == "right" else "end",
+                        sx=fp_urx if pin.side != "left" else fp_llx,
+                        sy=fp_ury if pin.side != "bottom" else fp_lly)
+                    end_arg = "-{end} {{ {ex} {ey} }}".format(
+                        end="end" if pin.side == "bottom" or pin.side == "right" else "start",
+                        ex=fp_llx if pin.side != "right" else fp_urx,
+                        ey=fp_lly if pin.side != "top" else fp_ury
+                    )
+                    if pin.layers and len(pin.layers) > 1:
+                        pattern_arg = "-pattern fill_optimised"
+                    else:
+                        pattern_arg = "-spread_type range"
+                else:
+                    assign_arg = "-assign {{ {x} {y} }}".format(x=pin.location[0], y=pin.location[1])
+
+                layers_arg = ""
+                if pin.layers is not None and len(pin.layers) > 0:
+                    layers_arg = "-layer {{ {} }}".format(" ".join(pin.layers))
+
+                width_arg = get_or_else(optional_map(pin.width, lambda f: "-pin_width {f}".format(f=f)), "")
+                depth_arg = get_or_else(optional_map(pin.depth, lambda f: "-pin_depth {f}".format(f=f)), "")
+
+                cmd = [
+                    "edit_pin",
+                    "-fixed_pin",
+                    "-pin", pin.pins,
+                    "-hinst", self.top_module,
+                    pattern_arg,
+                    layers_arg,
+                    side_arg,
+                    start_arg,
+                    end_arg,
+                    assign_arg,
+                    width_arg,
+                    depth_arg
+                ]
+
+                self.verbose_append(" ".join(cmd))
+
+        # In case the * wildcard is used after preplaced pins, this will place promoted pins correctly.
+        # Innovus errors instead of warns if the name matching does not work (e.g. bad wildcards).
+        for ppin in promoted_pins:
+            self.verbose_append("assign_io_pins -move_fixed_pin -pins [get_db [get_db pins -if {{.name == {p} }}] .net.name]".format(p=ppin))
+
+        self.verbose_append("set_db assign_pins_edit_in_batch false")
+        return True
+
+    def power_straps(self) -> bool:
+        """Place the power straps for the design."""
+        power_straps_tcl = os.path.join(self.run_dir, "power_straps.tcl")
+        self.write_contents_to_path("\n".join(self.create_power_straps_tcl()), power_straps_tcl)
+        self.verbose_append("source -echo -verbose {}".format(power_straps_tcl))
+        return True
+
+    def place_opt_design(self) -> bool:
+        """Place the design and do pre-routing optimization."""
+        self.verbose_append("place_opt_design")
+        return True
+
+    def clock_tree(self) -> bool:
+        """Set up and route a clock tree for clock nets."""
+        if self.hierarchical_mode.is_nonleaf_hierarchical():
+            self.verbose_append('''
+    flatten_ilm
+    update_constraint_mode -name my_constraint_mode -ilm_sdc_files {sdc}
+    '''.format(sdc=self.post_synth_sdc), clean=True)
+        # Skip clock tree synthesis entirely when the design has no clocks
+        if len(self.get_clock_ports()) > 0:
+            self.verbose_append("create_clock_tree_spec")
+            if bool(self.get_setting("par.innovus.use_cco")):
+                # -hold is a secret flag for ccopt_design (undocumented anywhere)
+                self.verbose_append("ccopt_design -hold -report_dir hammer_cts_debug -report_prefix hammer_cts")
+            else:
+                self.verbose_append("clock_design")
+        if self.hierarchical_mode.is_nonleaf_hierarchical():
+            self.verbose_append("unflatten_ilm")
+        return True
+
+    def add_fillers(self) -> bool:
+        """Add decap and filler cells."""
+        decaps = self.technology.get_special_cell_by_type(CellType.Decap)
+        stdfillers = self.technology.get_special_cell_by_type(CellType.StdFiller)
+
+        if len(decaps) == 0:
+            self.logger.info("The technology plugin 'special cells: decap' field does not exist. It should specify a list of decap cells. Filling with stdfiller instead.")
+        else:
+            decap_cells = decaps[0].name
+            decap_caps = []  # type: List[float]
+            if decaps[0].size is not None:
+                decap_caps = list(map(lambda x: CapacitanceValue(x).value_in_units("fF"), decaps[0].size))
+            if len(decap_cells) != len(decap_caps):
+                self.logger.error("Each decap cell in the name list must have a corresponding decapacitance value in the size list.")
+            decap_consts = list(filter(lambda x: x.target == "capacitance", self.get_decap_constraints()))
+            if len(decap_consts) > 0:
+                if decap_caps is None:
+                    self.logger.warning("No decap capacitances specified but decap constraints with target: 'capacitance' exist. Add decap capacitances to the tech plugin!")
+                else:
+                    for (cell, cap) in zip(decap_cells, decap_caps):
+                        self.append("add_decap_cell_candidates {CELL} {CAP}".format(CELL=cell, CAP=cap))
+                    for const in decap_consts:
+                        assert isinstance(const.capacitance, CapacitanceValue)
+                        area_str = ""
+                        if all(c is not None for c in (const.x, const.y, const.width, const.height)):
+                            assert isinstance(const.x, Decimal)
+                            assert isinstance(const.y, Decimal)
+                            assert isinstance(const.width, Decimal)
+                            assert isinstance(const.height, Decimal)
+                            area_str = " ".join(("-area", str(const.x), str(const.y), str(const.x + const.width), str(const.y + const.height)))
+                        self.verbose_append("add_decaps -effort high -total_cap {CAP} {AREA}".format(
+                            CAP=const.capacitance.value_in_units("fF"), AREA=area_str))
+
+        if len(stdfillers) == 0:
+            self.logger.warning(
+                "The technology plugin 'special cells: stdfiller' field does not exist. It should specify a list of (non-IO) filler cells. No filler will be added. You can override this with an add_fillers hook if you do not specify filler cells in the technology plugin.")
+        else:
+            # Decap cells as fillers
+            if len(decaps) > 0:
+                fill_cells = list(map(lambda c: str(c), decaps[0].name))
+                self.append("set_db add_fillers_cells \"{FILLER}\"".format(FILLER=" ".join(fill_cells)))
+                # Targeted decap constraints
+                decap_consts = list(filter(lambda x: x.target == "density", self.get_decap_constraints()))
+                for const in decap_consts:
+                    area_str = ""
+                    if all(c is not None for c in (const.x, const.y, const.width, const.height)):
+                        assert isinstance(const.x, Decimal)
+                        assert isinstance(const.y, Decimal)
+                        assert isinstance(const.width, Decimal)
+                        assert isinstance(const.height, Decimal)
+                        area_str = " ".join(("-area", str(const.x), str(const.y), str(const.x + const.width), str(const.y + const.height)))
+                    self.verbose_append("add_fillers -density {DENSITY} {AREA}".format(
+                        DENSITY=str(const.density), AREA=area_str))
+                # Or, fill everywhere if no decap constraints are given
+                if len(self.get_decap_constraints()) == 0:
+                    self.verbose_append("add_fillers")
+
+            # Then the rest is stdfillers
+            fill_cells = list(map(lambda c: str(c), stdfillers[0].name))
+            self.append("set_db add_fillers_cells \"{FILLER}\"".format(FILLER=" ".join(fill_cells)))
+            self.verbose_append("add_fillers")
+        return True
+
+    def route_design(self) -> bool:
+        """Route the design."""
+        if self.hierarchical_mode.is_nonleaf_hierarchical():
+            self.verbose_append("flatten_ilm")
+
+        # Allow express design effort to complete running.
+        # By default, route_design will abort in express mode with
+        # "WARNING (NRIG-142) Express flow by default will not run routing".
+        self.verbose_append("set_db design_express_route true")
+
+        self.verbose_append("route_design")
+        return True
+
+    def opt_design(self) -> bool:
+        """
+        Post-route optimization and fix setup & hold time violations.
+        -expanded_views creates timing reports for each MMMC view.
+        """
+        self.verbose_append("opt_design -post_route -setup -hold -expanded_views")
+        if self.hierarchical_mode.is_nonleaf_hierarchical():
+            self.verbose_append("unflatten_ilm")
+        return True
+
+    def assemble_design(self) -> bool:
+        # TODO: implement the assemble_design step.
+        return True
+
+    def write_netlist(self) -> bool:
+        # Don't use virtual connects (using colon, e.g. VSS:) because they mess up LVS.
+        self.verbose_append("set_db write_stream_virtual_connection false")
+
+        # Connect power nets that are tied together.
+        for pwr_gnd_net in (self.get_all_power_nets() + self.get_all_ground_nets()):
+            if pwr_gnd_net.tie is not None:
+                self.verbose_append("connect_global_net {tie} -type net -net_base_name {net}".format(tie=pwr_gnd_net.tie, net=pwr_gnd_net.name))
+
+        # Output the Verilog netlist for the design and include physical cells (-phys) like decaps and fill.
+        self.verbose_append("write_netlist {netlist} -top_module_first -top_module {top} -exclude_leaf_cells -phys -flat -exclude_insts_of_cells {{ {pcells} }} ".format(
+            netlist=self.output_netlist_filename,
+            top=self.top_module,
+            pcells=" ".join(self.get_physical_only_cells())
+        ))
+
+        self.verbose_append("write_netlist {netlist} -top_module_first -top_module {top} -exclude_leaf_cells -exclude_insts_of_cells {{ {pcells} }} ".format(
+            netlist=self.output_sim_netlist_filename,
+            top=self.top_module,
+            pcells=" ".join(self.get_physical_only_cells())
+        ))
+
+        return True
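write_netlist above first merges tied supply nets via connect_global_net, then writes two views: an LVS netlist keeping physical cells (-phys) and a sim netlist without them. A minimal sketch of the tie handling (a stand-in record; the real code uses hammer's Supply):

    class SupplyStub:
        """Minimal stand-in for hammer's Supply record (name plus optional tie target)."""
        def __init__(self, name, tie=None):
            self.name, self.tie = name, tie

    nets = [SupplyStub("VDD"), SupplyStub("VDDA", tie="VDD"), SupplyStub("VSS")]
    for n in nets:
        if n.tie is not None:
            print(f"connect_global_net {n.tie} -type net -net_base_name {n.name}")
    # -> connect_global_net VDD -type net -net_base_name VDDA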
+ unit = "" + if (self.get_setting("par.inputs.gds_precision_mode") == "manual"): + gds_precision = self.get_setting("par.inputs.gds_precision") or "" + valid_values = [100, 200, 1000, 2000, 10000, 20000] + if gds_precision in valid_values: + unit = "-unit %s" % gds_precision + else: + self.logger.error( + "par.inputs.gds_precision value of \"%s\" must be one of %s" %( + gds_precision, ', '.join([str(x) for x in valid_values]))); + return False + # "auto", i.e. not "manual", means not specifying anything extra. + + self.verbose_append( + "write_stream -mode ALL {map_file} {merge_options} {unit} {gds}".format( + map_file=map_file, + merge_options=merge_options, + gds=self.output_gds_filename, + unit=unit + )) + return True + + def write_sdf(self) -> bool: + if self.hierarchical_mode.is_nonleaf_hierarchical(): + self.verbose_append("flatten_ilm") + + # Output the Standard Delay Format File for use in timing annotated gate level simulations + self.verbose_append("write_sdf {run_dir}/{top}.par.sdf".format(run_dir=self.run_dir, top=self.top_module)) + + return True + + def write_spefs(self) -> bool: + # Output a SPEF file that contains the parasitic extraction results + self.verbose_append("set_db extract_rc_coupled true") + self.verbose_append("extract_rc") + corners = self.get_mmmc_corners() + if corners: + for corner in corners: + # Setting up views for all defined corner types: setup, hold, extra + if corner.type is MMMCCornerType.Setup: + corner_type_name = "setup" + elif corner.type is MMMCCornerType.Hold: + corner_type_name = "hold" + elif corner.type is MMMCCornerType.Extra: + corner_type_name = "extra" + else: + raise ValueError("Unsupported MMMCCornerType") + + self.verbose_append("write_parasitics -spef_file {run_dir}/{top}.{cname}.par.spef -rc_corner {cname}.{ctype}_rc".format(run_dir=self.run_dir, top=self.top_module, cname=corner.name, ctype=corner_type_name)) + + else: + self.verbose_append("write_parasitics -spef_file {run_dir}/{top}.par.spef".format(run_dir=self.run_dir, top=self.top_module)) + + return True + + + + @property + def output_innovus_lib_name(self) -> str: + return "{top}_FINAL".format(top=self.top_module) + + @property + def generated_scripts_dir(self) -> str: + return os.path.join(self.run_dir, "generated-scripts") + + @property + def open_chip_script(self) -> str: + return os.path.join(self.generated_scripts_dir, "open_chip") + + @property + def open_chip_tcl(self) -> str: + return self.open_chip_script + ".tcl" + + def write_regs(self) -> bool: + """write regs info to be read in for simulation register forcing""" + if self.hierarchical_mode.is_nonleaf_hierarchical(): + self.append('flatten_ilm') + self.append(self.child_modules_tcl()) + self.append(self.write_regs_tcl()) + if self.hierarchical_mode.is_nonleaf_hierarchical(): + self.append('unflatten_ilm') + self.ran_write_regs = True + return True + + def write_design(self) -> bool: + # Save the Innovus design. + self.verbose_append("write_db {lib_name} -def -verilog".format( + lib_name=self.output_innovus_lib_name + )) + + # Write netlist + self.write_netlist() + + # GDS streamout. + self.write_gds() + + # Write SDF + self.write_sdf() + + # Write SPEF + self.write_spefs() + + # Make sure that generated-scripts exists. 
+
+    @property
+    def output_innovus_lib_name(self) -> str:
+        return "{top}_FINAL".format(top=self.top_module)
+
+    @property
+    def generated_scripts_dir(self) -> str:
+        return os.path.join(self.run_dir, "generated-scripts")
+
+    @property
+    def open_chip_script(self) -> str:
+        return os.path.join(self.generated_scripts_dir, "open_chip")
+
+    @property
+    def open_chip_tcl(self) -> str:
+        return self.open_chip_script + ".tcl"
+
+    def write_regs(self) -> bool:
+        """Write register info to be read in for simulation register forcing."""
+        if self.hierarchical_mode.is_nonleaf_hierarchical():
+            self.append('flatten_ilm')
+        self.append(self.child_modules_tcl())
+        self.append(self.write_regs_tcl())
+        if self.hierarchical_mode.is_nonleaf_hierarchical():
+            self.append('unflatten_ilm')
+        self.ran_write_regs = True
+        return True
+
+    def write_design(self) -> bool:
+        # Save the Innovus design.
+        self.verbose_append("write_db {lib_name} -def -verilog".format(
+            lib_name=self.output_innovus_lib_name
+        ))
+
+        # Write netlist
+        self.write_netlist()
+
+        # GDS streamout.
+        self.write_gds()
+
+        # Write SDF
+        self.write_sdf()
+
+        # Write SPEF
+        self.write_spefs()
+
+        # Make sure that generated-scripts exists.
+        os.makedirs(self.generated_scripts_dir, exist_ok=True)
+
+        self.ran_write_design = True
+
+        return True
+
+    @property
+    def ran_write_regs(self) -> bool:
+        """The write_regs step sets this to True if it was run."""
+        return self.attr_getter("_ran_write_regs", False)
+
+    @ran_write_regs.setter
+    def ran_write_regs(self, val: bool) -> None:
+        self.attr_setter("_ran_write_regs", val)
+
+    @property
+    def ran_write_design(self) -> bool:
+        """The write_design step sets this to True if it was run."""
+        return self.attr_getter("_ran_write_design", False)
+
+    @ran_write_design.setter
+    def ran_write_design(self, val: bool) -> None:
+        self.attr_setter("_ran_write_design", val)
+
+    @property
+    def ran_write_ilm(self) -> bool:
+        """The write_ilm stage sets this to True if it was run."""
+        return self.attr_getter("_ran_write_ilm", False)
+
+    @ran_write_ilm.setter
+    def ran_write_ilm(self, val: bool) -> None:
+        self.attr_setter("_ran_write_ilm", val)
+
+    @property
+    def ilm_dir_name(self) -> str:
+        return os.path.join(self.run_dir, "{top}ILMDir".format(top=self.top_module))
+
+    def write_ilm(self) -> bool:
+        """Run time_design and write out the ILM."""
+        self.verbose_append("time_design -post_route")
+        self.verbose_append("time_design -post_route -hold")
+        self.verbose_append("check_process_antenna")
+        self.verbose_append("write_lef_abstract -5.8 {top}ILM.lef".format(top=self.top_module))
+        self.verbose_append("write_ilm -model_type all -to_dir {ilm_dir_name} -type_flex_ilm ilm".format(
+            ilm_dir_name=self.ilm_dir_name))
+        self.ran_write_ilm = True
+        return True
+
+    def run_innovus(self) -> bool:
+        # Quit Innovus.
+        self.verbose_append("exit")
+
+        # Create par script.
+        par_tcl_filename = os.path.join(self.run_dir, "par.tcl")
+        self.write_contents_to_path("\n".join(self.output), par_tcl_filename)
+
+        # Make sure that generated-scripts exists.
+        os.makedirs(self.generated_scripts_dir, exist_ok=True)
+
+        # Create the open_chip script pointing to latest (symlinked to post_<last step>).
+        self.output.clear()
+        assert super().do_pre_steps(self.first_step)
+        self.append("read_db latest")
+        self.write_contents_to_path("\n".join(self.output), self.open_chip_tcl)
+
+        with open(self.open_chip_script, "w") as f:
+            f.write("""#!/bin/bash
+        cd {run_dir}
+        source enter
+        $INNOVUS_BIN -common_ui -win -files {open_chip_tcl}
+        """.format(run_dir=self.run_dir, open_chip_tcl=self.open_chip_tcl))
+        os.chmod(self.open_chip_script, 0o755)
+
+        # Build args.
+        args = [
+            self.get_setting("par.innovus.innovus_bin"),
+            "-nowin",  # Prevent the GUI popping up.
+            "-common_ui",
+            "-files", par_tcl_filename
+        ]
+
+        # Temporarily disable colours/tag to make run output more readable.
+        # TODO: think of a more elegant way to do this?
+        HammerVLSILogging.enable_colour = False
+        HammerVLSILogging.enable_tag = False
+        self.run_executable(args, cwd=self.run_dir)  # TODO: check for errors and deal with them
+        HammerVLSILogging.enable_colour = True
+        HammerVLSILogging.enable_tag = True
+
+        # TODO: check that par run was successful
+
+        return True
+
+    def create_floorplan_tcl(self) -> List[str]:
+        """
+        Create a floorplan TCL depending on the floorplan mode.
+        """
+        output = []  # type: List[str]
+
+        floorplan_mode = str(self.get_setting("par.innovus.floorplan_mode"))
+        if floorplan_mode == "manual":
+            floorplan_script_contents = str(self.get_setting("par.innovus.floorplan_script_contents"))
+            # TODO(edwardw): proper source locators/SourceInfo
+            output.append("# Floorplan manually specified from HAMMER")
+            output.extend(floorplan_script_contents.split("\n"))
+        elif floorplan_mode == "generate":
+            output.extend(self.generate_floorplan_tcl())
+        elif floorplan_mode == "auto":
+            output.append("# Using auto-generated floorplan")
+            output.append("plan_design")
+            spacing = self.get_setting("par.blockage_spacing")
+            bot_layer = self.get_stackup().get_metal_by_index(1).name
+            top_layer = self.get_setting("par.blockage_spacing_top_layer")
+            if top_layer is not None:
+                output.append("create_place_halo -all_blocks -halo_deltas {{{s} {s} {s} {s}}} -snap_to_site".format(
+                    s=spacing))
+                output.append("create_route_halo -all_blocks -bottom_layer {b} -space {s} -top_layer {t}".format(
+                    b=bot_layer, t=top_layer, s=spacing))
+        else:
+            if floorplan_mode != "blank":
+                self.logger.error("Invalid floorplan_mode {mode}. Using blank floorplan.".format(mode=floorplan_mode))
+            # Write blank floorplan
+            output.append("# Blank floorplan specified from HAMMER")
+        return output
+
+    @staticmethod
+    def generate_chip_size_constraint(width: Decimal, height: Decimal, left: Decimal, bottom: Decimal, right: Decimal,
+                                      top: Decimal, site: str) -> str:
+        """
+        Given chip width/height and margins, generate an Innovus TCL command to create the floorplan.
+        Also requires a technology-specific name for the core site.
+        """
+        site_str = "-site " + site
+
+        # -flip f allows standard cells to be flipped correctly during place-and-route.
+        return ("create_floorplan -core_margins_by die -flip f "
+                "-die_size_by_io_height max {site_str} "
+                "-die_size {{ {width} {height} {left} {bottom} {right} {top} }}").format(
+            site_str=site_str,
+            width=width,
+            height=height,
+            left=left,
+            bottom=bottom,
+            right=right,
+            top=top
+        )
+
+    def generate_floorplan_tcl(self) -> List[str]:
+        """
+        Generate a TCL floorplan for Innovus based on the input config/IR.
+        Not to be confused with create_floorplan_tcl, which calls this function.
+        """
+        output = []  # type: List[str]
+
+        # TODO(edwardw): proper source locators/SourceInfo
+        output.append("# Floorplan automatically generated from HAMMER")
+
+        # Top-level chip size constraint.
+        # Default/fallback constraints if no other constraints are provided.
+        # TODO: snap this to a core site
+        chip_size_constraint = self.generate_chip_size_constraint(
+            site=self.technology.get_placement_site().name,
+            width=Decimal("1000"), height=Decimal("1000"),
+            left=Decimal("100"), bottom=Decimal("100"),
+            right=Decimal("100"), top=Decimal("100")
+        )
+
+        floorplan_constraints = self.get_placement_constraints()
+        global_top_layer = self.get_setting("par.blockage_spacing_top_layer")  # type: Optional[str]
+
+        ############## Actually generate the constraints ################
+        for constraint in floorplan_constraints:
+            # Floorplan names/insts need to not include the top-level module,
+            # despite the internal get_db commands including the top-level module...
+            # e.g. Top/foo/bar -> foo/bar
+            new_path = "/".join(constraint.path.split("/")[1:])
+
+            if new_path == "":
+                assert constraint.type == PlacementConstraintType.TopLevel, "Top must be a top-level/chip size constraint"
+                margins = constraint.margins
+                assert margins is not None
+                # Set top-level chip dimensions.
+                chip_size_constraint = self.generate_chip_size_constraint(
+                    site=self.technology.get_placement_site().name,
+                    width=constraint.width,
+                    height=constraint.height,
+                    left=margins.left,
+                    bottom=margins.bottom,
+                    right=margins.right,
+                    top=margins.top
+                )
+            else:
+                orientation = constraint.orientation if constraint.orientation is not None else "r0"
+                if constraint.create_physical:
+                    output.append("create_inst -cell {cell} -inst {inst} -location {{{x} {y}}} -orient {orientation} -physical -status fixed".format(
+                        cell=constraint.master,
+                        inst=new_path,
+                        x=constraint.x,
+                        y=constraint.y,
+                        orientation=orientation
+                    ))
+                if constraint.type == PlacementConstraintType.Dummy:
+                    pass
+                elif constraint.type == PlacementConstraintType.Placement:
+                    output.append("create_guide -name {name} -area {x1} {y1} {x2} {y2}".format(
+                        name=new_path,
+                        x1=constraint.x,
+                        x2=constraint.x + constraint.width,
+                        y1=constraint.y,
+                        y2=constraint.y + constraint.height
+                    ))
+                elif constraint.type == PlacementConstraintType.Overlap:
+                    output.append("place_inst {inst} {x} {y} {orientation}{fixed}".format(
+                        inst=new_path,
+                        x=constraint.x,
+                        y=constraint.y,
+                        orientation=orientation,
+                        fixed=" -fixed" if constraint.create_physical else ""
+                    ))
+                elif constraint.type in [PlacementConstraintType.HardMacro, PlacementConstraintType.Hierarchical]:
+                    output.append("place_inst {inst} {x} {y} {orientation}{fixed}".format(
+                        inst=new_path,
+                        x=constraint.x,
+                        y=constraint.y,
+                        orientation=orientation,
+                        fixed=" -fixed" if constraint.create_physical else ""
+                    ))
+                    spacing = self.get_setting("par.blockage_spacing")
+                    if constraint.top_layer is not None:
+                        current_top_layer = constraint.top_layer  # type: Optional[str]
+                    elif global_top_layer is not None:
+                        current_top_layer = global_top_layer
+                    else:
+                        current_top_layer = None
+                    if current_top_layer is not None:
+                        bot_layer = self.get_stackup().get_metal_by_index(1).name
+                        output.append("create_place_halo -insts {inst} -halo_deltas {{{s} {s} {s} {s}}} -snap_to_site".format(
+                            inst=new_path, s=spacing))
+                        output.append("create_route_halo -bottom_layer {b} -space {s} -top_layer {t} -inst {inst}".format(
+                            inst=new_path, b=bot_layer, t=current_top_layer, s=spacing))
+                elif constraint.type == PlacementConstraintType.Obstruction:
+                    obs_types = get_or_else(constraint.obs_types, [])  # type: List[ObstructionType]
+                    if ObstructionType.Place in obs_types:
+                        output.append("create_place_blockage -area {{{x} {y} {urx} {ury}}}".format(
+                            x=constraint.x,
+                            y=constraint.y,
+                            urx=constraint.x + constraint.width,
+                            ury=constraint.y + constraint.height
+                        ))
+                    if ObstructionType.Route in obs_types:
+                        output.append("create_route_blockage -layers {{{layers}}} -spacing 0 -{area_flag} {{{x} {y} {urx} {ury}}}".format(
+                            x=constraint.x,
+                            y=constraint.y,
+                            urx=constraint.x + constraint.width,
+                            ury=constraint.y + constraint.height,
+                            area_flag="rects" if self.version() >= self.version_number("181") else "area",
+                            layers="all" if constraint.layers is None else " ".join(get_or_else(constraint.layers, []))
+                        ))
+                    if ObstructionType.Power in obs_types:
+                        output.append("create_route_blockage -pg_nets -layers {{{layers}}} -{area_flag} {{{x} {y} {urx} {ury}}}".format(
+                            x=constraint.x,
+                            y=constraint.y,
+                            urx=constraint.x + constraint.width,
+                            ury=constraint.y + constraint.height,
+                            area_flag="rects" if self.version() >= self.version_number("181") else "area",
+                            layers="all" if constraint.layers is None else " ".join(get_or_else(constraint.layers, []))
+                        ))
+                else:
+                    assert False, "Should not reach here"
+        return [chip_size_constraint] + output
+
+    def specify_std_cell_power_straps(self, blockage_spacing: Decimal, bbox: Optional[List[Decimal]], nets: List[str]) -> List[str]:
+        """
+        Generate a list of TCL commands that build the low-level standard cell power strap rails.
+        This will use the -master option to create power straps based on technology.core.tap_cell_rail_reference.
+        The layer is set by technology.core.std_cell_rail_layer, which should be the highest metal layer in the std cell rails.
+
+        :param blockage_spacing: The minimum spacing between the end of a strap and a macro or blockage.
+        :param bbox: The optional (2N)-point bounding box of the area to generate straps. By default the entire core area is used.
+        :param nets: A list of power net names (e.g. ["VDD", "VSS"]). Currently only two are supported.
+        :return: A list of TCL commands that will generate power straps on rails.
+        """
+        layer_name = self.get_setting("technology.core.std_cell_rail_layer")
+        layer = self.get_stackup().get_metal(layer_name)
+        results = [
+            "# Power strap definition for layer {} (rails):\n".format(layer_name),
+            "reset_db -category add_stripes",
+            "set_db add_stripes_stacked_via_bottom_layer {}".format(layer_name),
+            "set_db add_stripes_stacked_via_top_layer {}".format(layer_name),
+            "set_db add_stripes_spacing_from_block {}".format(blockage_spacing)
+        ]
+        tapcell = self.get_setting("technology.core.tap_cell_rail_reference")
+        options = [
+            "-pin_layer", layer_name,
+            "-layer", layer_name,
+            "-over_pins", "1",
+            "-master", "\"{}\"".format(tapcell),
+            "-block_ring_bottom_layer_limit", layer_name,
+            "-block_ring_top_layer_limit", layer_name,
+            "-pad_core_ring_bottom_layer_limit", layer_name,
+            "-pad_core_ring_top_layer_limit", layer_name,
+            "-direction", str(layer.direction),
+            "-width", "pin_width",
+            "-nets", "{ %s }" % " ".join(nets)
+        ]
+        if bbox is not None:
+            options.extend([
+                "-area", "{ %s }" % " ".join(map(str, bbox))
+            ])
+        results.append("add_stripes " + " ".join(options) + "\n")
+        return results
+
+    def specify_power_straps(self, layer_name: str, bottom_via_layer_name: str, blockage_spacing: Decimal, pitch: Decimal, width: Decimal, spacing: Decimal, offset: Decimal, bbox: Optional[List[Decimal]], nets: List[str], add_pins: bool) -> List[str]:
+        """
+        Generate a list of TCL commands that will create power straps on a given layer.
+        This is a low-level, cad-tool-specific API. It is designed to be called by higher-level methods, so calling this directly is not recommended.
+        This method assumes that power straps are built bottom-up, starting with standard cell rails.
+
+        :param layer_name: The layer name of the metal on which to create straps.
+        :param bottom_via_layer_name: The layer name of the lowest metal layer down to which to drop vias.
+        :param blockage_spacing: The minimum spacing between the end of a strap and the beginning of a macro or blockage.
+        :param pitch: The pitch between groups of power straps (i.e. from left edge of strap A to the next left edge of strap A).
+        :param width: The width of each strap in a group.
+        :param spacing: The spacing between straps in a group.
+        :param offset: The offset to start the first group.
+        :param bbox: The optional (2N)-point bounding box of the area to generate straps. By default the entire core area is used.
+        :param nets: A list of power nets to create (e.g. ["VDD", "VSS"], ["VDDA", "VSS", "VDDB"], ... etc.).
+        :param add_pins: True if pins are desired on this layer; False otherwise.
+        :return: A list of TCL commands that will generate power straps.
+        """
+        # TODO: check that this has not been called after a higher-level metal and error if so
+        # TODO: warn if the straps are off-pitch
+        results = ["# Power strap definition for layer %s:\n" % layer_name]
+        results.extend([
+            "reset_db -category add_stripes",
+            "set_db add_stripes_stacked_via_top_layer {}".format(layer_name),
+            "set_db add_stripes_stacked_via_bottom_layer {}".format(bottom_via_layer_name),
+            "set_db add_stripes_trim_antenna_back_to_shape {stripe}",
+            "set_db add_stripes_spacing_from_block {}".format(blockage_spacing)
+        ])
+        layer = self.get_stackup().get_metal(layer_name)
+        options = [
+            "-create_pins", "1" if add_pins else "0",
+            "-block_ring_bottom_layer_limit", layer_name,
+            "-block_ring_top_layer_limit", bottom_via_layer_name,
+            "-direction", str(layer.direction),
+            "-layer", layer_name,
+            "-nets", "{%s}" % " ".join(nets),
+            "-pad_core_ring_bottom_layer_limit", bottom_via_layer_name,
+            "-set_to_set_distance", str(pitch),
+            "-spacing", str(spacing),
+            "-switch_layer_over_obs", "0",
+            "-width", str(width)
+        ]
+        # Which bbox coordinate provides the io-to-core offset depends on the strap direction.
+        index = 0
+        if layer.direction == RoutingDirection.Horizontal:
+            index = 1
+        elif layer.direction != RoutingDirection.Vertical:
+            raise ValueError("Cannot handle routing direction {d} for layer {l} when creating power straps".format(d=str(layer.direction), l=layer_name))
+
+        if bbox is not None:
+            options.extend([
+                "-area", "{ %s }" % " ".join(map(str, bbox)),
+                "-start", str(offset + bbox[index])
+            ])
+        else:
+            # Just put straps in the core area
+            options.extend([
+                "-area", "[get_db designs .core_bbox]",
+                "-start", "[expr [lindex [lindex [get_db designs .core_bbox] 0] {index}] + {offset}]".format(index=index, offset=offset)
+            ])
+        results.append("add_stripes " + " ".join(options) + "\n")
+        return results
+
+
+def innovus_global_settings(ht: HammerTool) -> bool:
+    """Settings that need to be reapplied at every tool invocation."""
+    assert isinstance(ht, HammerPlaceAndRouteTool)
+    assert isinstance(ht, CadenceTool)
+    ht.create_enter_script()
+
+    # Local alias to cut verbosity
+    verbose_append = ht.verbose_append
+
+    # Generic settings
+    verbose_append("set_db design_process_node {}".format(ht.get_setting("vlsi.core.node")))
+    verbose_append("set_multi_cpu_usage -local_cpu {}".format(ht.get_setting("vlsi.core.max_threads")))
+
+    return True
+
+
+tool = Innovus
+ # - generate - Generate a floorplan from the hammer-vlsi config/IR. + # - auto - Use Innovus's `plan_design` command to auto-generate a floorplan. Will have poor, but sane results + # If you specify this, you must also specify the floorplanning_script config below. The floorplanning_script config will be ignored in other modes. + # TODO(edwardw): Put in new floorplanning thing here when done. + floorplan_mode: generate + floorplan_script_contents: null + + # Use "clock concurrent optimization" in clock tree synthesis. + # If true, uses the ccopt_design command which includes this feature + # by default in Innovus Common UI. + # Note that this requires an optional licence (enccco). + # type: bool + use_cco: true diff --git a/hammer/par/innovus/dump_stackup_to_json.tcl b/hammer/par/innovus/dump_stackup_to_json.tcl new file mode 100644 index 000000000..3a19b5214 --- /dev/null +++ b/hammer/par/innovus/dump_stackup_to_json.tcl @@ -0,0 +1,69 @@ +# Run me in innovus + +set json "./stackup.json" +set rdl_layer AP + +set json [open $json "w"] + +set layers {} + +foreach layer [get_db layers] { + set index [get_db $layer .route_index] + set direction [get_db $layer .direction] + set name [get_db $layer .name] + # If the index is -1 it's not a routing layer + if {$index >= 0 && $direction != "unassigned"} { + if {$direction == "vertical"} { + set offset [get_db $layer .offset_x] + } else { + set offset [get_db $layer .offset_y] + } + if {$name == $rdl_layer} { + set offset 0.0 + set direction "redistribution" + } + set min_width [get_db $layer .min_width] + set max_width [get_db $layer .max_width] + set min_spacing [get_db $layer .min_spacing] + # Note: there is a pitch field in the database, but it's for same-colored metals + # We are writing this to be color-agnostic, so we'll assume that the pitch is the + # Red-to-Green pitch + set pitch [expr $min_width + $min_spacing] + set spacing_table [get_db $layer .spacing_tables] + # FIXME This might be broken? What do we do with more than one spacing table? + # For now take the last one, since that appears to have correct data for colored metals + set spacing_table [lrange [lindex [lindex $spacing_table 0] end] 2 end] + set widths_and_spacings {} + if { [llength $spacing_table] == 0} { + lappend widths_and_spacings "\{\"width_at_least\": 0.0, \"min_spacing\": $min_spacing\}" + } else { + foreach line $spacing_table { + lappend widths_and_spacings "\{\"width_at_least\": [lindex $line 1], \"min_spacing\": [lindex $line end]\}" + } + } + set output " \{" + append output "\"name\": \"$name\", " + append output "\"index\": $index, " + append output "\"direction\": \"$direction\", " + append output "\"min_width\": $min_width, " + append output "\"max_width\": $max_width, " + append output "\"pitch\": $pitch, " + append output "\"offset\": $offset, " + append output {"power_strap_widths_and_spacings": [} + append output [join $widths_and_spacings ", "] + append output "\]\}" + + lappend layers $output + } +} + +puts $json " \{" +puts $json { "name" : "TODO",} +puts $json { "metals": [} + +puts $json [join $layers ",\n"] + +puts $json " \]" +puts $json " \}" + +close $json diff --git a/hammer/power/joules/__init__.py b/hammer/power/joules/__init__.py new file mode 100644 index 000000000..908e8c0d6 --- /dev/null +++ b/hammer/power/joules/__init__.py @@ -0,0 +1,256 @@ +# hammer-vlsi plugin for Cadence Joules. +# +# See LICENSE for licence details. 
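The stackup JSON written by dump_stackup_to_json.tcl above can be consumed directly from Python. A minimal sketch, assuming a hypothetical single-metal file whose field names mirror what the script emits:

    import json

    sample = '{"name": "TODO", "metals": [{"name": "M1", "index": 1, ' \
             '"direction": "vertical", "min_width": 0.1, "max_width": 10.0, ' \
             '"pitch": 0.2, "offset": 0.1, "power_strap_widths_and_spacings": ' \
             '[{"width_at_least": 0.0, "min_spacing": 0.1}]}]}'
    stackup = json.loads(sample)
    for metal in stackup["metals"]:
        # The TCL derives pitch color-agnostically as min_width + min_spacing.
        derived = metal["min_width"] + metal["power_strap_widths_and_spacings"][0]["min_spacing"]
        print(metal["name"], metal["direction"], metal["pitch"], derived)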
+ +from typing import List, Dict, Optional + +import os + +from hammer.vlsi import HammerPowerTool, HammerToolStep, MMMCCornerType, FlowLevel +from hammer.logging import HammerVLSILogging + +from hammer.common.cadence import CadenceTool + + +class Joules(HammerPowerTool, CadenceTool): + + @property + def post_synth_sdc(self) -> Optional[str]: + return None + + def tool_config_prefix(self) -> str: + return "power.joules" + + @property + def env_vars(self) -> Dict[str, str]: + new_dict = dict(super().env_vars) + new_dict["JOULES_BIN"] = self.get_setting("power.joules.joules_bin") + return new_dict + + @property + def steps(self) -> List[HammerToolStep]: + return self.make_steps_from_methods([ + self.check_level, + self.init_technology, + self.init_design, + self.read_stimulus, + self.synthesize_design, + self.compute_power, + self.report_power, + self.run_joules + ]) + + def check_level(self) -> bool: + if self.level == FlowLevel.RTL or self.level == FlowLevel.SYN: + return True + else: + self.logger.error("The FlowLevel is invalid. The Joules plugin only supports RTL and post-synthesis analysis. Check your power tool setting and flow step.") + return False + + def init_technology(self) -> bool: + # libs, define RAMs, define corners + verbose_append = self.verbose_append + verbose_append("set_multi_cpu_usage -local_cpu {}".format(self.get_setting("vlsi.core.max_threads"))) + + corners = self.get_mmmc_corners() + if MMMCCornerType.Extra in list(map(lambda corner: corner.type, corners)): + for corner in corners: + if corner.type is MMMCCornerType.Extra: + verbose_append("read_libs {EXTRA_LIBS} -domain extra -infer_memory_cells".format(EXTRA_LIBS=self.get_timing_libs(corner))) + break + elif MMMCCornerType.Setup in list(map(lambda corner: corner.type, corners)): + for corner in corners: + if corner.type is MMMCCornerType.Setup: + verbose_append("read_libs {SETUP_LIBS} -domain setup -infer_memory_cells".format(SETUP_LIBS=self.get_timing_libs(corner))) + break + elif MMMCCornerType.Hold in list(map(lambda corner: corner.type, corners)): + for corner in corners: + if corner.type is MMMCCornerType.Hold: + verbose_append("read_libs {HOLD_LIBS} -domain hold -infer_memory_cells".format(HOLD_LIBS=self.get_timing_libs(corner))) + break + else: + self.logger.error("No corners found") + return False + return True + + def init_design(self) -> bool: + verbose_append = self.verbose_append + + top_module = self.get_setting("power.inputs.top_module") + tb_name = self.tb_name + # Replace . 
to / formatting in case argument passed from sim tool + tb_dut = self.tb_dut.replace(".", "/") + + + if self.level == FlowLevel.RTL: + # Read in the design files + verbose_append("read_hdl -sv {}".format(" ".join(self.input_files))) + + # Setup the power specification + power_spec_arg = self.map_power_spec_name() + power_spec_file = self.create_power_spec() + if not power_spec_arg or not power_spec_file: + return False + + verbose_append("read_power_intent -{tpe} {spec} -module {TOP_MODULE}".format(tpe=power_spec_arg, spec=power_spec_file, TOP_MODULE=top_module)) + + # Set options pre-elaboration + verbose_append("set_db leakage_power_effort medium") + verbose_append("set_db lp_insert_clock_gating true") + + if self.level == FlowLevel.RTL: + # Elaborate the design + verbose_append("elaborate {TOP_MODULE}".format(TOP_MODULE=top_module)) + elif self.level == FlowLevel.SYN: + # Read in the synthesized netlist + verbose_append("read_netlist {}".format(" ".join(self.input_files))) + + # Read in the post-synth SDCs + verbose_append("read_sdc {}".format(self.sdc)) + + return True + + def read_stimulus(self) -> bool: + verbose_append = self.verbose_append + + top_module = self.get_setting("power.inputs.top_module") + tb_name = self.tb_name + # Replace . to / formatting in case argument passed from sim tool + tb_dut = self.tb_dut.replace(".", "/") + + # Generate average power report for all waveforms + waveforms = self.waveforms + for i, waveform in enumerate(waveforms): + verbose_append("read_stimulus -file {WAVE} -dut_instance {TB}/{DUT} -alias {WAVE_NAME}_{NUM} -append".format(WAVE=waveform, TB=tb_name, DUT=tb_dut, WAVE_NAME=os.path.basename(waveform), NUM=i)) + + # Generate Specified and Custom Reports + reports = self.get_power_report_configs() + + for i, report in enumerate(reports): + waveform = os.path.basename(report.waveform_path) + + read_stim_cmd = "read_stimulus -file {WAVE_PATH} -dut_instance {TB}/{DUT} -append".format(WAVE_PATH=report.waveform_path, TB=tb_name, DUT=tb_dut) + + if report.start_time: + read_stim_cmd += " -start {STIME}".format(STIME=report.start_time.value_in_units("ns")) + + if report.end_time: + read_stim_cmd += " -end {ETIME}".format(ETIME=report.end_time.value_in_units("ns")) + + if report.toggle_signal: + if report.num_toggles: + read_stim_cmd += " -cycles {NUM} {SIGNAL}".format(NUM=report.num_toggles, SIGNAL=report.toggle_signal) + else: + self.logger.error("Must specify the number of toggles if the toggle signal is specified.") + return False + + if report.frame_count: + read_stim_cmd += " -frame_count {FRAME_COUNT}".format(FRAME_COUNT=report.frame_count) + + + read_stim_cmd += " -alias report_{WAVE}_{NUM}".format(WAVE=waveform, NUM=i) + + verbose_append(read_stim_cmd) + + saifs = self.get_setting("power.inputs.saifs") + for saif in saifs: + saif_basename = os.path.basename(saif) + verbose_append("read_stimulus {SAIF} -dut_instance {TB}/{DUT} -format saif -alias {NAME} -append".format(SAIF=saif, TB=tb_name, DUT=tb_dut, NAME=saif_basename)) + + return True + + + def synthesize_design(self) -> bool: + verbose_append = self.verbose_append + + if self.level == FlowLevel.RTL: + # Generate and read the SDCs + sdc_files = self.generate_sdc_files() # type: List[str] + verbose_append("read_sdc {}".format(" ".join(sdc_files))) + verbose_append("syn_power -effort medium") + + return True + + + def compute_power(self) -> bool: + verbose_append = self.verbose_append + + verbose_append("compute_power -mode time_based") + + return True + + + def report_power(self) -> bool: + 
verbose_append = self.verbose_append + + top_module = self.get_setting("power.inputs.top_module") + tb_name = self.tb_name + # Replace . to / formatting in case argument passed from sim tool + tb_dut = self.tb_dut.replace(".", "/") + + for i, wave in enumerate(self.waveforms): + verbose_append("report_power -stims {WAVE}_{NUM} -indent_inst -unit mW -append -out waveforms.report".format(WAVE=os.path.basename(wave), NUM=i)) + + reports = self.get_power_report_configs() + + for i, report in enumerate(reports): + waveform = os.path.basename(report.waveform_path) + + module_str = "" + if report.module: + module_str = " -module {MODULE}".format(MODULE=report.module) + + levels_str = "" + if report.levels: + levels_str = " -levels {LEVELS}".format(LEVELS=report.levels) + + stim_alias = "report_{WAVE}_{NUM}".format(WAVE=waveform, NUM=i) + if report.report_name: + report_name = report.report_name + else: + report_name = stim_alias + ".report" + + verbose_append("report_power -frames [get_sdb_frames {STIM_ALIAS}] -collate none -cols total -by_hierarchy{MODULE}{LEVELS} -indent_inst -unit mW -out {REPORT_NAME}".format( + STIM_ALIAS=stim_alias, + MODULE=module_str, + LEVELS=levels_str, + REPORT_NAME=report_name)) + + saifs = self.get_setting("power.inputs.saifs") + for saif in saifs: + saif_basename = os.path.basename(saif) + verbose_append("report_power -stims {SAIF} -indent_inst -unit mW -out {SAIF}.report".format(SAIF=saif_basename)) + + return True + + def run_joules(self) -> bool: + verbose_append = self.verbose_append + + """Close out the power script and run Joules""" + # Quit Joules + verbose_append("exit") + + # Create power analysis script + joules_tcl_filename = os.path.join(self.run_dir, "joules.tcl") + self.write_contents_to_path("\n".join(self.output), joules_tcl_filename) + + # Build args + args = [ + self.get_setting("power.joules.joules_bin"), + "-files", joules_tcl_filename, + "-common_ui" + ] + + HammerVLSILogging.enable_colour = False + HammerVLSILogging.enable_tag = False + + self.run_executable(args, cwd=self.run_dir) + + HammerVLSILogging.enable_colour = True + HammerVLSILogging.enable_tag = True + + return True + + + +tool = Joules diff --git a/hammer/power/joules/defaults.yml b/hammer/power/joules/defaults.yml new file mode 100644 index 000000000..1d0d3c34a --- /dev/null +++ b/hammer/power/joules/defaults.yml @@ -0,0 +1,11 @@ +# Configuration options and defaults for Joules. +# The values specified in this file are the defaults. + +power.joules: + # Location of the binary. + joules_bin: "${cadence.cadence_home}/JLS/JLS${power.joules.version}/bin/joules" + joules_bin_meta: lazysubst # we want later overrides to be able to affect this + + # Joules version to use. + # Used to locate the binary - e.g. the '201' in ${cadence.cadence_home}/JLS/JLS201/bin/joules + version: "201" diff --git a/hammer/power/voltus/__init__.py b/hammer/power/voltus/__init__.py new file mode 100644 index 000000000..9ebc2f96a --- /dev/null +++ b/hammer/power/voltus/__init__.py @@ -0,0 +1,760 @@ +# hammer-vlsi plugin for Cadence Voltus. +# +# See LICENSE for licence details. 
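The read_stimulus/report_power pairing above keys every report to a stimulus alias. A minimal sketch of the command strings being assembled, with hypothetical testbench and waveform names:

    import os

    tb_name, tb_dut = "TestDriver", "testHarness/chiptop"       # hypothetical
    waveforms = ["/abs/path/run1.fsdb", "/abs/path/run2.fsdb"]  # hypothetical
    lines = []
    for i, wave in enumerate(waveforms):
        alias = "{}_{}".format(os.path.basename(wave), i)
        lines.append("read_stimulus -file {} -dut_instance {}/{} -alias {} -append"
                     .format(wave, tb_name, tb_dut, alias))
        lines.append("report_power -stims {} -indent_inst -unit mW -append -out waveforms.report"
                     .format(alias))
    print("\n".join(lines))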
+ +from typing import List, Dict, Optional, Callable + +import os +import json + +from hammer.config import HammerJSONEncoder +from hammer.utils import in_place_unique +from hammer.vlsi import HammerPowerTool, HammerToolStep, MMMCCorner, MMMCCornerType, TimeValue, VoltageValue, FlowLevel +from hammer.logging import HammerVLSILogging +import hammer.tech as hammer_tech +from hammer.tech.specialcells import CellType +from hammer.common.cadence import CadenceTool + + +class Voltus(HammerPowerTool, CadenceTool): + @property + def post_synth_sdc(self) -> Optional[str]: + # No post-synth SDC input for power... + return None + + def tool_config_prefix(self) -> str: + return "power.voltus" + + @property + def env_vars(self) -> Dict[str, str]: + new_dict = dict(super().env_vars) + new_dict["VOLTUS_BIN"] = self.get_setting("power.voltus.voltus_bin") + return new_dict + + @property + def extra_corners_only(self) -> bool: + return self.get_setting("power.inputs.extra_corners_only") + + @property + def tech_stdcell_pgv_tcl(self) -> str: + return os.path.join(self.run_dir, "tech_stdcell_pgv.tcl") + + @property + def macro_pgv_tcl(self) -> str: + return os.path.join(self.run_dir, "macro_pgv.tcl") + + @property + def tech_lib_dir(self) -> str: + return os.path.join(self.technology.cache_dir, "tech_pgv") + + @property + def stdcell_lib_dir(self) -> str: + return os.path.join(self.technology.cache_dir, "stdcell_pgv") + + @property + def macro_lib_dir(self) -> str: + return os.path.join(self.technology.cache_dir, "macro_pgv") + + @property + def ran_tech_stdcell_pgv(self) -> bool: + """ init_technology sets this to True if tech/stdcell PG views were generated by Hammer or provided by PDK """ + return self.attr_getter("_ran_tech_stdcell_pgv", False) + + @ran_tech_stdcell_pgv.setter + def ran_tech_stdcell_pgv(self, val: bool) -> None: + self.attr_setter("_ran_tech_stdcell_pgv", val) + + @property + def gen_tech_stdcell_pgv(self) -> bool: + """ init_technology sets this to True if stdcell PG views need to be generated """ + return self.attr_getter("_gen_tech_stdcell_pgv", False) + + @gen_tech_stdcell_pgv.setter + def gen_tech_stdcell_pgv(self, val: bool) -> None: + self.attr_setter("_gen_tech_stdcell_pgv", val) + + @property + def ran_macro_pgv(self) -> bool: + """ init_technology sets this to True if macro PG views were generated by Hammer """ + return self.attr_getter("_ran_macro_pgv", False) + + @ran_macro_pgv.setter + def ran_macro_pgv(self, val: bool) -> None: + self.attr_setter("_ran_macro_pgv", val) + + @property + def gen_macro_pgv(self) -> bool: + """ init_technology sets this to True if macro PG views need to be generated """ + return self.attr_getter("_gen_macro_pgv", False) + + @gen_macro_pgv.setter + def gen_macro_pgv(self, val: bool) -> None: + self.attr_setter("_gen_macro_pgv", val) + + @property + def macro_pgv_cells(self) -> List[str]: + """ init_technology populates the list of macros to generate PG views for """ + return self.attr_getter("_macro_pgv_cells", []) + + @macro_pgv_cells.setter + def macro_pgv_cells(self, val: List[str]) -> None: + self.attr_setter("_macro_pgv_cells", val) + + def tech_lib_filter(self) -> List[Callable[[hammer_tech.Library], bool]]: + """ Filter only libraries from tech plugin """ + return [self.filter_for_tech_libs] + + def filter_for_tech_libs(self, lib: hammer_tech.Library) -> bool: + return lib in self.technology.tech_defined_libraries + + def extra_lib_filter(self) -> List[Callable[[hammer_tech.Library], bool]]: + """ Filter only libraries from 
vlsi.inputs.extra_libraries """ + return [self.filter_for_extra_libs] + + def filter_for_extra_libs(self, lib: hammer_tech.Library) -> bool: + return lib in list(map(lambda el: el.store_into_library(), self.technology.get_extra_libraries())) + + def get_mmmc_pgv(self, corner: MMMCCorner) -> List[str]: + return self.technology.read_libs([hammer_tech.filters.power_grid_library_filter], + hammer_tech.HammerTechnologyUtils.to_plain_item, + extra_pre_filters=[ + self.filter_for_mmmc(voltage=corner.voltage, temp=corner.temp)]) + + def get_mmmc_spice_models(self, corner: MMMCCorner) -> List[str]: + return self.technology.read_libs([hammer_tech.filters.spice_model_file_filter], + hammer_tech.HammerTechnologyUtils.to_plain_item, + extra_pre_filters=[ + self.filter_for_mmmc(voltage=corner.voltage, temp=corner.temp)]) + + def get_mmmc_spice_corners(self, corner: MMMCCorner) -> List[str]: + return self.technology.read_libs([hammer_tech.filters.spice_model_lib_corner_filter], + hammer_tech.HammerTechnologyUtils.to_plain_item, + extra_pre_filters=[ + self.filter_for_mmmc(voltage=corner.voltage, temp=corner.temp)], + must_exist=False) + + @property + def steps(self) -> List[HammerToolStep]: + return self.make_steps_from_methods([ + self.check_level, + self.init_technology, + self.init_design, + self.static_power, + self.active_power, + self.static_rail, + self.active_rail, + self.run_voltus + ]) + + def check_level(self) -> bool: + if self.level == FlowLevel.PAR: + return True + else: + self.logger.error("The FlowLevel is invalid. The Voltus plugin only supports post-par analysis. Check your power tool setting and flow step.") + return False + + + def init_technology(self) -> bool: + corners = self.get_mmmc_corners() + + # Options for set_pg_library_mode + base_options = ["-enable_distributed_processing", "true"] # type: List[str] + if self.get_setting("power.voltus.lef_layer_map"): + base_options.extend(["-lef_layer_map", self.get_setting("power.voltus.lef_layer_map")]) + + # Setup commands for each PG library run + base_cmds = ["set_db design_process_node {}".format(self.get_setting("vlsi.core.node"))] + base_cmds.append("set_multi_cpu_usage -local_cpu {}".format(self.get_setting("vlsi.core.max_threads"))) + + # First, check if tech plugin supplies power grid libraries + tech_pg_libs = self.technology.read_libs([hammer_tech.filters.power_grid_library_filter], hammer_tech.HammerTechnologyUtils.to_plain_item, self.tech_lib_filter()) + tech_lib_lefs = self.technology.read_libs([hammer_tech.filters.lef_filter], hammer_tech.HammerTechnologyUtils.to_plain_item, self.tech_lib_filter()) + if len(tech_pg_libs) > 0: + self.logger.info("Technology already provides PG libraries. 
Moving onto macro PG libraries.") + # Else, characterize tech & stdcell libraries only once + elif not os.path.isdir(self.tech_lib_dir) or not os.path.isdir(self.stdcell_lib_dir): + self.logger.info("Generating techonly and stdcell PG libraries for the first time...") + ts_output = base_cmds.copy() + # Get only the tech-defined libraries + ts_output.append("read_physical -lef {{ {} }}".format(" ".join(tech_lib_lefs))) + + tech_options = base_options.copy() + tech_options.extend(["-cell_type", "techonly"]) + # fillers, decaps + stdfillers = self.technology.get_special_cell_by_type(CellType.StdFiller) + decaps = self.technology.get_special_cell_by_type(CellType.Decap) + tech_lib_sp = self.technology.read_libs([hammer_tech.filters.spice_filter], hammer_tech.HammerTechnologyUtils.to_plain_item, self.tech_lib_filter()) + + if not corners: + # Start with tech-only library + options = tech_options.copy() + options.extend([ + "-extraction_tech_file", self.get_qrc_tech(), # TODO: this assumes only 1 exists in no corners case + "-default_power_voltage", str(VoltageValue(self.get_setting("vlsi.inputs.supplies.VDD")).value_in_units("V")) + ]) + ts_output.append("set_pg_library_mode {}".format(" ".join(options))) + ts_output.append("write_pg_library -out_dir {}".format(self.tech_lib_dir)) + + # Next do stdcell library + options[options.index("techonly")] = "stdcells" + if len(stdfillers) > 0: + stdfillers_names = list(map(lambda f: str(f), stdfillers[0].name)) + options.extend(["-filler_cells", "{{ {} }} ".format(" ".join(stdfillers_names))]) + if len(decaps) > 0: + decaps_names = list(map(lambda d: str(d), decaps[0].name)) + options.extend(["-decap_cells", "{{ {} }}".format(" ".join(decaps_names))]) + spice_models = self.technology.read_libs([hammer_tech.filters.spice_model_file_filter], hammer_tech.HammerTechnologyUtils.to_plain_item) + spice_corners = self.technology.read_libs([hammer_tech.filters.spice_model_lib_corner_filter], hammer_tech.HammerTechnologyUtils.to_plain_item) + if len(spice_models) == 0: + self.logger.error("Must specify Spice model files in tech plugin to generate stdcell PG libraries! Skipping.") + return True + else: + options.extend(["-spice_models", "{", " ".join(spice_models), "}"]) + if len(spice_corners) > 0: + options.extend(["-spice_corners", "{{", "} {".join(spice_corners), "}}"]) + if len(decaps) > 0 and len(tech_lib_sp) == 0: + self.logger.error("Must have Spice netlists in tech plugin for decap characterization in stdcell PG library! 
Skipping.") + return True + else: + options.extend(["-spice_subckts", "{{ {} }}".format(" ".join(tech_lib_sp))]) + ts_output.append("set_pg_library_mode {}".format(" ".join(options))) + ts_output.append("write_pg_library -out_dir {}".format(self.stdcell_lib_dir)) + + else: + for corner in corners: + # Start with tech-only library + options = tech_options.copy() + options.extend([ + "-extraction_tech_file", self.get_mmmc_qrc(corner), #TODO: QRC should be tied to stackup + "-default_power_voltage", str(corner.voltage.value), + "-temperature", str(corner.temp.value) + ]) + ts_output.append("set_pg_library_mode {}".format(" ".join(options))) + ts_output.append("write_pg_library -out_dir {}".format(os.path.join(self.tech_lib_dir, corner.name))) + + # Next do stdcell library + options[options.index("techonly")] = "stdcells" + if len(stdfillers) > 0: + stdfillers_names = list(map(lambda f: str(f), stdfillers[0].name)) + options.extend(["-filler_cells", "{{ {} }} ".format(" ".join(stdfillers_names))]) + if len(decaps) > 0: + decaps_names = list(map(lambda d: str(d), decaps[0].name)) + options.extend(["-decap_cells", "{{ {} }}".format(" ".join(decaps_names))]) + spice_models = self.get_mmmc_spice_models(corner) + spice_corners = self.get_mmmc_spice_corners(corner) + if len(spice_models) == 0: + self.logger.error("Must specify Spice model files in tech plugin to generate stdcell PG libraries! Skipping.") + return True + else: + options.extend(["-spice_models", "{", " ".join(spice_models), "}"]) + if len(spice_corners) > 0: + options.extend(["-spice_corners", "{{", "} {".join(spice_corners), "}}"]) + if len(decaps) > 0 and len(tech_lib_sp) == 0: + self.logger.error("Must have Spice netlists in tech plugin for decap characterization in stdcell PG library! Skipping.") + return True + else: + options.extend(["-spice_subckts", "{{ {} }}".format(" ".join(tech_lib_sp))]) + ts_output.append("set_pg_library_mode {}".format(" ".join(options))) + ts_output.append("write_pg_library -out_dir {}".format(os.path.join(self.stdcell_lib_dir, corner.name))) + + ts_output.append("exit") + self.write_contents_to_path("\n".join(ts_output), self.tech_stdcell_pgv_tcl) + self.gen_tech_stdcell_pgv = True + self.ran_tech_stdcell_pgv = True + else: + self.logger.info("techonly and stdcell PG libraries already generated, skipping...") + self.ran_tech_stdcell_pgv = True + + if self.get_setting("power.voltus.macro_pgv"): + m_output = base_cmds.copy() + # Characterize macro libraries once, unless list of extra libraries has been modified/changed + tech_lef = tech_lib_lefs[0] + extra_lib_lefs = self.technology.read_libs([hammer_tech.filters.lef_filter], hammer_tech.HammerTechnologyUtils.to_plain_item, self.extra_lib_filter()) + extra_lib_mtimes = list(map(lambda l: os.path.getmtime(l), extra_lib_lefs)) + extra_lib_lefs_mtimes = dict(zip(extra_lib_lefs, extra_lib_mtimes)) + extra_lib_lefs_json = os.path.join(self.run_dir, "extra_lib_lefs.json") + extra_pg_libs = self.technology.read_libs([hammer_tech.filters.power_grid_library_filter], hammer_tech.HammerTechnologyUtils.to_plain_item, self.extra_lib_filter()) + # TODO: Use some filters w/ LEFUtils to extract cells from LEFs, e.g. 
MacroSize instead of using name field + named_extra_libs = list(filter(lambda l: l.library.name is not None and l.library.power_grid_library not in extra_pg_libs, self.technology.get_extra_libraries())) # type: List[hammer_tech.ExtraLibrary] + + if not os.path.isdir(self.macro_lib_dir): + self.logger.info("Characterizing macros for the first time...") + # First time: characterize all cells + # macros = list(map(lambda l: l.library.name, named_extra_libs)) + macros = [l.library.name for l in named_extra_libs if l.library.name is not None] + in_place_unique(macros) + self.macro_pgv_cells = macros + + # Write dict of extra library LEFs + with open(extra_lib_lefs_json, "w") as f: + f.write(json.dumps(extra_lib_lefs_mtimes, cls=HammerJSONEncoder, indent=4)) + else: + # Figure out which cells to re-characterize + prior_extra_lib_lefs = {} # type: Dict[str, str] + if os.path.exists(extra_lib_lefs_json): + with open(extra_lib_lefs_json, "r") as f: + prior_extra_lib_lefs = json.loads(f.read()) + # Write updated dict of extra library LEFs + with open(extra_lib_lefs_json, "w") as f: + f.write(json.dumps(extra_lib_lefs_mtimes, cls=HammerJSONEncoder, indent=4)) + # Get LEFs which have been created/modified, match cell names if provided + # TODO: these types don't line up, doubtful whether this code works as expected + mod_lefs = dict(set(extra_lib_lefs_mtimes.items()) - set(prior_extra_lib_lefs.items())).keys() # type: ignore + rechar_libs = list(filter(lambda l: l.library.lef_file in mod_lefs, named_extra_libs)) + macros = list(map(lambda l: l.library.name, rechar_libs)) # type: ignore + in_place_unique(macros) + self.macro_pgv_cells = macros + + if len(self.macro_pgv_cells) > 0: + self.logger.info("Characterizing the following macros: {}".format(" ".join(self.macro_pgv_cells))) + # Write list of cells to characterize + cells_list = os.path.join(self.run_dir, "macro_cells.txt") + with open(cells_list, "w") as f: + f.write("\n".join(self.macro_pgv_cells)) + + macro_options = base_options.copy() + macro_options.extend([ + "-cell_type", "macros", + "-cells_file", cells_list + ]) + + # File checks + gds_map_file = self.get_gds_map_file() + if gds_map_file is None: + self.logger.error("Must have GDS layer map for macro PG library generation! Skipping.") + return True + else: + assert isinstance(gds_map_file, str) + macro_options.extend(["-stream_layer_map", gds_map_file]) + + extra_lib_sp = self.technology.read_libs([hammer_tech.filters.spice_filter], hammer_tech.HammerTechnologyUtils.to_plain_item, self.extra_lib_filter()) + if len(extra_lib_sp) == 0: + self.logger.error("Must have Spice netlists for macro PG library generation! Skipping.") + return True + else: + macro_options.extend(["-spice_subckts", "{{ {} }}".format(" ".join(extra_lib_sp))]) + + extra_lib_gds = self.technology.read_libs([hammer_tech.filters.gds_filter], hammer_tech.HammerTechnologyUtils.to_plain_item, self.extra_lib_filter()) + if len(extra_lib_gds) == 0: + self.logger.error("Must have GDS data for macro PG library generation! 
Skipping.") + return True + else: + macro_options.extend(["-stream_files", "{{ {} }}".format(" ".join(extra_lib_gds))]) + + m_output.append("read_physical -lef {{ {TECH_LEF} {EXTRA_LEFS} }}".format(TECH_LEF=tech_lef, EXTRA_LEFS=" ".join(extra_lib_lefs))) + + if not corners: + options = macro_options.copy() + options.extend([ + "-extraction_tech_file", self.get_qrc_tech(), # TODO: this assumes only 1 exists in no corners case + "-default_power_voltage", str(VoltageValue(self.get_setting("vlsi.inputs.supplies.VDD")).value_in_units("V")) + ]) + spice_models = self.technology.read_libs([hammer_tech.filters.spice_model_file_filter], hammer_tech.HammerTechnologyUtils.to_plain_item) + spice_corners = self.technology.read_libs([hammer_tech.filters.spice_model_lib_corner_filter], hammer_tech.HammerTechnologyUtils.to_plain_item) + if len(spice_models) == 0: + self.logger.error("Must specify Spice model files in tech plugin to generate macro PG libraries") + return True + else: + options.extend(["-spice_models", "{", " ".join(spice_models), "}"]) + if len(spice_corners) > 0: + options.extend(["-spice_corners", "{{", "} {".join(spice_corners), "}}"]) + m_output.append("set_pg_library_mode {}".format(" ".join(options))) + m_output.append("write_pg_library -out_dir {}".format(os.path.join(self.macro_lib_dir, corner.name))) + + else: + for corner in corners: + options = macro_options.copy() + options.extend([ + "-extraction_tech_file", self.get_mmmc_qrc(corner), #TODO: QRC should be tied to stackup + "-default_power_voltage", str(corner.voltage.value), + "-temperature", str(corner.temp.value), + ]) + spice_models = self.get_mmmc_spice_models(corner) + spice_corners = self.get_mmmc_spice_corners(corner) + if len(spice_models) == 0: + self.logger.error("Must specify Spice model files in tech plugin to generate macro PG libraries") + return True + else: + options.extend(["-spice_models", "{", " ".join(spice_models), "}"]) + if len(spice_corners) > 0: + options.extend(["-spice_corners", "{{", "} {".join(spice_corners), "}}"]) + m_output.append("set_pg_library_mode {}".format(" ".join(options))) + m_output.append("write_pg_library -out_dir {}".format(os.path.join(self.macro_lib_dir, corner.name))) + + m_output.append("exit") + self.write_contents_to_path("\n".join(m_output), self.macro_pgv_tcl) + self.gen_macro_pgv = True + self.ran_macro_pgv = True + else: + self.logger.info("macro PG libraries already generated and macros have not changed, skipping...") + self.ran_macro_pgv = True + else: + self.logger.info("power.voltus.macro_pgv is False. 
Rail analysis will be incomplete over macro blocks.") + + return True + + def init_design(self) -> bool: + verbose_append = self.verbose_append + + verbose_append("set_db design_process_node {}".format(self.get_setting("vlsi.core.node"))) + verbose_append("set_multi_cpu_usage -local_cpu {}".format(self.get_setting("vlsi.core.max_threads"))) + + innovus_db = os.path.join(os.getcwd(), self.flow_database) + if innovus_db is None or not os.path.isdir(innovus_db): + raise ValueError("Innovus database %s not found" % (innovus_db)) + + verbose_append("read_db {}".format(innovus_db)) + + verbose_append("check_pg_shorts -out_file shorts.rpt") + + # TODO (daniel) deal with multiple power domains + for power_net in self.get_all_power_nets(): + vdd_net = power_net.name + for gnd_net in self.get_all_ground_nets(): + vss_net = gnd_net.name + verbose_append("set_power_pads -net {VDD} -format defpin".format(VDD=vdd_net)) + verbose_append("set_power_pads -net {VSS} -format defpin".format(VSS=vss_net)) + + # Check that SPEFs exist + if len(self.spefs) == 0: + self.logger.error("No spef files specified for power analysis") + return False + + corners = self.get_mmmc_corners() + if corners: + setup_view_names = [] # type: List[str] + hold_view_names = [] # type: List[str] + extra_view_names = [] # type: List[str] + rc_corners = [] # type: List[str] + for corner in corners: + # Setting up views for all defined corner types: setup, hold, extra + if corner.type is MMMCCornerType.Setup: + corner_name = "{n}.{t}".format(n=corner.name, t="setup") + setup_view_names.append("{n}_view".format(n=corner_name)) + elif corner.type is MMMCCornerType.Hold: + corner_name = "{n}.{t}".format(n=corner.name, t="hold") + hold_view_names.append("{n}_view".format(n=corner_name)) + elif corner.type is MMMCCornerType.Extra: + corner_name = "{n}.{t}".format(n=corner.name, t="extra") + extra_view_names.append("{n}_view".format(n=corner_name)) + else: + raise ValueError("Unsupported MMMCCornerType") + rc_corners.append("{n}_rc".format(n=corner_name)) + + # Apply analysis views + # TODO: should not need to analyze extra views as well. Defaulting to hold for now (min. runtime impact). + verbose_append("set_analysis_view -setup {{ {setup_views} }} -hold {{ {hold_views} {extra_views} }}".format( + setup_views=" ".join(setup_view_names), + hold_views=" ".join(hold_view_names), + extra_views=" ".join(extra_view_names) + )) + # Match spefs with corners. Ordering must match (ensured here by get_mmmc_corners())! + for (spef, rc_corner) in zip(self.spefs, rc_corners): + verbose_append("read_spef {spef} -rc_corner {corner}".format(spef=os.path.join(os.getcwd(), spef), corner=rc_corner)) + + else: + # TODO: remove hardcoded my_view string + analysis_view_name = "my_view" + verbose_append("set_analysis_view -setup {{ {setup_view} }} -hold {{ {hold_view} }}".format( + setup_view=analysis_view_name, + hold_view=analysis_view_name + )) + verbose_append("read_spef " + os.path.join(os.getcwd(), self.spefs[0])) + + return True + + def static_power(self) -> bool: + verbose_append = self.verbose_append + + verbose_append("set_db power_method static") + verbose_append("set_db power_write_static_currents true") + verbose_append("set_db power_write_db true") + + # Report based on MMMC mode + corners = self.get_mmmc_corners() + if not corners: + if self.extra_corners_only: + self.logger.warning("power.inputs.extra_corners_only not valid in non-MMMC mode! 
Reporting static power for default analysis view only.") + verbose_append("report_power -out_dir staticPowerReports") + else: + if self.extra_corners_only: + extra_corners = list(filter(lambda c: c.type is MMMCCornerType.Extra, corners)) + if len(extra_corners) == 0: + self.logger.warning("power.inputs.extra_corners_only is true but no extra MMMC corners specified! Ignoring for static power.") + else: + corners = extra_corners + for corner in corners: + # Setting up views for all defined corner types: setup, hold, extra + if corner.type is MMMCCornerType.Setup: + view_name = "{c}.setup_view".format(c=corner.name) + elif corner.type is MMMCCornerType.Hold: + view_name = "{c}.hold_view".format(c=corner.name) + elif corner.type is MMMCCornerType.Extra: + view_name = "{c}.extra_view".format(c=corner.name) + else: + raise ValueError("Unsupported MMMCCornerType") + verbose_append("report_power -view {VIEW} -out_dir staticPowerReports.{VIEW}".format(VIEW=view_name)) + + return True + + def active_power(self) -> bool: + verbose_append = self.verbose_append + + # Active Vectorless Power Analysis + verbose_append("set_db power_method dynamic_vectorless") + # TODO (daniel) add the resolution as an option? + verbose_append("set_dynamic_power_simulation -resolution 500ps") + + # Check MMMC mode + corners = self.get_mmmc_corners() + if not corners: + if self.extra_corners_only: + self.logger.warning("power.inputs.extra_corners_only not valid in non-MMMC mode! Reporting active power for default analysis view only.") + verbose_append("report_power -out_dir activePowerReports") + else: + if self.extra_corners_only: + extra_corners = list(filter(lambda c: c.type is MMMCCornerType.Extra, corners)) + if len(extra_corners) == 0: + self.logger.warning("power.inputs.extra_corners_only is true but no extra MMMC corners specified! Ignoring for active power.") + else: + corners = extra_corners + for corner in corners: + # Setting up views for all defined corner types: setup, hold, extra + if corner.type is MMMCCornerType.Setup: + view_name = "{c}.setup_view".format(c=corner.name) + elif corner.type is MMMCCornerType.Hold: + view_name = "{c}.hold_view".format(c=corner.name) + elif corner.type is MMMCCornerType.Extra: + view_name = "{c}.extra_view".format(c=corner.name) + else: + raise ValueError("Unsupported MMMCCornerType") + verbose_append("report_power -view {VIEW} -out_dir activePowerReports.{VIEW}".format(VIEW=view_name)) + + # TODO (daniel) deal with different tb/dut hierarchies + tb_name = self.get_setting("power.inputs.tb_name") + tb_dut = self.get_setting("power.inputs.tb_dut") + tb_scope = "{}/{}".format(tb_name, tb_dut) + + # TODO: These times should be either auto calculated/read from the inputs or moved into the same structure as a tuple + start_times = self.get_setting("power.inputs.start_times") + end_times = self.get_setting("power.inputs.end_times") + + # Active Vectorbased Power Analysis + verbose_append("set_db power_method dynamic_vectorbased") + waveform_format_map = {".vcd": "vcd", + ".vpd": "vcd", + ".fsdb": "fsdb", + ".shm": "shm", + ".trn": "shm"} + for waveform_path, waveform_stime, waveform_etime in zip(self.waveforms, start_times, end_times): + stime_ns = TimeValue(waveform_stime).value_in_units("ns") + etime_ns = TimeValue(waveform_etime).value_in_units("ns") + # Set format intelligently based on file extension. Strip .gz if present. 
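One caveat at this step: str.rstrip strips a trailing character set rather than a literal suffix, so rstrip(".gz") over-trims stems ending in '.', 'g', or 'z' ("sig.gz".rstrip(".gz") yields "si"). A suffix-safe sketch of the same detection, with a hypothetical helper name:

    import os

    waveform_format_map = {".vcd": "vcd", ".vpd": "vcd", ".fsdb": "fsdb",
                           ".shm": "shm", ".trn": "shm"}

    def waveform_format(path: str) -> str:
        # Strip a literal ".gz" suffix instead of a trailing character set.
        if path.endswith(".gz"):
            path = path[:-len(".gz")]
        ext = os.path.splitext(path)[1].lower()
        return waveform_format_map.get(ext, "unknown")

    assert waveform_format("dump.vcd.gz") == "vcd"
    assert waveform_format("sig.gz") == "unknown"  # stem has no waveform extension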
+ waveform_ext = os.path.splitext(waveform_path.rstrip(".gz"))[1].lower() + if waveform_format_map.get(waveform_ext) is None: + self.logger.error("Only VCD/VPD, FSDB, and SHM waveform formats supported.") + verbose_append("read_activity_file -reset -format {FORMAT} {WAVEFORM_PATH} -start {stime}ns -end {etime}ns -scope {TESTBENCH}".format(FORMAT=waveform_format_map.get(waveform_ext), WAVEFORM_PATH=os.path.join(os.getcwd(), waveform_path), TESTBENCH=tb_scope, stime=stime_ns, etime=etime_ns)) + waveform_file = os.path.basename(waveform_path) + # Report based on MMMC mode + if not corners: + verbose_append("report_power -out_dir activePower.{WAVEFORM_FILE}".format(WAVEFORM_FILE=waveform_file)) + else: + for corner in corners: + # Setting up views for all defined corner types: setup, hold, extra + if corner.type is MMMCCornerType.Setup: + view_name = "{c}.setup_view".format(c=corner.name) + elif corner.type is MMMCCornerType.Hold: + view_name = "{c}.hold_view".format(c=corner.name) + elif corner.type is MMMCCornerType.Extra: + view_name = "{c}.extra_view".format(c=corner.name) + else: + raise ValueError("Unsupported MMMCCornerType") + verbose_append("report_power -view {VIEW} -out_dir activePowerReports.{WAVEFORM_FILE}.{VIEW}".format(VIEW=view_name, WAVEFORM_FILE=waveform_file)) + + verbose_append("report_vector_profile -detailed_report true -out_file activePowerProfile.{WAVEFORM_FILE}".format(WAVEFORM_FILE=waveform_file)) + + verbose_append("set_db power_method dynamic") + for saif_path in self.saifs: + verbose_append("set_dynamic_power_simulation -reset") + verbose_append("read_activity_file -reset -format SAIF {SAIF_PATH} -scope {TESTBENCH}".format(SAIF_PATH=os.path.join(os.getcwd(), saif_path), TESTBENCH=tb_scope)) + saif_file=".".join(saif_path.split('/')[-2:]) + # Report based on MMMC mode + if not corners: + verbose_append("report_power -out_dir activePower.{SAIF_FILE}".format(SAIF_FILE=saif_file)) + else: + for corner in corners: + # Setting up views for all defined corner types: setup, hold, extra + if corner.type is MMMCCornerType.Setup: + view_name = "{c}.setup_view".format(c=corner.name) + elif corner.type is MMMCCornerType.Hold: + view_name = "{c}.hold_view".format(c=corner.name) + elif corner.type is MMMCCornerType.Extra: + view_name = "{c}.extra_view".format(c=corner.name) + else: + raise ValueError("Unsupported MMMCCornerType") + verbose_append("report_power -view {VIEW} -out_dir activePowerReports.{SAIF_FILE}.{VIEW}".format(VIEW=view_name, SAIF_FILE=saif_file)) + + return True + + def rail_analysis(self, method: str, power_dir: str, output_dir: Optional[str] = None) -> bool: + """ + Generic method for rail analysis + params: + - method: "static" or "dynamic" + - power_dir: relative path to static or active power current files + - output_dir: relative path to rail analysis output dir + """ + verbose_append = self.verbose_append + + if not output_dir: + output_dir = method + "RailReports" + + # Decide accuracy based on existence of PGV libraries, unless overridden + accuracy = self.get_setting("power.voltus.rail_accuracy") + if not accuracy: + accuracy = "hd" if self.ran_tech_stdcell_pgv else "xd" # hd still works w/o macro PG views + + base_options = [ + "-method", method, + "-accuracy", accuracy, + "-process_techgen_em_rules", "true", + "-em_peak_analysis", "true", + "-enable_rlrp_analysis", "true", + "-gif_resolution", "high", + "-verbosity", "true" + ] + if method == "static": + base_options.extend(["-enable_sensitivity_analysis", "true"]) + + # TODO: Need combinations of 
all power nets + voltage domains + pg_nets = self.get_all_power_nets() + self.get_all_ground_nets() + # Report based on MMMC corners + corners = self.get_mmmc_corners() + if not corners: + if self.extra_corners_only: + self.logger.warning("power.inputs.extra_corners_only not valid in non-MMMC mode! Reporting rail analysis for default analysis view only.") + options = base_options.copy() + pg_libs = self.technology.read_libs([hammer_tech.filters.power_grid_library_filter], hammer_tech.HammerTechnologyUtils.to_plain_item) + if self.ran_tech_stdcell_pgv: + pg_libs.append(os.path.join(self.tech_lib_dir, "techonly.cl")) + pg_libs.append(os.path.join(self.stdcell_lib_dir, "stdcells.cl")) + if self.ran_macro_pgv: + pg_libs.extend(list(map(lambda l: os.path.join(self.macro_lib_dir, "macros_{}.cl".format(l)), self.macro_pgv_cells))) + if len(pg_libs) == 0: + self.logger.warning("No PG libraries are available! Rail analysis is skipped.") + return True + options.extend(["-power_grid_libraries", "{{ {} }}".format(" ".join(pg_libs))]) + verbose_append("set_rail_analysis_config {}".format(" ".join(options))) + # TODO: get nets and .ptiavg files using TCL from the .ptifiles file in the power reports directory + power_data = list(map(lambda n: "{POWER_DIR}/{METHOD}_{NET}.ptiavg".format( + POWER_DIR=power_dir, + METHOD=method, + NET=n.name), pg_nets)) + verbose_append("set_power_data -format current {{ {} }}".format(" ".join(power_data))) + verbose_append("report_rail -output_dir {} -type domain ALL".format(output_dir)) + # TODO: Find highest run number, increment by 1 to enable reporting IRdrop regions + else: + if self.extra_corners_only: + extra_corners = list(filter(lambda c: c.type is MMMCCornerType.Extra, corners)) + if len(extra_corners) == 0: + self.logger.warning("power.inputs.extra_corners_only is true but no extra MMMC corners specified! Ignoring for rail analysis.") + else: + corners = extra_corners + for corner in corners: + options = base_options.copy() + if corner.type is MMMCCornerType.Setup: + view_name = corner.name + ".setup_view" + elif corner.type is MMMCCornerType.Hold: + view_name = corner.name + ".hold_view" + elif corner.type is MMMCCornerType.Extra: + view_name = corner.name + ".extra_view" + else: + raise ValueError("Unsupported MMMCCornerType") + pg_libs = self.get_mmmc_pgv(corner) + if self.ran_tech_stdcell_pgv: + pg_libs.append(os.path.join(self.tech_lib_dir, corner.name, "techonly.cl")) + pg_libs.append(os.path.join(self.stdcell_lib_dir, corner.name, "stdcells.cl")) + if self.ran_macro_pgv: + pg_libs.extend(list(map(lambda l: os.path.join(self.macro_lib_dir, corner.name, "macros_{}.cl".format(l)), self.macro_pgv_cells))) + if len(pg_libs) == 0: + self.logger.warning("No PG libraries are available! 
Rail analysis is skipped.") + return True + + options.extend([ + "-power_grid_libraries", "{{ {} }}".format(" ".join(pg_libs)), + "-analysis_view", view_name, + "-temperature", str(corner.temp.value) + ]) + verbose_append("set_rail_analysis_config {}".format(" ".join(options))) + verbose_append("set_power_data -reset") + # TODO: get nets and .ptiavg files using TCL from the .ptifiles file in the power reports directory + power_data = list(map(lambda n: "{POWER_DIR}.{VIEW}/{METHOD}_{NET}.ptiavg".format( + POWER_DIR=power_dir, + VIEW=view_name, + METHOD=method, + NET=n.name), pg_nets)) + verbose_append("set_power_data -format current {{ {} }}".format(" ".join(power_data))) + verbose_append("report_rail -output_dir {} -type domain ALL".format(output_dir)) + # TODO: Find highest run number, increment by 1 to enable reporting IRdrop regions + + return True + + def static_rail(self) -> bool: + return self.rail_analysis("static", "staticPowerReports") + + def active_rail(self) -> bool: + # Vectorless database + passed = self.rail_analysis("dynamic", "activePowerReports", "activeRailReports") + + # Vectorbased databases + for waveform_path in self.waveforms: + passed = self.rail_analysis("dynamic", "activePower." + os.path.basename(waveform_path), "activeRailReports." + os.path.basename(waveform_path)) + for saif_path in self.saifs: + saif_file=".".join(saif_path.split('/')[-2:]) + passed = self.rail_analysis("dynamic", "activePower." + saif_file, "activeRailReports." + saif_file) + return passed + + def run_voltus(self) -> bool: + verbose_append = self.verbose_append + + """Close out the power script and run Voltus""" + # Quit Voltus + verbose_append("exit") + + # Create power analysis script + power_tcl_filename = os.path.join(self.run_dir, "power.tcl") + self.write_contents_to_path("\n".join(self.output), power_tcl_filename) + + # Build args + base_args = [ + self.get_setting("power.voltus.voltus_bin"), + "-no_gui", + "-common_ui", + "-init" + ] + + HammerVLSILogging.enable_colour = False + HammerVLSILogging.enable_tag = False + + # Run PG lib gen, if needed + if self.gen_tech_stdcell_pgv: + args = base_args.copy() + args.append(self.tech_stdcell_pgv_tcl) + self.run_executable(args, cwd=self.run_dir) + if self.gen_macro_pgv: + args = base_args.copy() + args.append(self.macro_pgv_tcl) + self.run_executable(args, cwd=self.run_dir) + + args = base_args.copy() + args.append(power_tcl_filename) + self.run_executable(args, cwd=self.run_dir) + + HammerVLSILogging.enable_colour = True + HammerVLSILogging.enable_tag = True + + return True + + + +tool = Voltus diff --git a/hammer/power/voltus/defaults.yml b/hammer/power/voltus/defaults.yml new file mode 100644 index 000000000..b1b211cf6 --- /dev/null +++ b/hammer/power/voltus/defaults.yml @@ -0,0 +1,27 @@ +# Configuration options and defaults for Voltus. +# The values specified in this file are the defaults. + +power.voltus: + # Location of the binary. + voltus_bin: "${cadence.cadence_home}/SSV/SSV${power.voltus.version}/bin/voltus" + voltus_bin_meta: lazysubst # we want later overrides to be able to affect this + + # Voltus version to use. + # Used to locate the binary - e.g. 
the '211' in ${cadence.cadence_home}/SSV/SSV211/bin/voltus + version: "211" + + # Optional LEF <-> QRC layer mapping file if layers within are mismatched + # Should be provided by the technology in a tab-separated 4-column format + # Column 1: layer type (metal, via) + # Column 2: layer name in QRC tech file + # Column 3: lefdef (exact string) + # Column 4: layer name in tech LEF file + lef_layer_map: null + + # If true, generate power grid views of the macros contained in vlsi.inputs.extra_libraries + # Each library is required to have a name. If the library has a power grid view provided, it is skipped. + macro_pgv: true + + # Accuracy for rail analysis (choices are "xd" and "hd") + # If left null, accuracy will be selected based on which PG views can be generated + rail_accuracy: null diff --git a/hammer/sim/vcs/__init__.py b/hammer/sim/vcs/__init__.py new file mode 100644 index 000000000..ea552c664 --- /dev/null +++ b/hammer/sim/vcs/__init__.py @@ -0,0 +1,391 @@ +# hammer-vlsi plugin for Synopsys VCS +# +# See LICENSE for license details. + +from hammer.vlsi import HammerSimTool, HammerToolStep, HammerLSFSubmitCommand, HammerLSFSettings +from hammer.common.synopsys import SynopsysTool +from hammer.logging import HammerVLSILogging + +from typing import Dict, List, Optional, Callable, Tuple + +from hammer.vlsi import FlowLevel, TimeValue + +import hammer.utils as hammer_utils +import hammer.tech as hammer_tech +from hammer.tech import HammerTechnologyUtils + +import os +import re +import shutil +import json +from multiprocessing import Process + +class VCS(HammerSimTool, SynopsysTool): + + def tool_config_prefix(self) -> str: + return "sim.vcs" + + def fill_outputs(self) -> bool: + # TODO: support automatic waveform generation in a similar fashion to SAIFs + self.output_waveforms = [] + self.output_saifs = [] + self.output_top_module = self.top_module + self.output_tb_name = self.get_setting("sim.inputs.tb_name") + self.output_tb_dut = self.get_setting("sim.inputs.tb_dut") + self.output_level = self.get_setting("sim.inputs.level") + if self.get_setting("sim.inputs.saif.mode") != "none": + if not self.benchmarks: + self.output_saifs.append(os.path.join(self.run_dir, "ucli.saif")) + for benchmark in self.benchmarks: + self.output_saifs.append(os.path.join(self.benchmark_run_dir(benchmark), "ucli.saif")) + return True + + @property + def steps(self) -> List[HammerToolStep]: + return self.make_steps_from_methods([ + self.write_gl_files, + self.run_vcs, + self.run_simulation + ]) + + def benchmark_run_dir(self, bmark_path: str) -> str: + """Generate a benchmark run directory.""" + # TODO(ucb-bar/hammer#462) this method should be passed the name of the bmark rather than its path + bmark = os.path.basename(bmark_path) + return os.path.join(self.run_dir, bmark) + + @property + def force_regs_file_path(self) -> str: + return os.path.join(self.run_dir, "force_regs.ucli") + + @property + def access_tab_file_path(self) -> str: + return os.path.join(self.run_dir, "access.tab") + + @property + def simulator_executable_path(self) -> str: + return os.path.join(self.run_dir, "simv") + + @property + def run_tcl_path(self) -> str: + return os.path.join(self.run_dir, "run.tcl") + + @property + def env_vars(self) -> Dict[str, str]: + v = dict(super().env_vars) + v["VCS_HOME"] = self.get_setting("sim.vcs.vcs_home") + v["VERDI_HOME"] = self.get_setting("sim.vcs.verdi_home") + v["SNPSLMD_LICENSE_FILE"] = self.get_setting("synopsys.SNPSLMD_LICENSE_FILE") + return v + + def get_verilog_models(self) ->
List[str]: + verilog_sim_files = self.technology.read_libs([ + hammer_tech.filters.verilog_sim_filter + ], hammer_tech.HammerTechnologyUtils.to_plain_item) + return verilog_sim_files + + def write_gl_files(self) -> bool: + if self.level == FlowLevel.RTL: + return True + + tb_prefix = self.get_setting("sim.inputs.tb_dut") + force_val = self.get_setting("sim.inputs.gl_register_force_value") + + abspath_seq_cells = os.path.join(os.getcwd(), self.seq_cells) + if not os.path.isfile(abspath_seq_cells): + self.logger.error("List of seq cells json not found as expected at {0}".format(self.seq_cells)) + + with open(self.access_tab_file_path, "w") as f: + with open(abspath_seq_cells) as seq_file: + seq_json = json.load(seq_file) + assert isinstance(seq_json, List), "list of all sequential cells should be a json list of strings not {}".format(type(seq_json)) + for cell in seq_json: + f.write("acc=wn:{cell_name}\n".format(cell_name=cell)) + + abspath_all_regs = os.path.join(os.getcwd(), self.all_regs) + if not os.path.isfile(abspath_all_regs): + self.logger.error("List of all regs json not found as expected at {0}".format(self.all_regs)) + + with open(self.force_regs_file_path, "w") as f: + with open(abspath_all_regs) as reg_file: + reg_json = json.load(reg_file) + assert isinstance(reg_json, List), "list of all sequential cells should be a json list of dictionaries from string to string not {}".format(type(reg_json)) + for reg in sorted(reg_json, key=lambda r: len(r["path"])): # TODO: This is a workaround for a bug in P-2019.06 + path = reg["path"] + path = '.'.join(path.split('/')) + pin = reg["pin"] + f.write("force -deposit {" + tb_prefix + "." + path + " ." + pin + "} " + str(force_val) + "\n") + + return True + + def run_vcs(self) -> bool: + # run through inputs and append to CL arguments + vcs_bin = self.get_setting("sim.vcs.vcs_bin") + if not os.path.isfile(vcs_bin): + self.logger.error("VCS binary not found as expected at {0}".format(vcs_bin)) + return False + + if not self.check_input_files([".v", ".v.gz", ".sv", ".so", ".cc", ".c"]): + return False + + # We are switching working directories and we still need to find paths + abspath_input_files = list(map(lambda name: os.path.join(os.getcwd(), name), self.input_files)) + + top_module = self.top_module + compiler_cc_opts = self.get_setting("sim.inputs.compiler_cc_opts", []) + compiler_ld_opts = self.get_setting("sim.inputs.compiler_ld_opts", []) + # TODO(johnwright) sanity check the timescale string + timescale = self.get_setting("sim.inputs.timescale") + options = self.get_setting("sim.inputs.options", []) + defines = self.get_setting("sim.inputs.defines", []) + access_tab_filename = self.access_tab_file_path + tb_name = self.get_setting("sim.inputs.tb_name") + + # Build args + args = [ + vcs_bin, + "-full64", + "-lca", # enable advanced features access, add'l no-cost licenses may be req'd depending on feature + "-debug_access+all" # since I-2014.03, req'd for FSDB dumping & force regs + ] + + if self.get_setting("sim.vcs.fgp") and self.version() >= self.version_number("M-2017.03"): + args.append("-fgp") + + if timescale is not None: + args.append('-timescale={}'.format(timescale)) + + # Add in options we pass to the C++ compiler + args.extend(['-CC', '-I$(VCS_HOME)/include']) + for compiler_cc_opt in compiler_cc_opts: + args.extend(['-CFLAGS', compiler_cc_opt]) + + # vcs requires libraries (-l) to be outside of the LDFLAGS + for compiler_ld_opt in compiler_ld_opts: + if compiler_ld_opt.startswith('-l'): + args.extend([compiler_ld_opt]) + 
else: + args.extend(['-LDFLAGS', compiler_ld_opt]) + + # black box options + args.extend(options) + + # Multicore options + if isinstance(self.submit_command, HammerLSFSubmitCommand): + if self.submit_command.settings.num_cpus is not None: + args.extend(['-j'+str(self.submit_command.settings.num_cpus)]) + + # Add in all input files + args.extend(abspath_input_files) + + # Note: we always want to get the verilog models because most real designs will instantiate a few + # tech-specific cells in the source RTL (IO cells, clock gaters, etc.) + args.extend(self.get_verilog_models()) + + for define in defines: + args.extend(['+define+' + define]) + + if self.level.is_gatelevel(): + args.extend(['-P']) + args.extend([access_tab_filename]) + if self.get_setting("sim.inputs.timing_annotated"): + args.extend(["+neg_tchk"]) + args.extend(["+sdfverbose"]) + args.extend(["-negdelay"]) + args.extend(["-sdf"]) + if self.sdf_file: + args.extend(["max:{top}:{sdf}".format(top=top_module, sdf=os.path.join(os.getcwd(), self.sdf_file))]) + else: + args.extend(["+notimingcheck"]) + args.extend(["+delay_mode_zero"]) + else: + # Also disable timing at RTL level for any hard macros + args.extend(["+notimingcheck"]) + args.extend(["+delay_mode_zero"]) + + + if tb_name != "": + args.extend(["-top", tb_name]) + + args.extend(['-o', self.simulator_executable_path]) + + HammerVLSILogging.enable_colour = False + HammerVLSILogging.enable_tag = False + + # Delete an old copy of the simulator if it exists + if os.path.exists(self.simulator_executable_path): + os.remove(self.simulator_executable_path) + + # Remove the csrc directory (otherwise the simulator will be stale) + if os.path.exists(os.path.join(self.run_dir, "csrc")): + shutil.rmtree(os.path.join(self.run_dir, "csrc")) + + # Generate a simulator + self.run_executable(args, cwd=self.run_dir) + + HammerVLSILogging.enable_colour = True + HammerVLSILogging.enable_tag = True + + return os.path.exists(self.simulator_executable_path) + + def run_simulation(self) -> bool: + if not self.get_setting("sim.inputs.execute_sim"): + self.logger.warning("Not running any simulations because sim.inputs.execute_sim is unset.") + return True + + top_module = self.top_module + exec_flags_prepend = self.get_setting("sim.inputs.execution_flags_prepend", []) + exec_flags = self.get_setting("sim.inputs.execution_flags", []) + exec_flags_append = self.get_setting("sim.inputs.execution_flags_append", []) + force_regs_filename = self.force_regs_file_path + tb_prefix = self.get_setting("sim.inputs.tb_dut") + saif_mode = self.get_setting("sim.inputs.saif.mode") + if saif_mode == "time": + saif_start_time = self.get_setting("sim.inputs.saif.start_time") + saif_end_time = self.get_setting("sim.inputs.saif.end_time") + elif saif_mode == "trigger": + self.logger.error("Trigger SAIF mode currently unsupported.") + elif saif_mode == "trigger_raw": + saif_start_trigger_raw = self.get_setting("sim.inputs.saif.start_trigger_raw") + saif_end_trigger_raw = self.get_setting("sim.inputs.saif.end_trigger_raw") + elif saif_mode == "full": + pass + elif saif_mode == "none": + pass + else: + self.logger.warning(f"Bad saif_mode: {saif_mode}. Valid modes are time, trigger, full, or none.
Defaulting to none.") + saif_mode = "none" + + if self.level == FlowLevel.RTL and saif_mode != "none": + find_regs_run_tcl = [] + if saif_mode != "none": + if saif_mode == "time": + stime = TimeValue(saif_start_time[0]) + find_regs_run_tcl.append("run {start}ns".format(start=stime.value_in_units("ns"))) + elif saif_mode == "trigger_raw": + find_regs_run_tcl.append(saif_start_trigger_raw) + find_regs_run_tcl.append("run") + elif saif_mode == "full": + pass + # start saif + find_regs_run_tcl.append("power {dut}".format(dut=tb_prefix)) + find_regs_run_tcl.append("config endofsim noexit") + if saif_mode == "time": + etime = TimeValue(saif_end_time) + find_regs_run_tcl.append("run {end}ns".format(end=(etime.value_in_units("ns") - stime.value_in_units("ns")))) + elif saif_mode == "trigger_raw": + find_regs_run_tcl.append(saif_end_trigger_raw) + find_regs_run_tcl.append("run") + elif saif_mode == "full": + find_regs_run_tcl.append("run") + # stop saif + find_regs_run_tcl.append("power -report ucli.saif 1e-9 {dut}".format(dut=tb_prefix)) + find_regs_run_tcl.append("run") + find_regs_run_tcl.append("exit") + self.write_contents_to_path("\n".join(find_regs_run_tcl), self.run_tcl_path) + + if self.level.is_gatelevel(): + find_regs_run_tcl = [] + find_regs_run_tcl.append("source " + force_regs_filename) + if saif_mode != "none": + if saif_mode == "time": + stime = TimeValue(saif_start_time[0]) + find_regs_run_tcl.append("run {start}ns".format(start=stime.value_in_units("ns"))) + elif saif_mode == "trigger_raw": + find_regs_run_tcl.append(saif_start_trigger_raw) + find_regs_run_tcl.append("run") + elif saif_mode == "full": + pass + # start saif + find_regs_run_tcl.append("power -gate_level on") + find_regs_run_tcl.append("power {dut}".format(dut=tb_prefix)) + find_regs_run_tcl.append("config endofsim noexit") + if saif_mode == "time": + etime = TimeValue(saif_end_time) + find_regs_run_tcl.append("run {end}ns".format(end=(etime.value_in_units("ns") - stime.value_in_units("ns")))) + elif saif_mode == "trigger_raw": + find_regs_run_tcl.append(saif_end_trigger_raw) + find_regs_run_tcl.append("run") + elif saif_mode == "full": + find_regs_run_tcl.append("run") + # stop saif + find_regs_run_tcl.append("power -report ucli.saif 1e-9 {dut}".format(dut=tb_prefix)) + find_regs_run_tcl.append("run") + find_regs_run_tcl.append("exit") + self.write_contents_to_path("\n".join(find_regs_run_tcl), self.run_tcl_path) + + vcs_bin = self.get_setting("sim.vcs.vcs_bin") + for benchmark in self.benchmarks: + if not os.path.isfile(benchmark): + self.logger.error("benchmark not found as expected at {0}".format(benchmark)) + return False + + # setup simulation arguments + args = [ self.simulator_executable_path ] + args.extend(exec_flags_prepend) + if self.get_setting("sim.vcs.fgp") and self.version() >= self.version_number("M-2017.03"): + # num_threads is in addition to a master thread, so reduce by 1 + num_threads=int(self.get_setting("vlsi.core.max_threads")) - 1 + args.append("-fgp=num_threads:{threads},num_fsdb_threads:0,allow_less_cores,dynamictoggle".format(threads=max(num_threads,1))) + args.extend(exec_flags) + if self.level.is_gatelevel(): + if saif_mode != "none": + args.extend([ + # Reduce the number ucli instructions by auto starting and auto stopping + '-saif_opt+toggle_start_at_set_region+toggle_stop_at_toggle_report', + # Only needed if we are using start time pruning so we can return to ucli after endofsim + '-ucli2Proc', + ]) + args.extend(["-ucli", "-do", self.run_tcl_path]) + elif self.level == FlowLevel.RTL 
and saif_mode != "none": + args.extend([ + # Reduce the number ucli instructions by auto starting and auto stopping + '-saif_opt+toggle_start_at_set_region+toggle_stop_at_toggle_report', + # Only needed if we are using start time pruning so we can return to ucli after endofsim + '-ucli2Proc', + ]) + args.extend(["-ucli", "-do", self.run_tcl_path]) + args.extend(exec_flags_append) + + HammerVLSILogging.enable_colour = False + HammerVLSILogging.enable_tag = False + + # Our current invocation of VCS is only using a single core + if isinstance(self.submit_command, HammerLSFSubmitCommand): + old_settings = self.submit_command.settings._asdict() + del old_settings['num_cpus'] + self.submit_command.settings = HammerLSFSettings(num_cpus=1, **old_settings) + + # Run the simulations in as many parallel runs as the user wants + if self.get_setting("sim.inputs.parallel_runs") == 0: + runs = 1 + else: + runs = self.get_setting("sim.inputs.parallel_runs") + bp = [] # type: List[Process] + running = 0 + ran = 0 + for benchmark in self.benchmarks: + bmark_run_dir = self.benchmark_run_dir(benchmark) + # Make the rundir if it does not exist + hammer_utils.mkdir_p(bmark_run_dir) + if runs > 0 and running >= runs: # We are currently running the maximum number so we join first + bp[ran].join() + ran = ran + 1 + running = running - 1 + bp.append(Process(target=self.run_executable, args=(args + [benchmark],), kwargs={'cwd':bmark_run_dir})) + bp[-1].start() + running = running + 1 + # Make sure we join all remaining runs + for p in bp: + p.join() + + + if self.benchmarks == []: + self.run_executable(args, cwd=self.run_dir) + + HammerVLSILogging.enable_colour = True + HammerVLSILogging.enable_tag = True + + return True + +tool = VCS diff --git a/hammer/sim/vcs/defaults.yml b/hammer/sim/vcs/defaults.yml new file mode 100644 index 000000000..a8efeba9b --- /dev/null +++ b/hammer/sim/vcs/defaults.yml @@ -0,0 +1,19 @@ +# Default settings for simulation using VCS +sim.vcs: + + # Location of the binary. + vcs_bin: "${synopsys.synopsys_home}/vcs/${sim.vcs.version}/bin/vcs" + vcs_bin_meta: lazysubst + vcs_home: "${synopsys.synopsys_home}/vcs/${sim.vcs.version}" + vcs_home_meta: lazysubst + verdi_home: "${synopsys.synopsys_home}/verdi/${sim.vcs.version}" + verdi_home_meta: lazysubst + + # VCS version to use. + # Used to locate the binary - e.g. the 'M-2017.03' in ${synopsys.synopsys_home}/vcs/M-2017.03/bin/vcs + version: "P-2019.06-SP2-5" + + # Option to turn on Fine-Grained Parallelism (FGP) + # Not all designs benefit from parallelized simulation. + # Use at your own risk - it is known to be buggy! + fgp: false diff --git a/hammer/sim/xcelium/__init__.py b/hammer/sim/xcelium/__init__.py new file mode 100644 index 000000000..fe3420d5e --- /dev/null +++ b/hammer/sim/xcelium/__init__.py @@ -0,0 +1,453 @@ +# HAMMER-VLSI PLUGIN, XCELIUM +# Notes: This plugin sets up xrun to execute in a three-step xrun invocation. +# This bridges multi-tool direct-invocation and the xrun single-invocation. +# As a primer, Xcelium currently supports three methods of running simulations: +# 1) Single call xrun: The recommended Cadence use-style that generates work and scratch dirs, +# invokes appropriate compilers and settings based on input files, and generally simplifies the +# simulation process. +# 2) Multi call xrun: Offers the ability to split the flow into 3 parts with added complications, +# but is clearer when deep access to each step is required. 
Has all the utility of a direct +# invocation use-style with the added convenience of single call xrun. Additionally, is required +# when elaboration environment is preserved. +# 3) Direct invocation of xmvlog, xmelab, xmsim tools manually. + +import os +import json +import datetime +import io +from typing import Dict, List, Optional, Tuple, Any + +import hammer.tech as hammer_tech +from hammer.vlsi import TimeValue +from hammer.vlsi import HammerSimTool, HammerToolStep, HammerLSFSubmitCommand, HammerLSFSettings +from hammer.logging import HammerVLSILogging +from hammer.common.cadence import CadenceTool + + +class xcelium(HammerSimTool, CadenceTool): + + @property + def xcelium_ext(self) -> List[str]: + verilog_ext = [".v", ".V", ".VS", ".vp", ".VP"] + sverilog_ext = [".sv",".SV",".svp",".SVP",".svi",".svh",".vlib",".VLIB"] + c_cxx_ext = [".c",".cc",".cpp"] + gz_ext = [ext + ".gz" for ext in verilog_ext + sverilog_ext] + z_ext = [ext + ".z" for ext in verilog_ext + sverilog_ext] + return (verilog_ext + sverilog_ext + c_cxx_ext + gz_ext + z_ext) + + @property + def steps(self) -> List[HammerToolStep]: + return self.make_steps_from_methods([self.compile_xrun, + self.elaborate_xrun, + self.sim_xrun]) + + def tool_config_prefix(self) -> str: + return "sim.xcelium" + + @property + def sim_input_prefix(self) -> str: + return "sim.inputs" + + @property + def sim_waveform_prefix(self) -> str: + return "sim.inputs.waveform" + + @property + def xcelium_bin(self) -> str: + return self.get_setting("sim.xcelium.xcelium_bin") + + @property + def sim_tcl_file(self) -> str: + return os.path.join(self.run_dir, "xrun_sim.tcl") + + @property + def sdf_cmd_file(self) -> str: + return os.path.join(self.run_dir, "design.sdf_cmd") + + @property + def post_synth_sdc(self) -> Optional[str]: + pass + + def get_verilog_models(self) -> List[str]: + verilog_sim_files = self.technology.read_libs([ + hammer_tech.filters.verilog_sim_filter], + hammer_tech.HammerTechnologyUtils.to_plain_item) + return verilog_sim_files + + def fill_outputs(self) -> bool: + saif_opts = self.extract_saif_opts() + wav_opts = self.extract_waveform_opts()[1] + + self.output_waveforms = [] + self.output_saifs = [] + self.output_top_module = self.top_module + self.output_tb_name = self.get_setting(f"{self.sim_input_prefix}.tb_name") + self.output_tb_dut = self.get_setting(f"{self.sim_input_prefix}.tb_dut") + self.output_level = self.get_setting(f"{self.sim_input_prefix}.level") + + if saif_opts ["mode"] is not None: + self.output_saifs.append(os.path.join(self.run_dir, "ucli.saif")) + if wav_opts["type"] is not None: + extension = wav_opts["type"].lower() + self.output_waveforms.append(os.path.join(self.run_dir, f'{wav_opts["dump_name"]}.{extension}')) + + return True + + # Several extract functions are used to process mandatory keys into string options. + # Returns a raw input dictionary as well. 
+ + def extract_xrun_opts(self) -> Tuple[Dict[str, str], Dict[str, str]]: + xrun_opts_def = {"enhanced_recompile": True, + "xmlibdirname": None, + "xmlibdirpath": None, + "simtmp": None, + "snapshot": None, + "global_access": False, + "mce": False} + + xrun_opts = self.get_settings_from_dict(xrun_opts_def ,key_prefix=self.tool_config_prefix()) + xrun_opts_proc = xrun_opts.copy() + bool_list = ["global_access", "enhanced_recompile", "mce"] + + if xrun_opts_proc ["global_access"]: + xrun_opts_proc ["global_access"] = "+access+rcw" + else: + xrun_opts_proc ["global_access"] = "" + + if xrun_opts_proc ["enhanced_recompile"]: + xrun_opts_proc ["enhanced_recompile"] = "-fast_recompilation" + else: + xrun_opts_proc ["enhanced_recompile"] = "" + + if xrun_opts_proc ["mce"]: + xrun_opts_proc ["mce"] = "-mce" + else: + xrun_opts_proc ["mce"] = "" + + for opt, setting in xrun_opts_proc.items(): + if opt not in bool_list and setting is not None: + xrun_opts_proc [opt] = f"-{opt} {setting}" + + return xrun_opts_proc, xrun_opts + + def extract_sim_opts(self) -> Tuple[Dict[str, str], Dict[str, str]]: + abspath_input_files = list(map(lambda name: os.path.join(os.getcwd(), name), self.input_files)) + sim_opts_def = {"tb_name": None, + "tb_dut": None, + "timescale": None, + "defines": None, + "incdir": None, + "execute_sim": True, + "compiler_cc_opts": None, + "compiler_ld_opts": None} + + # Defines and incdir are not strictly necessary. + optional_keys = ["defines", "incdir", "compiler_cc_opts", "compiler_ld_opts"] + sim_opts = self.get_settings_from_dict(sim_opts_def, self.sim_input_prefix, optional_keys) + # Additional keys required if GL. + if self.level.is_gatelevel(): + sim_opts ["gl_register_force_value"] = self.get_setting(f"{self.sim_input_prefix}.gl_register_force_value", 0) + sim_opts ["timing_annotated"] = self.get_setting(f"{self.sim_input_prefix}.timing_annotated", False) + + sim_opts_proc = sim_opts.copy() + sim_opts_proc ["input_files"] = "\n".join([input for input in abspath_input_files]) + sim_opts_proc ["tb_name"] = "-top " + sim_opts_proc ["tb_name"] + sim_opts_proc ["timescale"] = "-timescale " + sim_opts_proc ["timescale"] + if sim_opts_proc ["defines"] is not None: sim_opts_proc ["defines"] = "\n".join(["-define " + define for define in sim_opts_proc ["defines"]]) + if sim_opts_proc ["incdir"] is not None: sim_opts_proc ["incdir"] = "\n".join(["-incdir " + incdir for incdir in sim_opts_proc ["incdir"]]) + if sim_opts_proc ["compiler_cc_opts"] is not None: sim_opts_proc ["compiler_cc_opts"] = "\n".join(["-Wcxx," + opt for opt in sim_opts_proc ["compiler_cc_opts"]]) + if sim_opts_proc ["compiler_ld_opts"] is not None: sim_opts_proc ["compiler_ld_opts"] = "\n".join(["-Wld," + opt for opt in sim_opts_proc ["compiler_ld_opts"]]) + + return sim_opts_proc, sim_opts + + def extract_waveform_opts(self) -> Tuple[Dict[str, str], Dict[str, str]]: + wav_opts_def = {"type": None, + "dump_name": "waveform", + "compression": False, + "probe_paths": None, + "tcl_opts": None, + "shm_incr": "5G"} + + # Because key-driven waveform spec is optional, should return none-type dict by default. 
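+ # For example (hypothetical config): sim.inputs.waveform.type: "SHM" with
+ # probe_paths: ["tb.dut"] gives wav_opts_proc["probe_paths"] == "probe -create tb.dut",
+ # which generate_sim_tcl() writes into the TCL driver verbatim.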
+ wav_opts: Dict[str, Any] = {} + if self.get_setting(f"{self.sim_waveform_prefix}.type") is not None: + optional_keys = ["shm_incr"] + wav_opts = self.get_settings_from_dict(wav_opts_def, self.sim_waveform_prefix, optional_keys) + wav_opts_proc = wav_opts.copy() + wav_opts_proc ["compression"] = "-compress" if wav_opts ["compression"] else "" + if wav_opts_proc ["probe_paths"] is not None: wav_opts_proc ["probe_paths"] = "\n".join(["probe -create " + path for path in wav_opts_proc ["probe_paths"]]) + if wav_opts_proc ["tcl_opts"] is not None: wav_opts_proc ["tcl_opts"] = "\n".join(opt for opt in wav_opts_proc ["tcl_opts"]) + else: + wav_opts = {"type": None} + wav_opts_proc = wav_opts.copy() + + return wav_opts_proc, wav_opts + + def extract_saif_opts(self) -> Dict[str, str]: + + saif_opts = {} + saif_opts ["mode"] = self.get_setting(f"{self.sim_input_prefix}.saif.mode") + + if saif_opts ["mode"] == "time": + saif_opts ["start_time"] = self.get_setting(f"{self.sim_input_prefix}.saif.start_time") + saif_opts ["end_time"] = self.get_setting(f"{self.sim_input_prefix}.saif.end_time") + if saif_opts ["mode"] == "trigger_raw": + saif_opts ["start_trigger_raw"] = self.get_setting(f"{self.sim_input_prefix}.saif.start_trigger_raw") + saif_opts ["end_trigger_raw"] = self.get_setting(f"{self.sim_input_prefix}.saif.end_trigger_raw") + return saif_opts + + # Label generated files + def write_header(self, header: str, wrapper: io.TextIOWrapper)->None: + now = datetime.datetime.now() + wrapper.write("# "+"="*39+"\n") + wrapper.write("# "+header+"\n") + wrapper.write(f"# CREATED AT {now} \n") + wrapper.write("# "+"="*39+"\n") + + # LSF submit command + # Try to maintain some parity with vcs plugin. + def update_submit_options(self)->None: + if isinstance(self.submit_command, HammerLSFSubmitCommand): + settings = self.submit_command.settings._asdict() + if self.submit_command.settings.num_cpus is not None: + settings['num_cpus'] = self.submit_command.settings.num_cpus + else: + settings['num_cpus'] = 1 + self.submit_command.settings = HammerLSFSettings(**settings) + else: + pass + + # Create an xrun.arg file + def generate_arg_file(self, + file_name: str, + header: str, + additional_opt: List[Tuple[str, List[str]]] = [], + sim_opt_removal: List[str]=[], + xrun_opt_removal: List[str]=[]) -> str: + + # Xrun opts and sim opts must generally be carried through for 1:1:1 correspondence between calls. + # However, certain opts must be removed (e.g., during sim step), leading to the inclusion of removal opts. + xrun_opts_proc = self.extract_xrun_opts()[0] + sim_opts_proc = self.extract_sim_opts()[0] + sim_opt_removal.extend(["tb_dut", "execute_sim", "gl_register_force_value", "timing_annotated"]) # Always remove these. + [xrun_opts_proc.pop(opt, None) for opt in xrun_opt_removal] + [sim_opts_proc.pop(opt, None) for opt in sim_opt_removal] + + arg_path = self.run_dir+f"/{file_name}" + f = open(arg_path,"w+") + self.write_header(header, f) + + f.write("\n# XRUN OPTIONS: \n") + [f.write(elem + "\n") for elem in xrun_opts_proc.values() if elem is not None] + f.write("\n# SIM OPTIONS: \n") + [f.write(elem + "\n") for elem in sim_opts_proc.values() if elem is not None] + for opt_list in additional_opt: + if opt_list[1]: + f.write(f"\n# {opt_list[0]} OPTIONS: \n") + [f.write(elem + "\n") for elem in opt_list[1]] + f.close() + + return arg_path + + # Convenience function invoked when multicore options are needed. 
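+ # One thread is reserved for the master process: with vlsi.core.max_threads: 8
+ # (hypothetical value) this emits "-mce_build_thread_count 7" and
+ # "-mce_sim_thread_count 7", mirroring the FGP thread accounting in the VCS plugin above.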
+ def generate_mc_cmd(self) -> str: + opts = "" + num_threads = int(self.get_setting("vlsi.core.max_threads")) - 1 + opts = opts + f"-mce_build_thread_count {num_threads} \n" + opts = opts + f"-mce_sim_thread_count {num_threads} \n" + return opts + + # Deposit values + # Try to maintain some parity with vcs plugin. + def generate_gl_deposit_tcl(self) -> List[str]: + sim_opts = self.extract_sim_opts()[1] + tb_prefix = sim_opts["tb_name"] + '.' + sim_opts["tb_dut"] + force_val = sim_opts["gl_register_force_value"] + + abspath_all_regs = os.path.join(os.getcwd(), self.all_regs) + if not os.path.isfile(abspath_all_regs): + self.logger.error("List of all regs json not found as expected at {0}".format(self.all_regs)) + + formatted_deposit = [] + with open(abspath_all_regs) as reg_file: + reg_json = json.load(reg_file) + assert isinstance(reg_json, List), "list of all registers should be a json list of dictionaries from string to string not {}".format(type(reg_json)) + for reg in sorted(reg_json, key=lambda r: len(r["path"])): + path = reg["path"] + path = path.split('/') + special_char = ['[',']','#','$',';','!',"{",'}','\\'] + path = ['@{' + subpath + ' }' if any(char in subpath for char in special_char) else subpath for subpath in path] + path = '.'.join(path) + pin = reg["pin"] + formatted_deposit.append("deposit " + tb_prefix + "." + path + "." + pin + " = " + str(force_val)) + + return formatted_deposit + + # Creates an sdf cmd file for command line driven sdf annotation. + # Until sdf annotation provides values other than maximum, sdf_cmd_file will only support mtm max. + def generate_sdf_cmd_file(self) -> bool: + sim_opts = self.extract_sim_opts()[1] + prefix = sim_opts["tb_name"] + '.' + sim_opts["tb_dut"] + + f = open(self.sdf_cmd_file, "w+") + f.write(f'SDF_FILE = "{self.sdf_file}", \n') + f.write(f'MTM_CONTROL = "MAXIMUM", \n') + f.write(f'SCALE_TYPE = "FROM_MAXIMUM", \n') + f.write(f'SCOPE = {prefix};') + f.close() + return True + + # Creates SAIF arguments for the TCL driver. + def generate_saif_tcl_cmd(self) -> str: + saif_opts: Dict[str, Any] = self.extract_saif_opts() + sim_opts = self.extract_sim_opts()[1] + prefix = sim_opts["tb_name"] + '.' + sim_opts["tb_dut"] + + saif_args = "" + + # Process saif options + if saif_opts["mode"] == "time": + saif_start_time = saif_opts["start_time"] + saif_end_time = saif_opts["end_time"] + elif saif_opts["mode"] == "trigger": + self.logger.error("Trigger SAIF mode currently unsupported.") + elif saif_opts["mode"] == "full": + pass + elif saif_opts["mode"] == "trigger_raw": + saif_start_trigger_raw = saif_opts["start_trigger_raw"] + saif_end_trigger_raw = saif_opts["end_trigger_raw"] + else: + self.logger.warning(f'Bad saif_mode: {saif_opts["mode"]}. Valid modes are time, full, trigger, or none. Defaulting to none.') + saif_opts["mode"] = None + + if saif_opts["mode"] is not None: + if saif_opts["mode"] == "time": + stime = TimeValue(saif_start_time) + etime = TimeValue(saif_end_time) + saif_args = saif_args + f'dumpsaif -output ucli.saif -overwrite -scope {prefix} -start {stime.value_in_units("ns")}ns -stop {etime.value_in_units("ns")}ns' + elif saif_opts["mode"] == "full": + saif_args = saif_args + f"dumpsaif -output ucli.saif -overwrite -scope {prefix}" + elif saif_opts["mode"] == "trigger_raw": + saif_args = saif_args + f"dumpsaif -output ucli.saif -overwrite -scope {prefix} {saif_start_trigger_raw} {saif_end_trigger_raw}" + return saif_args + + # Creates a TCL driver for simulation.
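+ # A representative generated driver (contents vary with waveform, GL, and SAIF settings;
+ # names hypothetical) looks like:
+ #   source .../tools/xcelium/files/xmsimrc
+ #   database -open -shm shmdb -into waveform.shm -event -default
+ #   probe -create tb.dut
+ #   dumpsaif -output ucli.saif -overwrite -scope tb.dut
+ #   run
+ #   dumpsaif -end
+ #   database -close *db
+ #   exit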
+ def generate_sim_tcl(self) -> bool: + xmsimrc_def = self.get_setting("sim.xcelium.xmsimrc_def") + saif_opts = self.extract_saif_opts() + wav_opts_proc, wav_opts = self.extract_waveform_opts() + + f = open(self.sim_tcl_file, "w+") + self.write_header("HAMMER-GEN SIM TCL DRIVER", f) + f.write(f"source {xmsimrc_def} \n") + + # Prepare waveform dump options if specified. + if wav_opts["type"] is not None: + if wav_opts["type"] == "VCD": f.write(f'database -open -vcd vcddb -into {wav_opts["dump_name"]}.vcd -default {wav_opts_proc["compression"]} \n') + elif wav_opts["type"] == "EVCD": f.write(f'database -open -evcd evcddb -into {wav_opts["dump_name"]}.evcd -default {wav_opts_proc["compression"]} \n') + elif wav_opts["type"] == "SHM": f.write(f'database -open -shm shmdb -into {wav_opts["dump_name"]}.shm -event -default {wav_opts_proc["compression"]} {wav_opts_proc["shm_incr"]} \n') + if wav_opts_proc["probe_paths"] is not None: + [f.write(f'{wav_opts_proc["probe_paths"]}\n')] + if wav_opts_proc["tcl_opts"] is not None: [f.write(f'{wav_opts_proc["tcl_opts"]}\n')] + + # Deposit gl values. + if self.level.is_gatelevel(): + formatted_deposit = self.generate_gl_deposit_tcl() + [f.write(f'{deposit}\n') for deposit in formatted_deposit] + + # Create saif file if specified. + if saif_opts["mode"] is not None: + f.write(f'{self.generate_saif_tcl_cmd()}\n') + + # Execute + f.write("run \n") + + # Close databases and dumps properly. + f.write("dumpsaif -end \n") + f.write("database -close *db \n") + f.write("exit") + f.close() + return True + + def compile_xrun(self) -> bool: + + if not os.path.isfile(self.xcelium_bin): + self.logger.error(f"Xcelium (xrun) binary not found at {self.xcelium_bin}.") + return False + + if not self.check_input_files(self.xcelium_ext): + return False + + # Gather compilation-only options + xrun_opts = self.extract_xrun_opts()[1] + compile_opts = self.get_setting(f"{self.tool_config_prefix()}.compile_opts", []) + compile_opts.append("-logfile xrun_compile.log") + if xrun_opts["mce"]: compile_opts.append(self.generate_mc_cmd()) + compile_opts = ('COMPILE', compile_opts) + + arg_file_path = self.generate_arg_file("xrun_compile.arg", "HAMMER-GEN XRUN COMPILE ARG FILE", [compile_opts]) + args = [self.xcelium_bin] + args.append(f"-compile -f {arg_file_path}") + + self.update_submit_options() + self.run_executable(args, cwd=self.run_dir) + HammerVLSILogging.enable_colour = True + HammerVLSILogging.enable_tag = True + return True + + def elaborate_xrun(self) -> bool: + xrun_opts = self.extract_xrun_opts()[1] + sim_opts = self.extract_sim_opts()[1] + elab_opts = self.get_setting(f"{self.tool_config_prefix()}.elab_opts", []) + elab_opts.append("-logfile xrun_elab.log") + elab_opts.append("-glsperf") + elab_opts.append("-genafile access.txt") + + if self.level.is_gatelevel(): + elab_opts.extend(self.get_verilog_models()) + if sim_opts["timing_annotated"]: + self.generate_sdf_cmd_file() + elab_opts.append(f"-sdf_cmd_file {self.sdf_cmd_file}") + elab_opts.append("-sdf_verbose") + elab_opts.append("-negdelay") + else: + elab_opts.append("-notimingchecks") + elab_opts.append("-delay_mode zero") + else: + elab_opts.append("-notimingchecks") + elab_opts.append("-delay_mode zero") + + if xrun_opts["mce"]: elab_opts.append(self.generate_mc_cmd()) + elab_opts = ('ELABORATION', elab_opts) + + arg_file_path = self.generate_arg_file("xrun_elab.arg", "HAMMER-GEN XRUN ELAB ARG FILE", [elab_opts]) + args = [self.xcelium_bin] + args.append(f"-elaborate -f {arg_file_path}") + + self.update_submit_options() +
self.run_executable(args, cwd=self.run_dir) + return True + + def sim_xrun(self) -> bool: + sim_opts = self.extract_sim_opts()[1] + sim_cmd_opts = self.get_setting(f"{self.sim_input_prefix}.options", []) + sim_opts_removal = ["tb_name", "input_files", "incdir"] + xrun_opts_removal = ["enhanced_recompile", "mce"] + sim_cmd_opts = ('SIMULATION', sim_cmd_opts) + + if not sim_opts["execute_sim"]: + self.logger.warning("Not running any simulations because sim.inputs.execute_sim is unset.") + return True + + arg_file_path = self.generate_arg_file("xrun_sim.arg", "HAMMER-GEN XRUN SIM ARG FILE", [sim_cmd_opts], + sim_opt_removal = sim_opts_removal, + xrun_opt_removal = xrun_opts_removal) + args =[self.xcelium_bin] + args.append(f"-R -f {arg_file_path} -input {self.sim_tcl_file}") + + self.generate_sim_tcl() + self.update_submit_options() + self.run_executable(args, cwd=self.run_dir) + return True + +tool = xcelium diff --git a/hammer/sim/xcelium/defaults.yml b/hammer/sim/xcelium/defaults.yml new file mode 100644 index 000000000..980bbe29c --- /dev/null +++ b/hammer/sim/xcelium/defaults.yml @@ -0,0 +1,36 @@ +sim.xcelium: + # Tool version (e.g., "XCELIUM2103") + version: "XCELIUM2103" + + # Path to xcelium binary. + xcelium_bin: "${cadence.cadence_home}/XCELIUM/${sim.xcelium.version}/tools/xcelium/bin/64bit/xrun" + xcelium_bin_meta: lazysubst + + # Path to xmsimrc_def file. + xmsimrc_def: "${cadence.cadence_home}/XCELIUM/${sim.xcelium.version}/tools/xcelium/files/xmsimrc" + xmsimrc_def_meta: lazysubst + + # ---------------------------- + # XRUN Top-Level Customization + # ---------------------------- + # If true, enable fast recompilation. + enhanced_recompile: True + # Specifies alternative name for xcelium.d working directory + xmlibdirname: null + # Specifies alternative location for xcelium.d working directory + xmlibdirpath: null + # Specifies alternative scratch location for sims. + simtmp: null + # Create an elaborated snapshot of specified name. + snapshot: null + # If true, enables +rwc access globally. Makes debug easier but slows performance. + global_access: False + # If true, enable multicore (similar to sim.vcs.fgp) + # By default it should not be TRUE as there are numerous potential limitations in tcl, + # build switches, and general simulator features. + mce: False + # Opts to access compilation step in xcelium. + compile_opts: null + # Opts to access elaboration step in xcelium. + elab_opts: null + diff --git a/hammer/sim/xcelium/defaults_types.yml b/hammer/sim/xcelium/defaults_types.yml new file mode 100644 index 000000000..e793c0b11 --- /dev/null +++ b/hammer/sim/xcelium/defaults_types.yml @@ -0,0 +1,24 @@ +sim.xcelium: + # Tool version (e.g., "XCELIUM2103") + version: str + # Path to xcelium binary. + xcelium_bin: str + # Path to xmsimrc_def file. + xmsimrc_def: str + # If true, enable fast recompilation. + enhanced_recompile: bool + # Specifies alternative name for xcelium.d working directory + xmlibdirname: Optional[str] + # Specifies alternative location for xcelium.d working directory + xmlibdirpath: Optional[str] + # Specifies alternative scratch location for sims. + simtmp: Optional[str] + # Create an elaborated snapshot of specified name. + snapshot: Optional[str] + # If true, enables +rwc access globally. Makes debug easier but slows performance. + global_access: bool + # If true, enable multicore support. 
+ mce: bool + + compile_opts: Optional[list[str]] + elab_opts: Optional[list[str]] \ No newline at end of file diff --git a/hammer/synthesis/genus/__init__.py b/hammer/synthesis/genus/__init__.py new file mode 100644 index 000000000..6d5a10ed1 --- /dev/null +++ b/hammer/synthesis/genus/__init__.py @@ -0,0 +1,416 @@ +# hammer-vlsi plugin for Cadence Genus. +# +# See LICENSE for licence details. + +from hammer.vlsi import HammerTool, HammerToolStep, HammerToolHookAction, HierarchicalMode +from hammer.utils import VerilogUtils +from hammer.vlsi import HammerSynthesisTool +from hammer.logging import HammerVLSILogging +from hammer.vlsi import MMMCCornerType +import hammer.tech as hammer_tech + +from typing import Dict, List, Any, Optional + +from hammer.tech.specialcells import CellType + +import os +from collections import Counter + +from hammer.common.cadence import CadenceTool + + +class Genus(HammerSynthesisTool, CadenceTool): + @property + def post_synth_sdc(self) -> Optional[str]: + # No post-synth SDC input for synthesis... + return None + + def fill_outputs(self) -> bool: + # Check that the regs paths were written properly if the write_regs step was run + self.output_seq_cells = self.all_cells_path + self.output_all_regs = self.all_regs_path + if self.ran_write_regs: + if not os.path.isfile(self.all_cells_path): + raise ValueError("Output find_regs_cells.json %s not found" % (self.all_cells_path)) + + if not os.path.isfile(self.all_regs_path): + raise ValueError("Output find_regs_paths.json %s not found" % (self.all_regs_path)) + + if not self.process_reg_paths(self.all_regs_path): + self.logger.error("Failed to process all register paths") + else: + self.logger.info("Did not run write_regs") + + # Check that the synthesis outputs exist if the synthesis run was successful + mapped_v = self.mapped_hier_v_path if self.hierarchical_mode.is_nonleaf_hierarchical() else self.mapped_v_path + self.output_files = [mapped_v] + self.output_sdc = self.mapped_sdc_path + self.sdf_file = self.output_sdf_path + if self.ran_write_outputs: + if not os.path.isfile(mapped_v): + raise ValueError("Output mapped verilog %s not found" % (mapped_v)) # better error? + + if not os.path.isfile(self.mapped_sdc_path): + raise ValueError("Output SDC %s not found" % (self.mapped_sdc_path)) # better error? + + if not os.path.isfile(self.output_sdf_path): + raise ValueError("Output SDF %s not found" % (self.output_sdf_path)) + else: + self.logger.info("Did not run write_outputs") + + return True + + @property + def env_vars(self) -> Dict[str, str]: + new_dict = dict(super().env_vars) + new_dict["GENUS_BIN"] = self.get_setting("synthesis.genus.genus_bin") + return new_dict + + def export_config_outputs(self) -> Dict[str, Any]: + outputs = dict(super().export_config_outputs()) + # TODO(edwardw): find a "safer" way of passing around these settings keys. 
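+ # These keys are read back by downstream steps; for example, a gate-level simulation
+ # can consume synthesis.outputs.sdf_file and synthesis.outputs.all_regs (illustrative,
+ # not an exhaustive list of consumers).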
+ outputs["synthesis.outputs.sdc"] = self.output_sdc + outputs["synthesis.outputs.seq_cells"] = self.output_seq_cells + outputs["synthesis.outputs.all_regs"] = self.output_all_regs + outputs["synthesis.outputs.sdf_file"] = self.output_sdf_path + return outputs + + def tool_config_prefix(self) -> str: + return "synthesis.genus" + + def get_tool_hooks(self) -> List[HammerToolHookAction]: + return [self.make_persistent_hook(genus_global_settings)] + + @property + def steps(self) -> List[HammerToolStep]: + steps_methods = [ + self.init_environment, + self.syn_generic, + self.syn_map, + self.add_tieoffs, + self.write_regs, + self.generate_reports, + self.write_outputs + ] + if self.get_setting("synthesis.inputs.retime_modules"): + steps_methods.insert(1, self.retime_modules) + return self.make_steps_from_methods(steps_methods) + + def do_pre_steps(self, first_step: HammerToolStep) -> bool: + assert super().do_pre_steps(first_step) + # Reload from the last checkpoint if we're not starting over. + if first_step != self.first_step: + self.verbose_append("read_db pre_{step}".format(step=first_step.name)) + return True + + def do_between_steps(self, prev: HammerToolStep, next: HammerToolStep) -> bool: + assert super().do_between_steps(prev, next) + # Write a checkpoint to disk. + self.verbose_append("write_db -to_file pre_{step}".format(step=next.name)) + return True + + def do_post_steps(self) -> bool: + assert super().do_post_steps() + return self.run_genus() + + @property + def mapped_v_path(self) -> str: + return os.path.join(self.run_dir, "{}.mapped.v".format(self.top_module)) + + @property + def mapped_hier_v_path(self) -> str: + if self.version() >= self.version_number("191"): + return os.path.join(self.run_dir, "{}_noilm.mapped.v".format(self.top_module)) + else: + return os.path.join(self.run_dir, "genus_invs_des/genus.v.gz") + + @property + def mapped_sdc_path(self) -> str: + return os.path.join(self.run_dir, "{}.mapped.sdc".format(self.top_module)) + + @property + def all_regs_path(self) -> str: + return os.path.join(self.run_dir, "find_regs_paths.json") + + @property + def all_cells_path(self) -> str: + return os.path.join(self.run_dir, "find_regs_cells.json") + + @property + def output_sdf_path(self) -> str: + return os.path.join(self.run_dir, "{top}.mapped.sdf".format(top=self.top_module)) + + @property + def ran_write_regs(self) -> bool: + """The write_regs step sets this to True if it was run.""" + return self.attr_getter("_ran_write_regs", False) + + @ran_write_regs.setter + def ran_write_regs(self, val: bool) -> None: + self.attr_setter("_ran_write_regs", val) + + @property + def ran_write_outputs(self) -> bool: + """The write_outputs step sets this to True if it was run.""" + return self.attr_getter("_ran_write_outputs", False) + + @ran_write_outputs.setter + def ran_write_outputs(self, val: bool) -> None: + self.attr_setter("_ran_write_outputs", val) + + def remove_hierarchical_submodules_from_file(self, path: str) -> str: + """ + Remove any hierarchical submodules' implementation from the given Verilog source file in path, if it is present. + If it is not, return the original path. + :param path: Path to verilog source file + :return: A path to a modified version of the original file without the given module, or the same path as before. 
+ """ + with open(path, "r") as f: + source = f.read() + submodules = list(map(lambda ilm: ilm.module, self.get_input_ilms())) + + touched = False + + for submodule in submodules: + if VerilogUtils.contains_module(source, submodule): + source = VerilogUtils.remove_module(source, submodule) + touched = True + + if touched: + # Write the modified input to a new file in run_dir. + name, ext = os.path.splitext(os.path.basename(path)) + new_filename = str(name) + "_no_submodules" + str(ext) + new_path = os.path.join(self.run_dir, new_filename) + with open(new_path, "w") as f: + f.write(source) + return new_path + else: + return path + + def init_environment(self) -> bool: + # Python sucks here for verbosity + verbose_append = self.verbose_append + + # Clock gating setup + if self.get_setting("synthesis.clock_gating_mode") == "auto": + verbose_append("set_db lp_clock_gating_infer_enable true") + # Innovus will create instances named CLKGATE_foo, CLKGATE_bar, etc. + verbose_append("set_db lp_clock_gating_prefix {CLKGATE}") + verbose_append("set_db lp_insert_clock_gating true") + verbose_append("set_db lp_clock_gating_hierarchical true") + verbose_append("set_db lp_insert_clock_gating_incremental true") + verbose_append("set_db lp_clock_gating_register_aware true") + + # Set up libraries. + # Read timing libraries. + mmmc_path = os.path.join(self.run_dir, "mmmc.tcl") + self.write_contents_to_path(self.generate_mmmc_script(), mmmc_path) + verbose_append("read_mmmc {mmmc_path}".format(mmmc_path=mmmc_path)) + + if self.hierarchical_mode.is_nonleaf_hierarchical(): + # Read ILMs. + for ilm in self.get_input_ilms(): + # Assumes that the ILM was created by Innovus (or at least the file/folder structure). + verbose_append("read_ilm -basename {data_dir}/{module}_postRoute -module_name {module}".format( + data_dir=ilm.data_dir, module=ilm.module)) + + # Read LEF layouts. + lef_files = self.technology.read_libs([ + hammer_tech.filters.lef_filter + ], hammer_tech.HammerTechnologyUtils.to_plain_item) + if self.hierarchical_mode.is_nonleaf_hierarchical(): + ilm_lefs = list(map(lambda ilm: ilm.lef, self.get_input_ilms())) + lef_files.extend(ilm_lefs) + verbose_append("read_physical -lef {{ {files} }}".format( + files=" ".join(lef_files) + )) + + # Load input files and check that they are all Verilog. + if not self.check_input_files([".v", ".sv"]): + return False + # We are switching working directories and Genus still needs to find paths. + abspath_input_files = list(map(lambda name: os.path.join(os.getcwd(), name), self.input_files)) # type: List[str] + + # If we are in hierarchical, we need to remove hierarchical sub-modules/sub-blocks. + if self.hierarchical_mode.is_nonleaf_hierarchical(): + abspath_input_files = list(map(self.remove_hierarchical_submodules_from_file, abspath_input_files)) + + # Add any verilog_synth wrappers (which are needed in some technologies e.g. for SRAMs) which need to be + # synthesized. + abspath_input_files += self.technology.read_libs([ + hammer_tech.filters.verilog_synth_filter + ], hammer_tech.HammerTechnologyUtils.to_plain_item) + + # Read the RTL. + verbose_append("read_hdl -sv {{ {} }}".format(" ".join(abspath_input_files))) + + # Elaborate/parse the RTL. 
+ verbose_append("elaborate {}".format(self.top_module)) + # Preserve submodules + if self.hierarchical_mode.is_nonleaf_hierarchical(): + for ilm in self.get_input_ilms(): + verbose_append("set_db module:{top}/{mod} .preserve true".format(top=self.top_module, mod=ilm.module)) + verbose_append("init_design -top {}".format(self.top_module)) + + # Prevent floorplanning targets from getting flattened. + # TODO: is there a way to track instance paths through the synthesis process? + verbose_append("set_db root: .auto_ungroup none") + + # Set units to pF and technology time unit. + # Must be done after elaboration. + verbose_append("set_units -capacitance 1.0pF") + verbose_append("set_load_unit -picofarads 1") + verbose_append("set_units -time 1.0{}".format(self.get_time_unit().value_prefix + self.get_time_unit().unit)) + + # Set "don't use" cells. + for l in self.generate_dont_use_commands(): + self.append(l) + + return True + + def retime_modules(self) -> bool: + retime_mods = self.get_setting("synthesis.inputs.retime_modules") + + if retime_mods: + rt_tcl = ( + f"set rt_mods [get_designs \"{' '.join(retime_mods)}\"]\n" \ + "foreach rt_mod $rt_mods {\n" \ + " set_db $rt_mod .retime true\n" \ + "}\n" \ + "set_db / .retime_verification_flow true" + ) + self.append(rt_tcl) + + return True + + def syn_generic(self) -> bool: + self.verbose_append("syn_generic") + return True + + def syn_map(self) -> bool: + self.verbose_append("syn_map") + # Need to suffix modules for hierarchical simulation if not top + if self.hierarchical_mode not in [HierarchicalMode.Flat, HierarchicalMode.Top]: + self.verbose_append("update_names -module -log hier_updated_names.log -suffix _{MODULE}".format(MODULE=self.top_module)) + return True + + def add_tieoffs(self) -> bool: + tie_hi_cells = self.technology.get_special_cell_by_type(CellType.TieHiCell) + tie_lo_cells = self.technology.get_special_cell_by_type(CellType.TieLoCell) + tie_hilo_cells = self.technology.get_special_cell_by_type(CellType.TieHiLoCell) + + if len(tie_hi_cells) != 1 or len (tie_lo_cells) != 1: + if len(tie_hilo_cells) != 1: + self.logger.warning("Hi and Lo tiecells are unspecified or improperly specified and will not be added during synthesis.") + return True + tie_hi_cells = tie_hilo_cells + tie_lo_cells = tie_hilo_cells + + tie_hi_cell = tie_hi_cells[0].name[0] + tie_lo_cell = tie_lo_cells[0].name[0] + + # Limit "no delay description exists" warnings + self.verbose_append("set_db message:WSDF-201 .max_print 20") + self.verbose_append("set_db use_tiehilo_for_const duplicate") + + # If there is more than 1 corner or a certain type, use lib cells for only the active analysis view + corner_counts = Counter(list(map(lambda c: c.type, self.get_mmmc_corners()))) + if any(cnt>1 for cnt in corner_counts.values()): + self.verbose_append("set ACTIVE_VIEW [string map { .setup_view {} .hold_view {} .extra_view {} } [get_db analysis_view:[get_analysis_views] .name]]") + self.verbose_append("set HI_TIEOFF [get_db base_cell:{TIE_HI_CELL} .lib_cells -if {{ .library.default_opcond == $ACTIVE_VIEW }}]".format(TIE_HI_CELL=tie_hi_cell)) + self.verbose_append("set LO_TIEOFF [get_db base_cell:{TIE_LO_CELL} .lib_cells -if {{ .library.default_opcond == $ACTIVE_VIEW }}]".format(TIE_LO_CELL=tie_lo_cell)) + self.verbose_append("add_tieoffs -high $HI_TIEOFF -low $LO_TIEOFF -max_fanout 1 -verbose") + else: + self.verbose_append("add_tieoffs -high {HI_TIEOFF} -low {LO_TIEOFF} -max_fanout 1 -verbose".format(HI_TIEOFF=tie_hi_cell, LO_TIEOFF=tie_lo_cell)) + return True + + def 
generate_reports(self) -> bool: + """Generate reports.""" + # TODO: extend report generation capabilities + self.verbose_append("write_reports -directory reports -tag final") + return True + + def write_regs(self) -> bool: + """write regs info to be read in for simulation register forcing""" + if self.hierarchical_mode.is_nonleaf_hierarchical(): + self.append(self.child_modules_tcl()) + self.append(self.write_regs_tcl()) + self.ran_write_regs = True + return True + + def write_outputs(self) -> bool: + verbose_append = self.verbose_append + top = self.top_module + + verbose_append("write_hdl > {}".format(self.mapped_v_path)) + if self.hierarchical_mode.is_nonleaf_hierarchical() and self.version() >= self.version_number("191"): + verbose_append("write_hdl -exclude_ilm > {}".format(self.mapped_hier_v_path)) + verbose_append("write_script > {}.mapped.scr".format(top)) + corners = self.get_mmmc_corners() + if corners: + # First setup corner is default view + view_name="{cname}.setup_view".format(cname=next(filter(lambda c: c.type is MMMCCornerType.Setup, corners)).name) + else: + # TODO: remove hardcoded my_view string + view_name = "my_view" + verbose_append("write_sdc -view {view} > {file}".format(view=view_name, file=self.mapped_sdc_path)) + + verbose_append("write_sdf > {run_dir}/{top}.mapped.sdf".format(run_dir=self.run_dir, top=top)) + + # We just get "Cannot trace ILM directory. Data corrupted." + # -hierarchical needs to be used for non-leaf modules + is_hier = self.hierarchical_mode != HierarchicalMode.Leaf # self.hierarchical_mode != HierarchicalMode.Flat + verbose_append("write_design -innovus {hier_flag} -gzip_files {top}".format( + hier_flag="-hierarchical" if is_hier else "", top=top)) + + self.ran_write_outputs = True + + return True + + def run_genus(self) -> bool: + verbose_append = self.verbose_append + + """Close out the synthesis script and run Genus.""" + # Quit Genus. + verbose_append("quit") + + # Create synthesis script. + syn_tcl_filename = os.path.join(self.run_dir, "syn.tcl") + self.write_contents_to_path("\n".join(self.output), syn_tcl_filename) + + # Build args. + args = [ + self.get_setting("synthesis.genus.genus_bin"), + "-f", syn_tcl_filename, + "-no_gui" + ] + + if bool(self.get_setting("synthesis.genus.generate_only")): + self.logger.info("Generate-only mode: command-line is " + " ".join(args)) + else: + # Temporarily disable colours/tag to make run output more readable. + # TODO: think of a more elegant way to do this? 
+ HammerVLSILogging.enable_colour = False + HammerVLSILogging.enable_tag = False + self.run_executable(args, cwd=self.run_dir) # TODO: check for errors and deal with them + HammerVLSILogging.enable_colour = True + HammerVLSILogging.enable_tag = True + + return True + +def genus_global_settings(ht: HammerTool) -> bool: + """Settings that need to be reapplied at every tool invocation""" + assert isinstance(ht, HammerSynthesisTool) + assert isinstance(ht, CadenceTool) + ht.create_enter_script() + + # Python sucks here for verbosity + verbose_append = ht.verbose_append + + # Generic Settings + verbose_append("set_db hdl_error_on_blackbox true") + verbose_append("set_db max_cpus_per_server {}".format(ht.get_setting("vlsi.core.max_threads"))) + + return True + +tool = Genus diff --git a/hammer/synthesis/genus/defaults.yml b/hammer/synthesis/genus/defaults.yml new file mode 100644 index 000000000..710bda9f6 --- /dev/null +++ b/hammer/synthesis/genus/defaults.yml @@ -0,0 +1,12 @@ +# Default settings for synthesis in Genus, for project/technology configuration and overriding. +synthesis.genus: + # Location of the binary. + genus_bin: "${cadence.cadence_home}/GENUS/GENUS${synthesis.genus.version}/bin/genus" + genus_bin_meta: lazysubst # we want later overrides to be able to affect this + + # Genus version to use. + # Used to locate the binary - e.g. the '171' in ${cadence.cadence_home}/GENUS/GENUS171/bin/genus + version: "171" + + # Generate the TCL file but do not run it yet. + generate_only: false diff --git a/hammer/tech/__init__.py b/hammer/tech/__init__.py index f6313f9a9..90f981805 100644 --- a/hammer/tech/__init__.py +++ b/hammer/tech/__init__.py @@ -101,32 +101,32 @@ def prepend(self, rest_of_path: str) -> str: class Library(BaseModel): # TODO: refactor into library types, currently a Library is defined by just a small # set of these fields (e.g. 
lef, gds, lib, verilog for stdcell libraries) - name: Optional[str] - ccs_liberty_file: Optional[str] - ccs_library_file: Optional[str] - ecsm_liberty_file: Optional[str] - ecsm_library_file: Optional[str] - corner: Optional[Corner] - itf_files: Optional[MinMaxCap] - lef_file: Optional[str] - klayout_techfile: Optional[str] - spice_file: Optional[str] - gds_file: Optional[str] - milkyway_lib_in_dir: Optional[str] - milkyway_techfile: Optional[str] - nldm_liberty_file: Optional[str] - nldm_library_file: Optional[str] - openaccess_techfile: Optional[str] - provides: Optional[List[Provide]] - qrc_techfile: Optional[str] - supplies: Optional[Supplies] - tluplus_files: Optional[MinMaxCap] - tluplus_map_file: Optional[TLUPlusMapFile] - verilog_sim: Optional[str] - verilog_synth: Optional[str] - spice_model_file: Optional[SpiceModelFile] - power_grid_library: Optional[str] - extra_prefixes: Optional[List[PathPrefix]] + name: Optional[str] = None + ccs_liberty_file: Optional[str] = None + ccs_library_file: Optional[str] = None + ecsm_liberty_file: Optional[str] = None + ecsm_library_file: Optional[str] = None + corner: Optional[Corner] = None + itf_files: Optional[MinMaxCap] = None + lef_file: Optional[str] = None + klayout_techfile: Optional[str] = None + spice_file: Optional[str] = None + gds_file: Optional[str] = None + milkyway_lib_in_dir: Optional[str] = None + milkyway_techfile: Optional[str] = None + nldm_liberty_file: Optional[str] = None + nldm_library_file: Optional[str] = None + openaccess_techfile: Optional[str] = None + provides: Optional[List[Provide]] = None + qrc_techfile: Optional[str] = None + supplies: Optional[Supplies] = None + tluplus_files: Optional[MinMaxCap] = None + tluplus_map_file: Optional[TLUPlusMapFile] = None + verilog_sim: Optional[str] = None + verilog_synth: Optional[str] = None + spice_model_file: Optional[SpiceModelFile] = None + power_grid_library: Optional[str] = None + extra_prefixes: Optional[List[PathPrefix]] = None PathsFunctionType = Callable[[Library], List[str]] diff --git a/hammer/tech/stackup.py b/hammer/tech/stackup.py index 102d76259..76e270a7d 100644 --- a/hammer/tech/stackup.py +++ b/hammer/tech/stackup.py @@ -102,7 +102,7 @@ class Metal(BaseModel): index: int direction: RoutingDirection min_width: Decimal - max_width: Optional[Decimal] + max_width: Optional[Decimal] = None pitch: Decimal offset: Decimal power_strap_widths_and_spacings: List[WidthSpacingTuple] diff --git a/hammer/technology/nangate45/sram_compiler/__init__.py b/hammer/technology/nangate45/sram_compiler/__init__.py index 8fbe3f1ac..9eebd803d 100644 --- a/hammer/technology/nangate45/sram_compiler/__init__.py +++ b/hammer/technology/nangate45/sram_compiler/__init__.py @@ -8,7 +8,7 @@ HammerToolStep, HammerSRAMGeneratorTool, SRAMParameters from hammer.vlsi.vendor import OpenROADTool from hammer.vlsi.units import VoltageValue, TemperatureValue -from hammer.tech import Library, ExtraLibrary +from hammer.tech import Library, ExtraLibrary, Corner, Provide, Supplies from typing import NamedTuple, Dict, Any, List, Optional from abc import ABCMeta, abstractmethod @@ -67,15 +67,15 @@ def generate_sram(self, params: SRAMParameters, name=sram_name, nldm_liberty_file=dst_lib, lef_file=dst_lef, - corner = { - 'nmos': "typical", - 'pmos': "typical", - 'temperature': str(corner.temp.value_in_units("C")) +" C" - }, - supplies = { - 'VDD': str(corner.voltage.value_in_units("V")) + " V", - 'VSS': "0 V" - }, - provides = [{'lib_type': "sram", 'vt': params.vt}])) + corner=Corner( + nmos="typical", + 
pmos="typical", + temperature=str(corner.temp.value_in_units("C")) +" C" + ), + supplies=Supplies( + VDD=str(corner.voltage.value_in_units("V")) + " V", + GND="0 V" + ), + provides=[Provide(lib_type="sram", vt=params.vt)])) tool=Nangate45SRAMGenerator diff --git a/hammer/technology/sky130/defaults.yml b/hammer/technology/sky130/defaults.yml index d1be2af6f..d38f75f8b 100644 --- a/hammer/technology/sky130/defaults.yml +++ b/hammer/technology/sky130/defaults.yml @@ -143,3 +143,7 @@ par.generate_power_straps_options: power_utilization_met2: 0.05 power_utilization_met4: 0.15 power_utilization_met5: 0.5 + +sim.inputs: + defines: ["FUNCTIONAL", "UNIT_DELAY=#1"] + defines_meta: append diff --git a/hammer/technology/sky130/sram_compiler/__init__.py b/hammer/technology/sky130/sram_compiler/__init__.py index c1a20135f..7185d341b 100644 --- a/hammer/technology/sky130/sram_compiler/__init__.py +++ b/hammer/technology/sky130/sram_compiler/__init__.py @@ -3,6 +3,7 @@ from pathlib import Path from hammer.vlsi import MMMCCorner, MMMCCornerType, HammerTool, HammerToolStep, HammerSRAMGeneratorTool, SRAMParameters +from hammer.tech import Corner, Supplies, Provide from hammer.vlsi.units import VoltageValue, TemperatureValue from hammer.tech import Library, ExtraLibrary from typing import NamedTuple, Dict, Any, List @@ -34,7 +35,7 @@ def generate_sram(self, params: SRAMParameters, corner: MMMCCorner) -> ExtraLibr if params.family != "1rw" and params.family != "1rw1r": self.logger.error("SKY130 SRAM cache does not support family:{f}".format(f=params.family)) - return ExtraLibrary(prefix=None, library=None) + return ExtraLibrary(prefix=None, library=None) # type: ignore if params.name.startswith("sramgen_sram"): self.logger.info(f"Compiling {params.family} memories to SRAM22 instances") @@ -62,9 +63,9 @@ def generate_sram(self, params: SRAMParameters, corner: MMMCCorner) -> ExtraLibr gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name), spice_file="{b}/{n}/{n}.spice".format(b=base_dir,n=sram_name), verilog_sim="{b}/{n}/{n}.v".format(b=base_dir,n=sram_name), - corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"}, - supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"}, - provides=[{'lib_type': "sram", 'vt': params.vt}])) + corner=Corner(nmos=speed_name, pmos=speed_name, temperature=str(corner.temp.value_in_units("C")) + " C"), + supplies=Supplies(VDD=str(corner.voltage.value_in_units("V")) + " V", GND="0 V"), + provides=[Provide(lib_type="sram", vt=params.vt)])) # TODO: remove OpenRAM support very soon elif params.name.startswith("sky130_sram_"): @@ -101,9 +102,9 @@ def generate_sram(self, params: SRAMParameters, corner: MMMCCorner) -> ExtraLibr gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name), spice_file="{b}/{n}/{n}.lvs.sp".format(b=cache_dir,n=sram_name), verilog_sim="{b}/{n}/{n}.v".format(b=cache_dir,n=sram_name), - corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"}, - supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"}, - provides=[{'lib_type': "sram", 'vt': params.vt}])) + corner=Corner(nmos=speed_name, pmos=speed_name, temperature=str(corner.temp.value_in_units("C")) + " C"), + supplies=Supplies(VDD=str(corner.voltage.value_in_units("V")) + " V", GND="0 V"), + provides=[Provide(lib_type="sram", vt=params.vt)])) else: self.logger.error(f"SRAM {params.name} not supported") return ExtraLibrary(prefix=None, library=Library()) diff 
--git a/hammer/timing/tempus/__init__.py b/hammer/timing/tempus/__init__.py new file mode 100644 index 000000000..2d32b4f67 --- /dev/null +++ b/hammer/timing/tempus/__init__.py @@ -0,0 +1,288 @@ +# hammer-vlsi plugin for Cadence Tempus. +# +# See LICENSE for licence details. + +from typing import List, Dict, Tuple + +import os +import errno + +from hammer.vlsi import HammerTool, HammerTimingTool, HammerToolStep, HammerToolHookAction, \ + MMMCCornerType +from hammer.logging import HammerVLSILogging +import hammer.tech as hammer_tech +from hammer.common.cadence import CadenceTool + +# Notes: this plugin should only use snake_case (common UI) commands. + +class Tempus(HammerTimingTool, CadenceTool): + + def tool_config_prefix(self) -> str: + return "timing.tempus" + + @property + def env_vars(self) -> Dict[str, str]: + v = dict(super().env_vars) + v["TEMPUS_BIN"] = self.get_setting("timing.tempus.tempus_bin") + return v + + @property + def _step_transitions(self) -> List[Tuple[str, str]]: + """ + Private helper property to keep track of which steps we ran so that we + can create symlinks. + This is a list of (pre, post) steps + """ + return self.attr_getter("__step_transitions", []) + + @_step_transitions.setter + def _step_transitions(self, value: List[Tuple[str, str]]) -> None: + self.attr_setter("__step_transitions", value) + + + def do_pre_steps(self, first_step: HammerToolStep) -> bool: + assert super().do_pre_steps(first_step) + # Restart from the last checkpoint if we're not starting over. + # Not in the dofile, must be a command-line option + if first_step != self.first_step: + self.append("read_db pre_{step}".format(step=first_step.name)) + return True + + def do_between_steps(self, prev: HammerToolStep, next: HammerToolStep) -> bool: + assert super().do_between_steps(prev, next) + # Write a checkpoint to disk. + self.append("write_db -overwrite pre_{step}".format(step=next.name)) + # Symlink the checkpoint to latest for open_db script later. + self.append(f"ln -sfn pre_{next.name} latest") + self._step_transitions = self._step_transitions + [(prev.name, next.name)] + return True + + def do_post_steps(self) -> bool: + assert super().do_post_steps() + # Create symlinks for post_ to pre_ to improve usability. + try: + for prev, next in self._step_transitions: + os.symlink( + os.path.join(self.run_dir, f"pre_{next}"), # src + os.path.join(self.run_dir, f"post_{prev}") # dst + ) + except OSError as e: + if e.errno != errno.EEXIST: + self.logger.warning("Failed to create post_* symlinks: " + str(e)) + + # Create checkpoint post_ + # TODO: this doesn't work if you're only running the very last step + if len(self._step_transitions) > 0: + last = "post_{step}".format(step=self._step_transitions[-1][1]) + self.append("write_db -overwrite {last}".format(last=last)) + # Symlink the database to latest for open_db script later. + self.append(f"ln -sfn {last} latest") + + return self.run_tempus() and self.generate_open_db() + + def get_tool_hooks(self) -> List[HammerToolHookAction]: + return [self.make_persistent_hook(tempus_global_settings)] + + @property + def steps(self) -> List[HammerToolStep]: + steps = [ + self.init_design, + self.run_sta + ] + return self.make_steps_from_methods(steps) + + def init_design(self) -> bool: + """ Load design and analysis corners """ + verbose_append = self.verbose_append + + # Read timing libraries and generate timing constraints. 
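+ # The MMMC setup is written to mmmc.tcl and loaded with read_mmmc, giving each corner
+ # its own library set, RC corner, and analysis view (shared CadenceTool behavior, as
+ # in the Genus plugin above).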
+        # TODO: support non-MMMC mode, use standalone SDC instead
+        # TODO: read AOCV or SOCV+LVF libraries if available
+        mmmc_path = os.path.join(self.run_dir, "mmmc.tcl")
+        self.write_contents_to_path(self.generate_mmmc_script(), mmmc_path)
+        verbose_append("read_mmmc {mmmc_path}".format(mmmc_path=mmmc_path))
+
+        # Read physical LEFs (optional in Tempus).
+        lef_files = self.technology.read_libs([
+            hammer_tech.filters.lef_filter
+        ], hammer_tech.HammerTechnologyUtils.to_plain_item)
+        if self.hierarchical_mode.is_nonleaf_hierarchical():
+            ilm_lefs = list(map(lambda ilm: ilm.lef, self.get_input_ilms()))
+            lef_files.extend(ilm_lefs)
+        verbose_append("read_physical -lef {{ {files} }}".format(
+            files=" ".join(lef_files)
+        ))
+
+        # Read the netlist.
+        # Tempus only supports structural Verilog for the netlist; the Verilog may optionally be compressed.
+        if not self.check_input_files([".v", ".v.gz"]):
+            return False
+
+        # We are switching working directories and we still need to find paths.
+        abspath_input_files = list(map(lambda name: os.path.join(os.getcwd(), name), self.input_files))
+        verbose_append("read_netlist {{ {files} }} -top {top}".format(
+            files=" ".join(abspath_input_files),
+            top=self.top_module
+        ))
+
+        if self.hierarchical_mode.is_nonleaf_hierarchical():
+            # Read ILMs.
+            for ilm in self.get_input_ilms():
+                # Assumes that the ILM was created by Innovus (or at least follows the same file/folder structure).
+                # TODO: support non-Innovus hierarchical flows (read netlists, etc.)
+                verbose_append("read_ilm -cell {module} -directory {dir}".format(dir=ilm.dir, module=ilm.module))
+
+        # Read power intent.
+        if self.get_setting("vlsi.inputs.power_spec_mode") != "empty":
+            # Set up power settings from CPF/UPF.
+            for l in self.generate_power_spec_commands():
+                verbose_append(l)
+
+        # Read parasitics.
+        if self.spefs is not None:  # post-P&R
+            corners = self.get_mmmc_corners()
+            if corners:
+                rc_corners = []  # type: List[str]
+                for corner in corners:
+                    # Set up views for all defined corner types: setup, hold, extra.
+                    if corner.type is MMMCCornerType.Setup:
+                        corner_name = "{n}.{t}".format(n=corner.name, t="setup")
+                    elif corner.type is MMMCCornerType.Hold:
+                        corner_name = "{n}.{t}".format(n=corner.name, t="hold")
+                    elif corner.type is MMMCCornerType.Extra:
+                        corner_name = "{n}.{t}".format(n=corner.name, t="extra")
+                    else:
+                        raise ValueError("Unsupported MMMCCornerType")
+                    rc_corners.append("{n}_rc".format(n=corner_name))
+
+                # Match SPEFs with corners. The ordering must match (ensured here by get_mmmc_corners())!
+                for (spef, rc_corner) in zip(self.spefs, rc_corners):
+                    verbose_append("read_spef {spef} -rc_corner {corner}".format(spef=os.path.join(os.getcwd(), spef), corner=rc_corner))
+            else:
+                verbose_append("read_spef " + os.path.join(os.getcwd(), self.spefs[0]))
+
+        # Read delay data (optional in Tempus).
+        if self.sdf_file is not None:
+            verbose_append("read_sdf " + os.path.join(os.getcwd(), self.sdf_file))
+
+        verbose_append("init_design")
+
+        # TODO: optionally read additional DEF or OA physical data
+
+        # Set some default analysis settings for maximum accuracy.
+        # Clock path pessimism removal
+        verbose_append("set_db timing_analysis_cppr both")
+        # On-chip variation analysis
+        verbose_append("set_db timing_analysis_type ocv")
+        # Partial path-based analysis even in graph-based analysis mode
+        verbose_append("set_db timing_analysis_graph_pba_mode true")
+        # Equivalent waveform model with waveform propagation
+        verbose_append("set_db delaycal_equivalent_waveform_model propagation")
+
+        # Enable signal integrity delay and glitch analysis.
+        if self.get_setting("timing.tempus.si_glitch"):
+            verbose_append("set_db si_num_iteration 3")
+            verbose_append("set_db si_delay_enable_report true")
+            verbose_append("set_db si_delay_separate_on_data true")
+            verbose_append("set_db si_delay_enable_logical_correlation true")
+            verbose_append("set_db si_glitch_enable_report true")
+            verbose_append("set_db si_enable_glitch_propagation true")
+            verbose_append("set_db si_enable_glitch_overshoot_undershoot true")
+            verbose_append("set_db delaycal_enable_si true")
+            verbose_append("set_db timing_enable_timing_window_pessimism_removal true")
+            # Check for correct noise models (ECSMN, CCSN, etc.)
+            verbose_append("check_noise")
+
+        return True
+
+    def run_sta(self) -> bool:
+        """Run static timing analysis."""
+        verbose_append = self.verbose_append
+
+        # report_timing
+        verbose_append("set_db timing_report_timing_header_detail_info extended")
+        # Note: this reports everything - setup, hold, recovery, etc.
+        verbose_append(f"report_timing -retime path_slew_propagation -max_paths {self.max_paths} > timing.rpt")
+        verbose_append(f"report_timing -unconstrained -debug unconstrained -max_paths {self.max_paths} > unconstrained.rpt")
+
+        if self.get_setting("timing.tempus.si_glitch"):
+            # SI max/min delay reports
+            verbose_append("report_noise -delay max -out_file max_si_delay")
+            verbose_append("report_noise -delay min -out_file min_si_delay")
+            # Glitch report and summary histogram
+            verbose_append("report_noise -out_file glitch")
+            verbose_append("report_noise -histogram")
+
+        return True
+
+    def generate_open_db(self) -> bool:
+        # Make sure that generated-scripts exists.
+        generated_scripts_dir = os.path.join(self.run_dir, "generated-scripts")
+        os.makedirs(generated_scripts_dir, exist_ok=True)
+
+        # Script to open the results checkpoint.
+        self.output.clear()
+        self.create_enter_script()
+        open_db_tcl = os.path.join(generated_scripts_dir, "open_db.tcl")
+        assert super().do_pre_steps(self.first_step)
+        self.append("read_db latest")
+        self.write_contents_to_path("\n".join(self.output), open_db_tcl)
+        open_db_script = os.path.join(generated_scripts_dir, "open_db")
+        with open(open_db_script, "w") as f:
+            f.write("""#!/bin/bash
+        cd {run_dir}
+        source enter
+        $TEMPUS_BIN -stylus -files {open_db_tcl}
+        """.format(run_dir=self.run_dir, open_db_tcl=open_db_tcl))
+        os.chmod(open_db_script, 0o755)
+
+        return True
+
+    def run_tempus(self) -> bool:
+        # Quit the tool at the end of the script.
+        self.append("exit")
+
+        # Write the main dofile.
+        timing_script = os.path.join(self.run_dir, "timing.tcl")
+        self.write_contents_to_path("\n".join(self.output), timing_script)
+
+        # Build args.
+        # TODO: enable Signoff ECO with the -tso (-eco?) option
+        args = [
+            self.get_setting("timing.tempus.tempus_bin"),
+            "-no_gui",   # no GUI
+            "-stylus",   # common UI
+            "-files", timing_script
+        ]
+
+        # Temporarily disable colours/tag to make run output more readable.
+        # TODO: think of a more elegant way to do this?
+        HammerVLSILogging.enable_colour = False
+        HammerVLSILogging.enable_tag = False
+        self.run_executable(args, cwd=self.run_dir)
+        # TODO: check for errors and deal with them
+        HammerVLSILogging.enable_colour = True
+        HammerVLSILogging.enable_tag = True
+
+        # TODO: check that the timing run was successful
+
+        return True
+
+def tempus_global_settings(ht: HammerTool) -> bool:
+    """Settings that need to be reapplied at every tool invocation."""
+    assert isinstance(ht, HammerTimingTool)
+    assert isinstance(ht, CadenceTool)
+    ht.create_enter_script()
+
+    # Alias for brevity.
+    verbose_append = ht.verbose_append
+
+    # Generic settings
+    verbose_append("set_db design_process_node {}".format(ht.get_setting("vlsi.core.node")))
+    verbose_append("set_multi_cpu_usage -local_cpu {}".format(ht.get_setting("vlsi.core.max_threads")))
+
+    return True
+
+tool = Tempus
diff --git a/hammer/timing/tempus/defaults.yml b/hammer/timing/tempus/defaults.yml
new file mode 100644
index 000000000..2425bf282
--- /dev/null
+++ b/hammer/timing/tempus/defaults.yml
@@ -0,0 +1,16 @@
+# Configuration options and defaults for Tempus.
+# The values specified in this file are the defaults.
+
+timing.tempus:
+  # Location of the binary
+  tempus_bin: "${cadence.cadence_home}/SSV/SSV${timing.tempus.version}/bin/tempus"
+  tempus_bin_meta: lazysubst  # we want later overrides to be able to affect this
+
+  # Tempus version to use.
+  # Used to locate the binary - e.g. the '211_ISR3' in ${cadence.cadence_home}/SSV/SSV211_ISR3/bin/tempus
+  version: "211_ISR3"
+
+  # Enable signal integrity delay and glitch analysis.
+  # Note: your tech libs should have noise models!
+  # type: bool
+  si_glitch: false
diff --git a/hammer/vlsi/hammer_vlsi_impl.py b/hammer/vlsi/hammer_vlsi_impl.py
index c859e853b..2cf3fca54 100644
--- a/hammer/vlsi/hammer_vlsi_impl.py
+++ b/hammer/vlsi/hammer_vlsi_impl.py
@@ -6,13 +6,12 @@
 from abc import abstractmethod
 import importlib
 import importlib.resources as resources
-import os
-import sys
 import json
 from typing import Iterable
 import inspect
 import datetime
 from statistics import mode
+import os
 
 import hammer.config as hammer_config
 from hammer.utils import deepdict, coerce_to_grid, get_or_else
@@ -2221,75 +2220,8 @@ def block_append(self, cmds: str, clean: bool = True, verbose: bool = True) -> b
         self.block_tcl_append(cmds, self.output, clean, verbose)
         return True
 
-class SynopsysTool(HasSDCSupport, TCLTool, HammerTool):
-    """Mix-in trait with functions useful for Synopsys-based tools."""
-
-    ## FIXME: not used by any Synopsys tool
-    @property
-    def post_synth_sdc(self) -> Optional[str]:
-        return None
-
-    @property
-    def env_vars(self) -> Dict[str, str]:
-        """
-        Get the list of environment variables required for this tool.
-        Note to subclasses: remember to include variables from super().env_vars!
-        """
-        result = dict(super().env_vars)
-        result.update({
-            "SNPSLMD_LICENSE_FILE": self.get_setting("synopsys.SNPSLMD_LICENSE_FILE"),
-            # TODO: this is actually a Mentor Graphics licence, not sure why the old dc scripts depend on it.
-            "MGLS_LICENSE_FILE": self.get_setting("synopsys.MGLS_LICENSE_FILE")
-        })
-        return result
-
-    def version_number(self, version: str) -> int:
-        """
-        Assumes versions look like NAME-YYYY.MM-SPMINOR.
-        Assumes less than 100 minor versions.
-        """
-        date = "-".join(version.split("-")[1:])  # type: str
-        year = int(date.split(".")[0])  # type: int
-        month = int(date.split(".")[1][:2])  # type: int
-        minor_version = 0  # type: int
-        if "-" in date:
-            minor_version = int(date.split("-")[1][2:])
-        return (year * 100 + month) * 100 + minor_version
-
-    @property
-    def header(self) -> str:
-        """
-        Header for all generated Tcl scripts
-        """
-        header_text = f"""
-        # ---------------------------------------------------------------------------------
-        # Portions Copyright ©{datetime.date.today().year} Synopsys, Inc. All rights reserved. Portions of
-        # these TCL scripts are proprietary to and owned by Synopsys, Inc. and may only be
-        # used for internal use by educational institutions (including United States
-        # government labs, research institutes and federally funded research and
-        # development centers) on Synopsys tools for non-profit research, development,
-        # instruction, and other non-commercial uses or as otherwise specifically set forth
-        # by written agreement with Synopsys. All other use, reproduction, modification, or
-        # distribution of these TCL scripts is strictly prohibited.
-        # ---------------------------------------------------------------------------------
-        """
-        return inspect.cleandoc(header_text)
-
-    def get_synopsys_rm_tarball(self, product: str, settings_key: str = "") -> str:
-        """Locate reference methodology tarball.
-
-        :param product: Either "DC" or "ICC"
-        :param settings_key: Key to retrieve the version for the product. Leave blank for DC and ICC.
-        """
-        key = self.tool_config_prefix() + "." + "version"  # type: str
-
-        synopsys_rm_tarball = os.path.join(self.get_setting("synopsys.rm_dir"), "%s-RM_%s.tar" % (product, self.get_setting(key)))
-        if not os.path.exists(synopsys_rm_tarball):
-            # TODO: convert these to logger calls
-            raise FileNotFoundError("Expected reference methodology tarball not found at %s. Use the Synopsys RM generator to generate a DC reference methodology. If these tarballs have been pre-downloaded, you can set synopsys.rm_dir instead of generating them yourself." % (synopsys_rm_tarball))
-        else:
-            return synopsys_rm_tarball
 
+# TODO: when mentor tool plugins can be public, move this class to hammer.common.mentor
 class MentorTool(HammerTool):
     """ Mix-in trait with functions useful for Mentor-Graphics-based tools. """
diff --git a/poetry.lock b/poetry.lock
index 399c02481..debc5ec5f 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -213,14 +213,14 @@ files = [
 
 [[package]]
 name = "exceptiongroup"
-version = "1.1.0"
+version = "1.1.1"
 description = "Backport of PEP 654 (exception groups)"
 category = "dev"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "exceptiongroup-1.1.0-py3-none-any.whl", hash = "sha256:327cbda3da756e2de031a3107b81ab7b3770a602c4d16ca618298c526f4bec1e"},
-    {file = "exceptiongroup-1.1.0.tar.gz", hash = "sha256:bcb67d800a4497e1b404c2dd44fca47d3b7a5e5433dbab67f96c1a685cdfdf23"},
+    {file = "exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"},
+    {file = "exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"},
 ]
 
 [package.extras]
@@ -583,46 +583,42 @@ files = [
 
 [[package]]
 name = "mypy"
-version = "0.991"
+version = "1.1.1"
 description = "Optional static typing for Python"
 category = "dev"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "mypy-0.991-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7d17e0a9707d0772f4a7b878f04b4fd11f6f5bcb9b3813975a9b13c9332153ab"},
-    {file = "mypy-0.991-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0714258640194d75677e86c786e80ccf294972cc76885d3ebbb560f11db0003d"},
-    {file = "mypy-0.991-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c8f3be99e8a8bd403caa8c03be619544bc2c77a7093685dcf308c6b109426c6"},
-    {file = "mypy-0.991-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc9ec663ed6c8f15f4ae9d3c04c989b744436c16d26580eaa760ae9dd5d662eb"},
-    {file = "mypy-0.991-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4307270436fd7694b41f913eb09210faff27ea4979ecbcd849e57d2da2f65305"},
-    {file = "mypy-0.991-cp310-cp310-win_amd64.whl", hash = "sha256:901c2c269c616e6cb0998b33d4adbb4a6af0ac4ce5cd078afd7bc95830e62c1c"},
-    {file = "mypy-0.991-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d13674f3fb73805ba0c45eb6c0c3053d218aa1f7abead6e446d474529aafc372"},
-    {file = "mypy-0.991-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1c8cd4fb70e8584ca1ed5805cbc7c017a3d1a29fb450621089ffed3e99d1857f"},
-    {file = "mypy-0.991-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:209ee89fbb0deed518605edddd234af80506aec932ad28d73c08f1400ef80a33"},
-    {file = "mypy-0.991-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37bd02ebf9d10e05b00d71302d2c2e6ca333e6c2a8584a98c00e038db8121f05"},
-    {file = "mypy-0.991-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:26efb2fcc6b67e4d5a55561f39176821d2adf88f2745ddc72751b7890f3194ad"},
-    {file = "mypy-0.991-cp311-cp311-win_amd64.whl", hash = "sha256:3a700330b567114b673cf8ee7388e949f843b356a73b5ab22dd7cff4742a5297"},
-    {file = "mypy-0.991-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1f7d1a520373e2272b10796c3ff721ea1a0712288cafaa95931e66aa15798813"},
-    {file = "mypy-0.991-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:641411733b127c3e0dab94c45af15fea99e4468f99ac88b39efb1ad677da5711"},
-    {file = "mypy-0.991-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3d80e36b7d7a9259b740be6d8d906221789b0d836201af4234093cae89ced0cd"},
-    {file = "mypy-0.991-cp37-cp37m-win_amd64.whl", hash = "sha256:e62ebaad93be3ad1a828a11e90f0e76f15449371ffeecca4a0a0b9adc99abcef"},
-    {file = "mypy-0.991-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b86ce2c1866a748c0f6faca5232059f881cda6dda2a893b9a8373353cfe3715a"},
-    {file = "mypy-0.991-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac6e503823143464538efda0e8e356d871557ef60ccd38f8824a4257acc18d93"},
-    {file = "mypy-0.991-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0cca5adf694af539aeaa6ac633a7afe9bbd760df9d31be55ab780b77ab5ae8bf"},
-    {file = "mypy-0.991-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12c56bf73cdab116df96e4ff39610b92a348cc99a1307e1da3c3768bbb5b135"},
-    {file = "mypy-0.991-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:652b651d42f155033a1967739788c436491b577b6a44e4c39fb340d0ee7f0d70"},
-    {file = "mypy-0.991-cp38-cp38-win_amd64.whl", hash = "sha256:4175593dc25d9da12f7de8de873a33f9b2b8bdb4e827a7cae952e5b1a342e243"},
-    {file = "mypy-0.991-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:98e781cd35c0acf33eb0295e8b9c55cdbef64fcb35f6d3aa2186f289bed6e80d"},
-    {file = "mypy-0.991-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6d7464bac72a85cb3491c7e92b5b62f3dcccb8af26826257760a552a5e244aa5"},
-    {file = "mypy-0.991-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c9166b3f81a10cdf9b49f2d594b21b31adadb3d5e9db9b834866c3258b695be3"},
-    {file = "mypy-0.991-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8472f736a5bfb159a5e36740847808f6f5b659960115ff29c7cecec1741c648"},
-    {file = "mypy-0.991-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e80e758243b97b618cdf22004beb09e8a2de1af481382e4d84bc52152d1c476"},
-    {file = "mypy-0.991-cp39-cp39-win_amd64.whl", hash = "sha256:74e259b5c19f70d35fcc1ad3d56499065c601dfe94ff67ae48b85596b9ec1461"},
-    {file = "mypy-0.991-py3-none-any.whl", hash = "sha256:de32edc9b0a7e67c2775e574cb061a537660e51210fbf6006b0b36ea695ae9bb"},
-    {file = "mypy-0.991.tar.gz", hash = "sha256:3c0165ba8f354a6d9881809ef29f1a9318a236a6d81c690094c5df32107bde06"},
+    {file = "mypy-1.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39c7119335be05630611ee798cc982623b9e8f0cff04a0b48dfc26100e0b97af"},
+    {file = "mypy-1.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:61bf08362e93b6b12fad3eab68c4ea903a077b87c90ac06c11e3d7a09b56b9c1"},
+    {file = "mypy-1.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbb19c9f662e41e474e0cff502b7064a7edc6764f5262b6cd91d698163196799"},
+    {file = "mypy-1.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:315ac73cc1cce4771c27d426b7ea558fb4e2836f89cb0296cbe056894e3a1f78"},
+    {file = "mypy-1.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:5cb14ff9919b7df3538590fc4d4c49a0f84392237cbf5f7a816b4161c061829e"},
+    {file = "mypy-1.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:26cdd6a22b9b40b2fd71881a8a4f34b4d7914c679f154f43385ca878a8297389"},
+    {file = "mypy-1.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5b5f81b40d94c785f288948c16e1f2da37203c6006546c5d947aab6f90aefef2"},
+    {file = "mypy-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21b437be1c02712a605591e1ed1d858aba681757a1e55fe678a15c2244cd68a5"},
+    {file = "mypy-1.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d809f88734f44a0d44959d795b1e6f64b2bbe0ea4d9cc4776aa588bb4229fc1c"},
+    {file = "mypy-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:a380c041db500e1410bb5b16b3c1c35e61e773a5c3517926b81dfdab7582be54"},
+    {file = "mypy-1.1.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b7c7b708fe9a871a96626d61912e3f4ddd365bf7f39128362bc50cbd74a634d5"},
+    {file = "mypy-1.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1c10fa12df1232c936830839e2e935d090fc9ee315744ac33b8a32216b93707"},
+    {file = "mypy-1.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0a28a76785bf57655a8ea5eb0540a15b0e781c807b5aa798bd463779988fa1d5"},
+    {file = "mypy-1.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ef6a01e563ec6a4940784c574d33f6ac1943864634517984471642908b30b6f7"},
+    {file = "mypy-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d64c28e03ce40d5303450f547e07418c64c241669ab20610f273c9e6290b4b0b"},
+    {file = "mypy-1.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:64cc3afb3e9e71a79d06e3ed24bb508a6d66f782aff7e56f628bf35ba2e0ba51"},
+    {file = "mypy-1.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce61663faf7a8e5ec6f456857bfbcec2901fbdb3ad958b778403f63b9e606a1b"},
+    {file = "mypy-1.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2b0c373d071593deefbcdd87ec8db91ea13bd8f1328d44947e88beae21e8d5e9"},
+    {file = "mypy-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:2888ce4fe5aae5a673386fa232473014056967f3904f5abfcf6367b5af1f612a"},
+    {file = "mypy-1.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:19ba15f9627a5723e522d007fe708007bae52b93faab00f95d72f03e1afa9598"},
+    {file = "mypy-1.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:59bbd71e5c58eed2e992ce6523180e03c221dcd92b52f0e792f291d67b15a71c"},
+    {file = "mypy-1.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9401e33814cec6aec8c03a9548e9385e0e228fc1b8b0a37b9ea21038e64cdd8a"},
+    {file = "mypy-1.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b398d8b1f4fba0e3c6463e02f8ad3346f71956b92287af22c9b12c3ec965a9f"},
+    {file = "mypy-1.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:69b35d1dcb5707382810765ed34da9db47e7f95b3528334a3c999b0c90fe523f"},
+    {file = "mypy-1.1.1-py3-none-any.whl", hash = "sha256:4e4e8b362cdf99ba00c2b218036002bdcdf1e0de085cdb296a49df03fb31dfc4"},
+    {file = "mypy-1.1.1.tar.gz", hash = "sha256:ae9ceae0f5b9059f33dbc62dea087e942c0ccab4b7a003719cb70f9b8abfa32f"},
 ]
 
 [package.dependencies]
-mypy-extensions = ">=0.4.3"
+mypy-extensions = ">=1.0.0"
 tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
 typing-extensions = ">=3.10"
@@ -723,14 +719,14 @@ files = [
 
 [[package]]
 name = "platformdirs"
-version = "3.1.0"
+version = "3.1.1"
 description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "platformdirs-3.1.0-py3-none-any.whl", hash = "sha256:13b08a53ed71021350c9e300d4ea8668438fb0046ab3937ac9a29913a1a1350a"}, - {file = "platformdirs-3.1.0.tar.gz", hash = "sha256:accc3665857288317f32c7bebb5a8e482ba717b474f3fc1d18ca7f9214be0cef"}, + {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"}, + {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"}, ] [package.extras] @@ -902,13 +898,6 @@ files = [ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, - {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, - {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, @@ -994,7 +983,6 @@ files = [ {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-win_amd64.whl", hash = "sha256:d000f258cf42fec2b1bbf2863c61d7b8918d31ffee905da62dede869254d3b8a"}, {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:045e0626baf1c52e5527bd5db361bc83180faaba2ff586e763d3d5982a876a9e"}, {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-macosx_12_6_arm64.whl", hash = "sha256:721bc4ba4525f53f6a611ec0967bdcee61b31df5a56801281027a3a6d1c2daf5"}, - {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:41d0f1fa4c6830176eef5b276af04c89320ea616655d01327d5ce65e50575c94"}, {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:4b3a93bb9bc662fc1f99c5c3ea8e623d8b23ad22f861eb6fce9377ac07ad6072"}, {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-macosx_12_0_arm64.whl", hash = 
"sha256:a234a20ae07e8469da311e182e70ef6b199d0fbeb6c6cc2901204dd87fb867e8"}, {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:15910ef4f3e537eea7fe45f8a5d19997479940d9196f357152a09031c5be59f3"}, @@ -1337,14 +1325,14 @@ files = [ [[package]] name = "urllib3" -version = "1.26.14" +version = "1.26.15" description = "HTTP library with thread-safe connection pooling, file post, and more." category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ - {file = "urllib3-1.26.14-py2.py3-none-any.whl", hash = "sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1"}, - {file = "urllib3-1.26.14.tar.gz", hash = "sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72"}, + {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, + {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, ] [package.extras] @@ -1354,14 +1342,14 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "virtualenv" -version = "20.20.0" +version = "20.21.0" description = "Virtual Python Environment builder" category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.20.0-py3-none-any.whl", hash = "sha256:3c22fa5a7c7aa106ced59934d2c20a2ecb7f49b4130b8bf444178a16b880fa45"}, - {file = "virtualenv-20.20.0.tar.gz", hash = "sha256:a8a4b8ca1e28f864b7514a253f98c1d62b64e31e77325ba279248c65fb4fcef4"}, + {file = "virtualenv-20.21.0-py3-none-any.whl", hash = "sha256:31712f8f2a17bd06234fa97fdf19609e789dd4e3e4bf108c3da71d710651adbc"}, + {file = "virtualenv-20.21.0.tar.gz", hash = "sha256:f50e3e60f990a0757c9b68333c9fdaa72d7188caa417f96af9e52407831a3b68"}, ] [package.dependencies] @@ -1481,4 +1469,4 @@ asap7-gdstk = ["gdstk"] [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "8b2b3f69770b707c534d31fd3a692f2acf4e7192d8f1c7e4da8cc974808f982c" +content-hash = "38fbb54962c75a596133962efa85cc4f1c1d5430375742c3f2156ee4046b1dbe" diff --git a/pyproject.toml b/pyproject.toml index 9ee2da7bf..8de13c054 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ asap7 = ["gdspy"] [tool.poetry.dev-dependencies] pytest = "^7.1" -mypy = "^0.991" +mypy = "^1.0.0" types-PyYAML = "^6.0.0" tox = "^3.25.1" Sphinx = "^5.1.1" diff --git a/tests/test_config.py b/tests/test_config.py index 056cad492..321b1e3bb 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1031,7 +1031,7 @@ def test_get_settings_from_dict(self) -> None: # Test prefix functionality. for (input_dict, prefix, opt, ref) in zip(inputs, prefixes, opts, refs): - assert db.get_settings_from_dict(input_dict, prefix, opt) == ref + assert db.get_settings_from_dict(input_dict, prefix, opt) == ref # type: ignore # In the final case, test error handling when mandatory keys are not specified. 
         with pytest.raises(ValueError):
diff --git a/tests/test_constraints.py b/tests/test_constraints.py
index 6f55b39c8..be1e2c0f6 100644
--- a/tests/test_constraints.py
+++ b/tests/test_constraints.py
@@ -120,7 +120,7 @@ def test_bump_naming(self) -> None:
             BumpAssignment(name="VDD",no_connect=False,x=Decimal(203),y=Decimal(21),group=None,custom_cell=None),
             BumpAssignment(name="VSS",no_connect=False,x=Decimal(202),y=Decimal(20),group=None,custom_cell=None)
         ]
-        definition = BumpsDefinition(x=204,y=204,pitch_x=Decimal("1.23"), pitch_y=Decimal("3.14"), global_x_offset=0, global_y_offset=0, cell="bumpcell",assignments=assignments)
+        definition = BumpsDefinition(x=204,y=204,pitch_x=Decimal("1.23"), pitch_y=Decimal("3.14"), global_x_offset=Decimal('0'), global_y_offset=Decimal('0'), cell="bumpcell",assignments=assignments)
 
         for a in assignments:
             if a.name == "foo":
@@ -181,16 +181,16 @@ def test_bump_naming(self) -> None:
         assignments = [
             BumpAssignment(name="foo",no_connect=False,x=Decimal(1),y=Decimal(1),group=None,custom_cell=None)
         ]
-        definition = BumpsDefinition(x=420,y=420, pitch_x=Decimal("1.23"), pitch_y=Decimal("3.14"), global_x_offset=0, global_y_offset=0, cell="bumpcell",assignments=assignments)
+        definition = BumpsDefinition(x=420,y=420, pitch_x=Decimal("1.23"), pitch_y=Decimal("3.14"), global_x_offset=Decimal('0'), global_y_offset=Decimal('0'), cell="bumpcell",assignments=assignments)
         assert BumpsPinNamingScheme.A1.name_bump(definition, assignments[0]) == "YY420"
 
-        definition = BumpsDefinition(x=421,y=421, pitch_x=Decimal("1.23"), pitch_y=Decimal("3.14"), global_x_offset=0, global_y_offset=0, cell="bumpcell",assignments=assignments)
+        definition = BumpsDefinition(x=421,y=421, pitch_x=Decimal("1.23"), pitch_y=Decimal("3.14"), global_x_offset=Decimal('0'), global_y_offset=Decimal('0'), cell="bumpcell",assignments=assignments)
         assert BumpsPinNamingScheme.A1.name_bump(definition, assignments[0]) == "AAA421"
 
-        definition = BumpsDefinition(x=8420,y=8420, pitch_x=Decimal("1.23"), pitch_y=Decimal("3.14"), global_x_offset=0, global_y_offset=0, cell="bumpcell",assignments=assignments)
+        definition = BumpsDefinition(x=8420,y=8420, pitch_x=Decimal("1.23"), pitch_y=Decimal("3.14"), global_x_offset=Decimal('0'), global_y_offset=Decimal('0'), cell="bumpcell",assignments=assignments)
         assert BumpsPinNamingScheme.A1.name_bump(definition, assignments[0]) == "YYY8420"
 
-        definition = BumpsDefinition(x=8421,y=8421, pitch_x=Decimal("1.23"), pitch_y=Decimal("3.14"), global_x_offset=0, global_y_offset=0, cell="bumpcell",assignments=assignments)
+        definition = BumpsDefinition(x=8421,y=8421, pitch_x=Decimal("1.23"), pitch_y=Decimal("3.14"), global_x_offset=Decimal('0'), global_y_offset=Decimal('0'), cell="bumpcell",assignments=assignments)
         assert BumpsPinNamingScheme.A1.name_bump(definition, assignments[0]) == "AAAA8421"
 
     def test_get_by_bump_dim_pitch(self) -> None:
@@ -224,7 +224,7 @@ def test_bump_sort(self) -> None:
             BumpAssignment(name="VDD",no_connect=False,x=Decimal(203),y=Decimal(21),group=None,custom_cell=None),
             BumpAssignment(name="VSS",no_connect=False,x=Decimal(202),y=Decimal(20),group=None,custom_cell=None)
        ]
-        definition = BumpsDefinition(x=204,y=204,pitch_x=Decimal("1.23"), pitch_y=Decimal("3.14"), global_x_offset=0, global_y_offset=0, cell="bumpcell",assignments=assignments)
+        definition = BumpsDefinition(x=204,y=204,pitch_x=Decimal("1.23"), pitch_y=Decimal("3.14"), global_x_offset=Decimal('0'), global_y_offset=Decimal('0'), cell="bumpcell",assignments=assignments)
 
         idxs = [0, 3, 6, 2, 1]
diff --git a/tests/utils/stackup.py b/tests/utils/stackup.py
index 6ba6dffd2..f572ba15b 100644
--- a/tests/utils/stackup.py
+++ b/tests/utils/stackup.py
@@ -1,7 +1,7 @@
 from decimal import Decimal
 from typing import List, Dict, Any
 
-from hammer.tech.stackup import Metal, WidthSpacingTuple, Stackup
+from hammer.tech.stackup import Metal, WidthSpacingTuple, Stackup, RoutingDirection
 from hammer.tech import Site
 from hammer.utils import coerce_to_grid
 
@@ -47,7 +47,7 @@ def create_test_metal(index: int, grid_unit: Decimal) -> Metal:
     return Metal(
         name="M{}".format(index),
         index=index,
-        direction="vertical" if (index % 2 == 1) else "horizontal",
+        direction=RoutingDirection("vertical" if (index % 2 == 1) else "horizontal"),
         min_width=coerce_to_grid(StackupTestHelper.index_to_min_width_fn(index), grid_unit),
         pitch=coerce_to_grid(StackupTestHelper.index_to_min_pitch_fn(index), grid_unit),
         offset=coerce_to_grid(StackupTestHelper.index_to_offset_fn(index), grid_unit),
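
For users of the new Tempus plugin, a minimal sketch of a project-level override
follows. The key names are taken from hammer/timing/tempus/defaults.yml above; the
Cadence install root is a hypothetical placeholder and must be set for your site.

    # project.yml - hypothetical user override (sketch, not part of this patch)
    cadence.cadence_home: "/tools/cadence"  # assumed install root (site-specific)
    timing.tempus:
      version: "211_ISR3"  # resolves tempus_bin to ${cadence.cadence_home}/SSV/SSV211_ISR3/bin/tempus via lazysubst
      si_glitch: true      # enable SI delay/glitch analysis; the tech libs need noise models (ECSMN, CCSN, etc.)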