diff --git a/power/voltus/__init__.py b/power/voltus/__init__.py index 2e8c891..4211cfa 100755 --- a/power/voltus/__init__.py +++ b/power/voltus/__init__.py @@ -13,10 +13,12 @@ import errno import json -from hammer_utils import get_or_else, optional_map, coerce_to_grid, check_on_grid, lcm_grid -from hammer_vlsi import HammerPowerTool, HammerToolStep, MMMCCornerType, TimeValue +from hammer_config import HammerJSONEncoder +from hammer_utils import get_or_else, optional_map, coerce_to_grid, check_on_grid, lcm_grid, in_place_unique +from hammer_vlsi import HammerPowerTool, HammerToolStep, MMMCCorner, MMMCCornerType, TimeValue, VoltageValue from hammer_logging import HammerVLSILogging import hammer_tech +from specialcells import CellType import sys sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),"../../common")) @@ -38,6 +40,108 @@ def env_vars(self) -> Dict[str, str]: new_dict["VOLTUS_BIN"] = self.get_setting("power.voltus.voltus_bin") return new_dict + @property + def extra_corners_only(self) -> bool: + return self.get_setting("power.inputs.extra_corners_only") + + @property + def tech_stdcell_pgv_tcl(self) -> str: + return os.path.join(self.run_dir, "tech_stdcell_pgv.tcl") + + @property + def macro_pgv_tcl(self) -> str: + return os.path.join(self.run_dir, "macro_pgv.tcl") + + @property + def tech_lib_dir(self) -> str: + return os.path.join(self.technology.cache_dir, "tech_pgv") + + @property + def stdcell_lib_dir(self) -> str: + return os.path.join(self.technology.cache_dir, "stdcell_pgv") + + @property + def macro_lib_dir(self) -> str: + return os.path.join(self.technology.cache_dir, "macro_pgv") + + @property + def ran_tech_stdcell_pgv(self) -> bool: + """ init_technology sets this to True if tech/stdcell PG views were generated by Hammer or provided by PDK """ + return self.attr_getter("_ran_tech_stdcell_pgv", False) + + @ran_tech_stdcell_pgv.setter + def ran_tech_stdcell_pgv(self, val: bool) -> None: + self.attr_setter("_ran_tech_stdcell_pgv", val) + + @property + def gen_tech_stdcell_pgv(self) -> bool: + """ init_technology sets this to True if stdcell PG views need to be generated """ + return self.attr_getter("_gen_tech_stdcell_pgv", False) + + @gen_tech_stdcell_pgv.setter + def gen_tech_stdcell_pgv(self, val: bool) -> None: + self.attr_setter("_gen_tech_stdcell_pgv", val) + + @property + def ran_macro_pgv(self) -> bool: + """ init_technology sets this to True if macro PG views were generated by Hammer """ + return self.attr_getter("_ran_macro_pgv", False) + + @ran_macro_pgv.setter + def ran_macro_pgv(self, val: bool) -> None: + self.attr_setter("_ran_macro_pgv", val) + + @property + def gen_macro_pgv(self) -> bool: + """ init_technology sets this to True if macro PG views need to be generated """ + return self.attr_getter("_gen_macro_pgv", False) + + @gen_macro_pgv.setter + def gen_macro_pgv(self, val: bool) -> None: + self.attr_setter("_gen_macro_pgv", val) + + @property + def macro_pgv_cells(self) -> List[str]: + """ init_technology populates the list of macros to generate PG views for """ + return self.attr_getter("_macro_pgv_cells", []) + + @macro_pgv_cells.setter + def macro_pgv_cells(self, val: List[str]) -> None: + self.attr_setter("_macro_pgv_cells", val) + + def tech_lib_filter(self) -> List[Callable[[hammer_tech.Library], bool]]: + """ Filter only libraries from tech plugin """ + return [self.filter_for_tech_libs] + + def filter_for_tech_libs(self, lib: hammer_tech.Library) -> bool: + return lib in self.technology.tech_defined_libraries + + def 
extra_lib_filter(self) -> List[Callable[[hammer_tech.Library], bool]]: + """ Filter only libraries from vlsi.inputs.extra_libraries """ + return [self.filter_for_extra_libs] + + def filter_for_extra_libs(self, lib: hammer_tech.Library) -> bool: + return lib in list(map(lambda el: el.store_into_library(), self.technology.get_extra_libraries())) + + def get_mmmc_pgv(self, corner: MMMCCorner) -> List[str]: + return self.technology.read_libs([hammer_tech.filters.power_grid_library_filter], + hammer_tech.HammerTechnologyUtils.to_plain_item, + extra_pre_filters=[ + self.filter_for_mmmc(voltage=corner.voltage, temp=corner.temp)]) + + def get_mmmc_spice_models(self, corner: MMMCCorner) -> List[str]: + return self.technology.read_libs([hammer_tech.filters.spice_model_file_filter], + hammer_tech.HammerTechnologyUtils.to_plain_item, + extra_pre_filters=[ + self.filter_for_mmmc(voltage=corner.voltage, temp=corner.temp)]) + + def get_mmmc_spice_corners(self, corner: MMMCCorner) -> List[str]: + return self.technology.read_libs([hammer_tech.filters.spice_model_lib_corner_filter], + hammer_tech.HammerTechnologyUtils.to_plain_item, + extra_pre_filters=[ + self.filter_for_mmmc(voltage=corner.voltage, temp=corner.temp)], + must_exist=False) + @property def steps(self) -> List[HammerToolStep]: return self.make_steps_from_methods([ @@ -45,18 +149,238 @@ def steps(self) -> List[HammerToolStep]: self.init_design, self.static_power, self.active_power, - # self.static_rail, + self.static_rail, self.active_rail, self.run_voltus ]) - # TODO (daniel) library characterization def init_technology(self) -> bool: + corners = self.get_mmmc_corners() + + # Options for set_pg_library_mode + base_options = [] # type: List[str] + if self.get_setting("power.voltus.lef_layer_map"): + base_options.extend(["-lef_layer_map", self.get_setting("power.voltus.lef_layer_map")]) + + # Setup commands for each PG library run + base_cmds = ["set_db design_process_node {}".format(self.get_setting("vlsi.core.node"))] + base_cmds.append("set_multi_cpu_usage -local_cpu {}".format(self.get_setting("vlsi.core.max_threads"))) + + # First, check if tech plugin supplies power grid libraries + tech_pg_libs = self.technology.read_libs([hammer_tech.filters.power_grid_library_filter], hammer_tech.HammerTechnologyUtils.to_plain_item, self.tech_lib_filter()) + tech_lib_lefs = self.technology.read_libs([hammer_tech.filters.lef_filter], hammer_tech.HammerTechnologyUtils.to_plain_item, self.tech_lib_filter()) + if len(tech_pg_libs) > 0: + self.logger.info("Technology already provides PG libraries. 
Moving onto macro PG libraries.") + self.ran_tech_stdcell_pgv = True + # Else, characterize tech & stdcell libraries only once + elif not os.path.isdir(self.tech_lib_dir) or not os.path.isdir(self.stdcell_lib_dir): + self.logger.info("Generating techonly and stdcell PG libraries for the first time...") + ts_output = base_cmds.copy() + # Get only the tech-defined libraries + ts_output.append("read_physical -lef {{ {} }}".format(" ".join(tech_lib_lefs))) + + tech_options = base_options.copy() + tech_options.extend(["-cell_type", "techonly"]) + # Append list of fillers + stdfillers = self.technology.get_special_cell_by_type(CellType.StdFiller) + if len(stdfillers) > 0: + stdfillers_names = list(map(lambda f: str(f), stdfillers[0].name)) + tech_options.extend(["-filler_cells", "{{ {} }} ".format(" ".join(stdfillers_names))]) + decaps = self.technology.get_special_cell_by_type(CellType.Decap) + if len(decaps) > 0: + decaps_names = list(map(lambda d: str(d), decaps[0].name)) + tech_options.extend(["-decap_cells", "{{ {} }}".format(" ".join(decaps_names))]) + + if not corners: + # Start with tech-only library + options = tech_options.copy() + options.extend([ + "-extraction_tech_file", self.get_qrc_tech(), # TODO: this assumes only 1 exists in no corners case + "-default_power_voltage", str(VoltageValue(self.get_setting("vlsi.inputs.supplies.VDD")).value_in_units("V")) + ]) + ts_output.append("set_pg_library_mode {}".format(" ".join(options))) + ts_output.append("write_pg_library -out_dir {}".format(self.tech_lib_dir)) + + # Next do stdcell library + options[options.index("techonly")] = "stdcells" + spice_models = self.technology.read_libs([hammer_tech.filters.spice_model_file_filter], hammer_tech.HammerTechnologyUtils.to_plain_item) + spice_corners = self.technology.read_libs([hammer_tech.filters.spice_model_lib_corner_filter], hammer_tech.HammerTechnologyUtils.to_plain_item) + if len(spice_models) == 0: + self.logger.error("Must specify Spice model files in tech plugin to generate stdcell PG libraries") + return True + else: + options.extend(["-spice_models", " ".join(spice_models)]) + if len(spice_corners) > 0: + options.extend(["-spice_corners", "{", "} {".join(spice_corners), "}"]) + ts_output.append("set_pg_library_mode {}".format(" ".join(options))) + ts_output.append("write_pg_library -out_dir {}".format(self.stdcell_lib_dir)) + + else: + for corner in corners: + # Start with tech-only library + options = tech_options.copy() + options.extend([ + "-extraction_tech_file", self.get_mmmc_qrc(corner), #TODO: QRC should be tied to stackup + "-default_power_voltage", str(corner.voltage.value), + "-temperature", str(corner.temp.value) + ]) + ts_output.append("set_pg_library_mode {}".format(" ".join(options))) + ts_output.append("write_pg_library -out_dir {}".format(os.path.join(self.tech_lib_dir, corner.name))) + + # Next do stdcell library + options[options.index("techonly")] = "stdcells" + spice_models = self.get_mmmc_spice_models(corner) + spice_corners = self.get_mmmc_spice_corners(corner) + if len(spice_models) == 0: + self.logger.error("Must specify Spice model files in tech plugin to generate stdcell PG libraries") + return True + else: + options.extend(["-spice_models", " ".join(spice_models)]) + if len(spice_corners) > 0: + options.extend(["-spice_corners", "{", "} {".join(spice_corners), "}"]) + ts_output.append("set_pg_library_mode {}".format(" ".join(options))) + ts_output.append("write_pg_library -out_dir {}".format(os.path.join(self.stdcell_lib_dir, corner.name))) + + 
ts_output.append("exit") + with open(self.tech_stdcell_pgv_tcl, "w") as f: + f.write("\n".join(ts_output)) + self.gen_tech_stdcell_pgv = True + self.ran_tech_stdcell_pgv = True + else: + self.logger.info("techonly and stdcell PG libraries already generated, skipping...") + self.ran_tech_stdcell_pgv = True + + if self.get_setting("power.voltus.macro_pgv"): + m_output = base_cmds.copy() + # Characterize macro libraries once, unless list of extra libraries has been modified/changed + tech_lef = tech_lib_lefs[0] + extra_lib_lefs = self.technology.read_libs([hammer_tech.filters.lef_filter], hammer_tech.HammerTechnologyUtils.to_plain_item, self.extra_lib_filter()) + extra_lib_mtimes = list(map(lambda l: os.path.getmtime(l), extra_lib_lefs)) + extra_lib_lefs_mtimes = dict(zip(extra_lib_lefs, extra_lib_mtimes)) + extra_lib_lefs_json = os.path.join(self.run_dir, "extra_lib_lefs.json") + extra_pg_libs = self.technology.read_libs([hammer_tech.filters.power_grid_library_filter], hammer_tech.HammerTechnologyUtils.to_plain_item, self.extra_lib_filter()) + # TODO: Use some filters w/ LEFUtils to extract cells from LEFs, e.g. MacroSize instead of using name field + named_extra_libs = list(filter(lambda l: l.library.name is not None and l.library.power_grid_library not in extra_pg_libs, self.technology.get_extra_libraries())) # type: List[hammer_tech.ExtraLibrary] + + if not os.path.isdir(self.macro_lib_dir): + self.logger.info("Characterizing macros for the first time...") + # First time: characterize all cells + macros = list(map(lambda l: l.library.name, named_extra_libs)) + in_place_unique(macros) + self.macro_pgv_cells = macros + + # Write dict of extra library LEFs + with open(extra_lib_lefs_json, "w") as f: + f.write(json.dumps(extra_lib_lefs_mtimes, cls=HammerJSONEncoder, indent=4)) + else: + # Figure out which cells to re-characterize + prior_extra_lib_lefs = {} # type: Dict[str, str] + if os.path.exists(extra_lib_lefs_json): + with open(extra_lib_lefs_json, "r") as f: + prior_extra_lib_lefs = json.loads(f.read()) + # Write updated dict of extra library LEFs + with open(extra_lib_lefs_json, "w") as f: + f.write(json.dumps(extra_lib_lefs_mtimes, cls=HammerJSONEncoder, indent=4)) + # Get LEFs which have been created/modified, match cell names if provided + mod_lefs = dict(set(extra_lib_lefs_mtimes.items()) - set(prior_extra_lib_lefs.items())).keys() + rechar_libs = list(filter(lambda l: l.library.lef_file in mod_lefs, named_extra_libs)) + macros = list(map(lambda l: l.library.name, rechar_libs)) + in_place_unique(macros) + self.macro_pgv_cells = macros + + if len(self.macro_pgv_cells) > 0: + self.logger.info("Characterizing the following macros: {}".format(" ".join(self.macro_pgv_cells))) + # Write list of cells to characterize + cells_list = os.path.join(self.run_dir, "macro_cells.txt") + with open(cells_list, "w") as f: + f.write("\n".join(self.macro_pgv_cells)) + + macro_options = base_options.copy() + macro_options.extend([ + "-cell_type", "macros", + "-cells_file", cells_list + ]) + + # File checks + gds_map_file = self.get_gds_map_file() + if gds_map_file is None: + self.logger.error("Must have GDS layer map for macro PG library generation! 
Skipping.") + return True + else: + assert isinstance(gds_map_file, str) + macro_options.extend(["-stream_layer_map", gds_map_file]) + + extra_lib_sp = self.technology.read_libs([hammer_tech.filters.spice_filter], hammer_tech.HammerTechnologyUtils.to_plain_item, self.extra_lib_filter()) + if len(extra_lib_sp) == 0: + self.logger.error("Must have Spice netlists for macro PG library generation! Skipping.") + return True + else: + macro_options.extend(["-spice_subckts", "{{ {} }}".format(" ".join(extra_lib_sp))]) + + extra_lib_gds = self.technology.read_libs([hammer_tech.filters.gds_filter], hammer_tech.HammerTechnologyUtils.to_plain_item, self.extra_lib_filter()) + if len(extra_lib_gds) == 0: + self.logger.error("Must have GDS data for macro PG library generation! Skipping.") + return True + else: + macro_options.extend(["-stream_files", "{{ {} }}".format(" ".join(extra_lib_gds))]) + + m_output.append("read_physical -lef {{ {TECH_LEF} {EXTRA_LEFS} }}".format(TECH_LEF=tech_lef, EXTRA_LEFS=" ".join(extra_lib_lefs))) + + if not corners: + options = macro_options.copy() + options.extend([ + "-extraction_tech_file", self.get_qrc_tech(), # TODO: this assumes only 1 exists in no corners case + "-default_power_voltage", str(VoltageValue(self.get_setting("vlsi.inputs.supplies.VDD")).value_in_units("V")) + ]) + spice_models = self.technology.read_libs([hammer_tech.filters.spice_model_file_filter], hammer_tech.HammerTechnologyUtils.to_plain_item) + spice_corners = self.technology.read_libs([hammer_tech.filters.spice_model_lib_corner_filter], hammer_tech.HammerTechnologyUtils.to_plain_item) + if len(spice_models) == 0: + self.logger.error("Must specify Spice model files in tech plugin to generate macro PG libraries") + return True + else: + options.extend(["-spice_models", " ".join(spice_models)]) + if len(spice_corners) > 0: + options.extend(["-spice_corners", "{", "} {".join(spice_corners), "}"]) + m_output.append("set_pg_library_mode {}".format(" ".join(options))) + m_output.append("write_pg_library -out_dir {}".format(os.path.join(self.macro_lib_dir, corner.name))) + + else: + for corner in corners: + options = macro_options.copy() + options.extend([ + "-extraction_tech_file", self.get_mmmc_qrc(corner), #TODO: QRC should be tied to stackup + "-default_power_voltage", str(corner.voltage.value), + "-temperature", str(corner.temp.value), + ]) + spice_models = self.get_mmmc_spice_models(corner) + spice_corners = self.get_mmmc_spice_corners(corner) + if len(spice_models) == 0: + self.logger.error("Must specify Spice model files in tech plugin to generate macro PG libraries") + return True + else: + options.extend(["-spice_models", " ".join(spice_models)]) + if len(spice_corners) > 0: + options.extend(["-spice_corners", "{", "} {".join(spice_corners), "}"]) + m_output.append("set_pg_library_mode {}".format(" ".join(options))) + m_output.append("write_pg_library -out_dir {}".format(os.path.join(self.macro_lib_dir, corner.name))) + + m_output.append("exit") + with open(self.macro_pgv_tcl, "w") as f: + f.write("\n".join(m_output)) + self.gen_macro_pgv = True + self.ran_macro_pgv = True + else: + self.logger.info("macro PG libraries already generated and macros have not changed, skipping...") + self.ran_macro_pgv = True + else: + self.logger.info("power.voltus.macro_pgv is False. 
Rail analysis will be incomplete over macro blocks.") + return True def init_design(self) -> bool: verbose_append = self.verbose_append + verbose_append("set_db design_process_node {}".format(self.get_setting("vlsi.core.node"))) verbose_append("set_multi_cpu_usage -local_cpu {}".format(self.get_setting("vlsi.core.max_threads"))) innovus_db = os.path.join(os.getcwd(), self.par_database) @@ -65,6 +389,8 @@ def init_design(self) -> bool: verbose_append("read_db {}".format(innovus_db)) + verbose_append("check_pg_shorts -out_file shorts.rpt") + # TODO (daniel) deal with multiple power domains for power_net in self.get_all_power_nets(): vdd_net = power_net.name @@ -125,13 +451,12 @@ def static_power(self) -> bool: # Report based on MMMC mode corners = self.get_mmmc_corners() - extra_corners_only = self.get_setting("power.inputs.extra_corners_only") if not corners: - if extra_corners_only: + if self.extra_corners_only: self.logger.warning("power.inputs.extra_corners_only not valid in non-MMMC mode! Reporting static power for default analysis view only.") verbose_append("report_power -out_dir staticPowerReports") else: - if extra_corners_only: + if self.extra_corners_only: extra_corners = list(filter(lambda c: c.type is MMMCCornerType.Extra, corners)) if len(extra_corners) == 0: self.logger.warning("power.inputs.extra_corners_only is true but no extra MMMC corners specified! Ignoring for static power.") @@ -161,13 +486,12 @@ def active_power(self) -> bool: # Check MMMC mode corners = self.get_mmmc_corners() - extra_corners_only = self.get_setting("power.inputs.extra_corners_only") if not corners: - if extra_corners_only: + if self.extra_corners_only: self.logger.warning("power.inputs.extra_corners_only not valid in non-MMMC mode! Reporting active power for default analysis view only.") verbose_append("report_power -out_dir activePowerReports") else: - if extra_corners_only: + if self.extra_corners_only: extra_corners = list(filter(lambda c: c.type is MMMCCornerType.Extra, corners)) if len(extra_corners) == 0: self.logger.warning("power.inputs.extra_corners_only is true but no extra MMMC corners specified! 
Ignoring for active power.") @@ -242,18 +566,123 @@ def active_power(self) -> bool: return True - def static_rail(self) -> bool: + def rail_analysis(self, method: str, power_dir: str, output_dir: Optional[str] = None) -> bool: + """ + Generic method for rail analysis + params: + - method: "static" or "dynamic" + - power_dir: relative path to static or active power current files + - output_dir: relative path to rail analysis output dir + """ verbose_append = self.verbose_append - # TODO (daniel) add more setting parameters - verbose_append("set_rail_anaylsis_config -analysis_view VIEW -method era_static -accuracy xd -extraction_techfile QRC_TECHFILE") - verbose_append("set_power_data -format current {FILE NAMES}") - verbose_append("report_rail -output_dir staticRailReports -type domain AO") + if not output_dir: + output_dir = method + "RailReports" + + # Decide accuracy based on existence of PGV libraries, unless overridden + accuracy = self.get_setting("power.voltus.rail_accuracy") + if not accuracy: + accuracy = "hd" if self.ran_tech_stdcell_pgv else "xd" # hd still works w/o macro PG views + + base_options = [ + "-method", method, + "-accuracy", accuracy, + "-process_techgen_em_rules", "true", + "-em_peak_analysis", "true", + "-enable_rlrp_analysis", "true", + "-gif_resolution", "high", + "-verbosity", "true" + ] + if method == "static": + base_options.extend(["-enable_sensitivity_analysis", "true"]) + + # TODO: Need combinations of all power nets + voltage domains + pg_nets = self.get_all_power_nets() + self.get_all_ground_nets() + # Report based on MMMC corners + corners = self.get_mmmc_corners() + if not corners: + if self.extra_corners_only: + self.logger.warning("power.inputs.extra_corners_only not valid in non-MMMC mode! Reporting rail analysis for default analysis view only.") + options = base_options.copy() + pg_libs = self.technology.read_libs([hammer_tech.filters.power_grid_library_filter], hammer_tech.HammerTechnologyUtils.to_plain_item) + if self.ran_tech_stdcell_pgv: + pg_libs.append(os.path.join(self.tech_lib_dir, "techonly.cl")) + pg_libs.append(os.path.join(self.stdcell_lib_dir, "stdcells.cl")) + if self.ran_macro_pgv: + pg_libs.extend(list(map(lambda l: os.path.join(self.macro_lib_dir, "macros_{}.cl".format(l)), self.macro_pgv_cells))) + if len(pg_libs) == 0: + self.logger.warning("No PG libraries are available! Rail analysis is skipped.") + return True + options.extend(["-power_grid_libraries", "{{ {} }}".format(" ".join(pg_libs))]) + verbose_append("set_rail_analysis_config {}".format(" ".join(options))) + # TODO: get nets and .ptiavg files using TCL from the .ptifiles file in the power reports directory + power_data = list(map(lambda n: "{POWER_DIR}/{METHOD}_{NET}.ptiavg".format( + POWER_DIR=power_dir, + METHOD=method, + NET=n.name), pg_nets)) + verbose_append("set_power_data -format current {{ {} }}".format(" ".join(power_data))) + verbose_append("report_rail -output_dir {} -type domain ALL".format(output_dir)) + # TODO: Find highest run number, increment by 1 to enable reporting IRdrop regions + else: + if self.extra_corners_only: + extra_corners = list(filter(lambda c: c.type is MMMCCornerType.Extra, corners)) + if len(extra_corners) == 0: + self.logger.warning("power.inputs.extra_corners_only is true but no extra MMMC corners specified! 
Ignoring for rail analysis.") + else: + corners = extra_corners + for corner in corners: + options = base_options.copy() + if corner.type is MMMCCornerType.Setup: + view_name = corner.name + ".setup_view" + elif corner.type is MMMCCornerType.Hold: + view_name = corner.name + ".hold_view" + elif corner.type is MMMCCornerType.Extra: + view_name = corner.name + ".extra_view" + else: + raise ValueError("Unsupported MMMCCornerType") + pg_libs = self.get_mmmc_pgv(corner) + if self.ran_tech_stdcell_pgv: + pg_libs.append(os.path.join(self.tech_lib_dir, corner.name, "techonly.cl")) + pg_libs.append(os.path.join(self.stdcell_lib_dir, corner.name, "stdcells.cl")) + if self.ran_macro_pgv: + pg_libs.extend(list(map(lambda l: os.path.join(self.macro_lib_dir, corner.name, "macros_{}.cl".format(l)), self.macro_pgv_cells))) + if len(pg_libs) == 0: + self.logger.warning("No PG libraries are available! Rail analysis is skipped.") + return True + + options.extend([ + "-power_grid_libraries", "{{ {} }}".format(" ".join(pg_libs)), + "-analysis_view", view_name, + "-temperature", str(corner.temp.value) + ]) + verbose_append("set_rail_analysis_config {}".format(" ".join(options))) + verbose_append("set_power_data -reset") + # TODO: get nets and .ptiavg files using TCL from the .ptifiles file in the power reports directory + power_data = list(map(lambda n: "{POWER_DIR}.{VIEW}/{METHOD}_{NET}.ptiavg".format( + POWER_DIR=power_dir, + VIEW=view_name, + METHOD=method, + NET=n.name), pg_nets)) + verbose_append("set_power_data -format current {{ {} }}".format(" ".join(power_data))) + verbose_append("report_rail -output_dir {} -type domain ALL".format(output_dir)) + # TODO: Find highest run number, increment by 1 to enable reporting IRdrop regions return True + def static_rail(self) -> bool: + return self.rail_analysis("static", "staticPowerReports") + def active_rail(self) -> bool: - return True + # Vectorless database + passed = self.rail_analysis("dynamic", "activePowerReports", "activeRailReports") + + # Vectorbased databases + for vcd_path in self.waveforms: + passed = self.rail_analysis("dynamic", "activePower." + os.path.basename(vcd_path), "activeRailReports." + os.path.basename(vcd_path)) + for saif_path in self.saifs: + saif_file=".".join(saif_path.split('/')[-2:]) + passed = self.rail_analysis("dynamic", "activePower." + saif_file, "activeRailReports." + saif_file) + return passed def run_voltus(self) -> bool: verbose_append = self.verbose_append @@ -269,16 +698,28 @@ def run_voltus(self) -> bool: f.write("\n".join(self.output)) # Build args - args = [ + base_args = [ self.get_setting("power.voltus.voltus_bin"), - "-init", power_tcl_filename, "-no_gui", - "-common_ui" + "-common_ui", + "-init" ] HammerVLSILogging.enable_colour = False HammerVLSILogging.enable_tag = False + # Run PG lib gen, if needed + if self.gen_tech_stdcell_pgv: + args = base_args.copy() + args.append(self.tech_stdcell_pgv_tcl) + self.run_executable(args, cwd=self.run_dir) + if self.gen_macro_pgv: + args = base_args.copy() + args.append(self.macro_pgv_tcl) + self.run_executable(args, cwd=self.run_dir) + + args = base_args.copy() + args.append(power_tcl_filename) self.run_executable(args, cwd=self.run_dir) HammerVLSILogging.enable_colour = True diff --git a/power/voltus/defaults.yml b/power/voltus/defaults.yml index bcef863..3547251 100644 --- a/power/voltus/defaults.yml +++ b/power/voltus/defaults.yml @@ -9,3 +9,19 @@ power.voltus: # Voltus version to use. # Used to locate the binary - e.g. 
the '181' in ${cadence.cadence_home}/VOLTUS/VOLTUS181/bin/voltus version: "181" + + # Optional LEF <-> QRC layer mapping file, used if layer names are mismatched between the LEF and QRC tech files + # Should be provided by the technology plugin in a tab-separated, 4-column format + # Column 1: layer type (metal, via) + # Column 2: layer name in QRC tech file + # Column 3: lefdef (exact string) + # Column 4: layer name in tech LEF file + lef_layer_map: null + + # If true, generate power grid views of the macros contained in vlsi.inputs.extra_libraries + # Each library is required to have a name. If a library already provides a power grid view, it is skipped. + macro_pgv: true + + # Accuracy for rail analysis (choices are "xd" and "hd") + # If left null, accuracy will be selected based on which PG views can be generated + rail_accuracy: null
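
For context on the incremental macro characterization in init_technology: the patch decides which macros to re-characterize by diffing the current LEF modification times against the ones recorded in extra_lib_lefs.json on the previous run. Below is a minimal standalone sketch of that check, assuming the JSON is a flat {lef_path: mtime} map as written by this patch; the helper name and its arguments are hypothetical and not part of the plugin.

import json
import os
from typing import Dict, List

def lefs_needing_recharacterization(extra_lib_lefs: List[str], mtime_json: str) -> List[str]:
    """Return the LEF paths whose recorded mtime is missing or stale."""
    current = {lef: os.path.getmtime(lef) for lef in extra_lib_lefs}  # type: Dict[str, float]
    prior = {}  # type: Dict[str, float]
    if os.path.exists(mtime_json):
        with open(mtime_json, "r") as f:
            prior = json.load(f)
    # Record the new mtimes for the next run
    with open(mtime_json, "w") as f:
        json.dump(current, f, indent=4)
    # The set difference of (path, mtime) items flags LEFs that are new or modified
    return list(dict(set(current.items()) - set(prior.items())).keys())

Taking the set difference of the dict items catches both newly added LEFs and LEFs whose mtime changed, while LEFs that were only removed never trigger re-characterization, which matches the behavior of the mod_lefs computation in the patch.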