From 6873c848559873f438317e7482f79e81ad37f9d0 Mon Sep 17 00:00:00 2001
From: Cameron Wade
Date: Thu, 17 Aug 2023 13:52:53 -0300
Subject: [PATCH] Update all .py files to conform with Black syntax

This commit uses the Black code formatter to ensure all .py files conform to its syntax standard.
---
 create_archive.py                  |   17 +-
 data_processing/DB_to_Excel.py     |  447 ++++--
 data_processing/DatabaseUtil.py    |  679 +++++----
 data_processing/GraphVizFormats.py |    2 +-
 data_processing/GraphVizUtil.py    |  319 ++--
 data_processing/MakeGraphviz.py    |  884 ++++++-----
 data_processing/MakeOutputPlots.py |  749 ++++++----
 temoa_model/ReferenceModel.py      |    2 +-
 temoa_model/__main__.py            |    3 +-
 temoa_model/get_region.py          |   10 +-
 temoa_model/pformat_results.py     | 1672 ++++++++++++---------
 temoa_model/temoa_config.py        | 1061 +++++++------
 temoa_model/temoa_initialize.py    | 2206 +++++++++++++++-------------
 temoa_model/temoa_mga.py           |  116 +-
 temoa_model/temoa_model.py         |   85 +-
 temoa_model/temoa_myopic.py        |  860 ++++++++---
 temoa_model/temoa_rules.py         | 1266 ++++++++--------
 temoa_model/temoa_run.py           | 1091 ++++++++------
 temoa_model/temoa_stochastic.py    |  140 +-
 19 files changed, 6598 insertions(+), 5011 deletions(-)

diff --git a/create_archive.py b/create_archive.py
index 5ffe3be7..cd3268e4 100755
--- a/create_archive.py
+++ b/create_archive.py
@@ -31,18 +31,19 @@
 # Ensure compatibility with Python 2.7 and 3
 try:
     from cStringIO import StringIO
+
     temoa_pkg = StringIO()
 except ImportError:
     from io import BytesIO
-    temoa_pkg = BytesIO()

-with PyZipFile( temoa_pkg, mode='w', compression=ZIP_DEFLATED ) as zf:
-    zf.debug = 3
-    zf.writepy( 'temoa_model/' )
+    temoa_pkg = BytesIO()

-fname = 'temoa.py'
-with open( fname, 'wb' ) as f:
-    f.write( temoa_pkg.getvalue() )
+with PyZipFile(temoa_pkg, mode="w", compression=ZIP_DEFLATED) as zf:
+    zf.debug = 3
+    zf.writepy("temoa_model/")

-os.chmod( fname, stat.S_IRWXU )
+fname = "temoa.py"
+with open(fname, "wb") as f:
+    f.write(temoa_pkg.getvalue())
+os.chmod(fname, stat.S_IRWXU)

diff --git a/data_processing/DB_to_Excel.py b/data_processing/DB_to_Excel.py
index 7368b818..4d7c6310 100644
--- a/data_processing/DB_to_Excel.py
+++ b/data_processing/DB_to_Excel.py
@@ -6,173 +6,300 @@
 import xlsxwriter
 from pyam import IamDataFrame

+
 def make_excel(ifile, ofile, scenario):
-
-	if ifile is None :
-		raise "You did not specify the input file, remember to use '-i' option"
-		print("Use as :\n python DB_to_Excel.py -i (Optional -o )\n Use -h for help.")
-		sys.exit(2)
-	else :
-		file_type = re.search(r"(\w+)\.(\w+)\b", ifile) # Extract the input filename and extension
-		if not file_type :
-			print("The file type %s is not recognized. Use a db file." 
% ifile) - sys.exit(2) - if ofile is None : - ofile = file_type.group(1) - print("Look for output in %s_*.xls" % ofile) - - con = sqlite3.connect(ifile) - cur = con.cursor() # a database cursor is a control structure that enables traversal over the records in a database - con.text_factory = str #this ensures data is explored with the correct UTF-8 encoding - scenario = scenario.pop() - writer = pd.ExcelWriter(ofile+'.xlsx', engine = 'xlsxwriter', engine_kwargs = {'options':{'strings_to_formulas': False}}) - - workbook = writer.book - - header_format = workbook.add_format({'bold': True,'text_wrap': True,'align': 'left',}) - - query = "SELECT DISTINCT Efficiency.regions, Efficiency.tech, technologies.sector FROM Efficiency \ + if ifile is None: + raise "You did not specify the input file, remember to use '-i' option" + print( + "Use as :\n python DB_to_Excel.py -i (Optional -o )\n Use -h for help." + ) + sys.exit(2) + else: + file_type = re.search( + r"(\w+)\.(\w+)\b", ifile + ) # Extract the input filename and extension + if not file_type: + print("The file type %s is not recognized. Use a db file." % ifile) + sys.exit(2) + if ofile is None: + ofile = file_type.group(1) + print("Look for output in %s_*.xls" % ofile) + + con = sqlite3.connect(ifile) + cur = ( + con.cursor() + ) # a database cursor is a control structure that enables traversal over the records in a database + con.text_factory = ( + str # this ensures data is explored with the correct UTF-8 encoding + ) + scenario = scenario.pop() + writer = pd.ExcelWriter( + ofile + ".xlsx", + engine="xlsxwriter", + engine_kwargs={"options": {"strings_to_formulas": False}}, + ) + + workbook = writer.book + + header_format = workbook.add_format( + { + "bold": True, + "text_wrap": True, + "align": "left", + } + ) + + query = "SELECT DISTINCT Efficiency.regions, Efficiency.tech, technologies.sector FROM Efficiency \ INNER JOIN technologies ON Efficiency.tech=technologies.tech" - all_techs = pd.read_sql_query(query, con) - - query = "SELECT regions, tech, sector, t_periods, capacity FROM Output_CapacityByPeriodAndTech WHERE scenario='" + scenario + "'" - df_capacity = pd.read_sql_query(query, con) - for sector in sorted(df_capacity['sector'].unique()): - df_capacity_sector = df_capacity[df_capacity['sector']==sector] - df_capacity_sector = df_capacity_sector.drop(columns=['sector']).pivot_table(values='capacity', index=['regions', 'tech'], columns='t_periods') - df_capacity_sector.reset_index(inplace=True) - sector_techs = all_techs[all_techs['sector']==sector] - df_capacity_sector = pd.merge(sector_techs[['regions','tech']], df_capacity_sector, on=['regions','tech'], how='left') - df_capacity_sector.rename(columns={'regions':'Region','tech':'Technology'}, inplace=True) - df_capacity_sector.to_excel(writer, sheet_name='Capacity_' + sector, index=False, encoding='utf-8', startrow=1, header=False) - worksheet = writer.sheets['Capacity_' + sector] - worksheet.set_column('A:A', 10) - worksheet.set_column('B:B', 10) - for col, val in enumerate(df_capacity_sector.columns.values): - worksheet.write(0, col, val, header_format) - - query = "SELECT regions, tech, sector, t_periods, sum(vflow_out) as vflow_out FROM Output_VFlow_Out WHERE scenario='" + scenario + "' GROUP BY \ + all_techs = pd.read_sql_query(query, con) + + query = ( + "SELECT regions, tech, sector, t_periods, capacity FROM Output_CapacityByPeriodAndTech WHERE scenario='" + + scenario + + "'" + ) + df_capacity = pd.read_sql_query(query, con) + for sector in 
sorted(df_capacity["sector"].unique()): + df_capacity_sector = df_capacity[df_capacity["sector"] == sector] + df_capacity_sector = df_capacity_sector.drop(columns=["sector"]).pivot_table( + values="capacity", index=["regions", "tech"], columns="t_periods" + ) + df_capacity_sector.reset_index(inplace=True) + sector_techs = all_techs[all_techs["sector"] == sector] + df_capacity_sector = pd.merge( + sector_techs[["regions", "tech"]], + df_capacity_sector, + on=["regions", "tech"], + how="left", + ) + df_capacity_sector.rename( + columns={"regions": "Region", "tech": "Technology"}, inplace=True + ) + df_capacity_sector.to_excel( + writer, + sheet_name="Capacity_" + sector, + index=False, + encoding="utf-8", + startrow=1, + header=False, + ) + worksheet = writer.sheets["Capacity_" + sector] + worksheet.set_column("A:A", 10) + worksheet.set_column("B:B", 10) + for col, val in enumerate(df_capacity_sector.columns.values): + worksheet.write(0, col, val, header_format) + + query = ( + "SELECT regions, tech, sector, t_periods, sum(vflow_out) as vflow_out FROM Output_VFlow_Out WHERE scenario='" + + scenario + + "' GROUP BY \ regions, tech, sector, t_periods" - df_activity = pd.read_sql_query(query, con) - for sector in sorted(df_activity['sector'].unique()): - df_activity_sector = df_activity[df_activity['sector']==sector] - df_activity_sector = df_activity_sector.drop(columns=['sector']).pivot_table(values='vflow_out', index=['regions', 'tech'], columns='t_periods') - df_activity_sector.reset_index(inplace=True) - sector_techs = all_techs[all_techs['sector']==sector] - df_activity_sector = pd.merge(sector_techs[['regions','tech']], df_activity_sector, on=['regions','tech'], how='left') - df_activity_sector.rename(columns={'regions':'Region','tech':'Technology'}, inplace=True) - df_activity_sector.to_excel(writer, sheet_name='Activity_' + sector, index=False, encoding='utf-8', startrow=1, header=False) - worksheet = writer.sheets['Activity_' + sector] - worksheet.set_column('A:A', 10) - worksheet.set_column('B:B', 10) - for col, val in enumerate(df_activity_sector.columns.values): - worksheet.write(0, col, val, header_format) - - query = "SELECT DISTINCT EmissionActivity.regions, EmissionActivity.tech, EmissionActivity.emis_comm as emissions_comm, technologies.sector FROM EmissionActivity \ + ) + df_activity = pd.read_sql_query(query, con) + for sector in sorted(df_activity["sector"].unique()): + df_activity_sector = df_activity[df_activity["sector"] == sector] + df_activity_sector = df_activity_sector.drop(columns=["sector"]).pivot_table( + values="vflow_out", index=["regions", "tech"], columns="t_periods" + ) + df_activity_sector.reset_index(inplace=True) + sector_techs = all_techs[all_techs["sector"] == sector] + df_activity_sector = pd.merge( + sector_techs[["regions", "tech"]], + df_activity_sector, + on=["regions", "tech"], + how="left", + ) + df_activity_sector.rename( + columns={"regions": "Region", "tech": "Technology"}, inplace=True + ) + df_activity_sector.to_excel( + writer, + sheet_name="Activity_" + sector, + index=False, + encoding="utf-8", + startrow=1, + header=False, + ) + worksheet = writer.sheets["Activity_" + sector] + worksheet.set_column("A:A", 10) + worksheet.set_column("B:B", 10) + for col, val in enumerate(df_activity_sector.columns.values): + worksheet.write(0, col, val, header_format) + + query = "SELECT DISTINCT EmissionActivity.regions, EmissionActivity.tech, EmissionActivity.emis_comm as emissions_comm, technologies.sector FROM EmissionActivity \ INNER JOIN technologies 
ON EmissionActivity.tech=technologies.tech" - all_emis_techs = pd.read_sql_query(query, con) + all_emis_techs = pd.read_sql_query(query, con) - query = "SELECT regions, tech, sector, t_periods, emissions_comm, sum(emissions) as emissions FROM Output_Emissions WHERE scenario='" + scenario + "' GROUP BY \ + query = ( + "SELECT regions, tech, sector, t_periods, emissions_comm, sum(emissions) as emissions FROM Output_Emissions WHERE scenario='" + + scenario + + "' GROUP BY \ regions, tech, sector, t_periods, emissions_comm" - df_emissions_raw = pd.read_sql_query(query, con) - df_emissions = df_emissions_raw.pivot_table(values='emissions', index=['regions', 'tech', 'sector','emissions_comm'], columns='t_periods') - df_emissions.reset_index(inplace=True) - df_emissions = pd.merge(all_emis_techs, df_emissions, on=['regions','tech', 'sector', 'emissions_comm'], how='left') - df_emissions.rename(columns={'regions':'Region', 'tech':'Technology', 'emissions_comm':'Emission Commodity', 'sector':'Sector'}, inplace=True) - df_emissions.to_excel(writer, sheet_name='Emissions', index=False, encoding='utf-8', startrow=1, header=False) - worksheet = writer.sheets['Emissions'] - worksheet.set_column('A:A', 10) - worksheet.set_column('B:B', 10) - worksheet.set_column('C:C', 10) - worksheet.set_column('D:D', 20) - for col, val in enumerate(df_emissions.columns.values): - worksheet.write(0, col, val, header_format) - - query = "SELECT regions, tech, sector, output_name, vintage, output_cost FROM Output_Costs WHERE output_name LIKE '%V_Discounted%' AND scenario='" + scenario + "'" - df_costs = pd.read_sql_query(query, con) - df_costs.columns = ['Region', 'Technology', 'Sector','Output Name', 'Vintage', 'Cost'] - df_costs.to_excel(writer, sheet_name='Costs', index=False, encoding='utf-8', startrow=1, header=False) - worksheet = writer.sheets['Costs'] - worksheet.set_column('A:A', 10) - worksheet.set_column('B:B', 10) - worksheet.set_column('C:C', 10) - worksheet.set_column('D:D', 30) - for col, val in enumerate(df_costs.columns.values): - worksheet.write(0, col, val, header_format) - - writer.save() - - #prepare results for IamDataFrame - df_emissions_raw['scenario']=scenario - df_emissions_raw['unit']='?' - df_emissions_raw['variable']='Emissions|' + df_emissions_raw['emissions_comm'] + '|' + df_emissions_raw['tech'] - df_emissions_raw.rename(columns={'t_periods':'year', 'emissions':'value', 'regions':'region'}, inplace=True) - - df_capacity['scenario'] = scenario - df_capacity['unit']='?' - df_capacity['variable']='Capacity|' + df_capacity['sector'] + '|' + df_capacity['tech'] - df_capacity.rename(columns={'t_periods':'year', 'capacity':'value', 'regions':'region'}, inplace=True) - - df_activity['scenario'] = scenario - df_activity['unit']='?' 
- df_activity['variable']='Activity|' + df_activity['sector'] + '|' + df_activity['tech'] - df_activity.rename(columns={'t_periods':'year', 'vflow_out':'value', 'regions':'region'}, inplace=True) - - - # cast results to IamDataFrame and write to xlsx - columns = ['scenario', 'region', 'variable', 'year', 'value', 'unit'] - _results = pd.concat([df_emissions_raw[columns], df_activity[columns], df_capacity[columns]]) - df = IamDataFrame(_results, - model='Temoa') - - emiss = df_emissions_raw['emissions_comm'].unique() - sector = df_capacity['sector'].unique() - - # adding aggregates of emissions for each species - df.aggregate([f'Emissions|{q}' for q in emiss], append=True) - - # adding aggregates of activity/capacity for each sector - prod = itertools.product(['Activity', 'Capacity'], sector) - df.aggregate([f'{t}|{s}' for t, s in prod], append=True) - - # write IamDataFrame to xlsx - df.to_excel(ofile+'_pyam.xlsx') - - cur.close() - con.close() + ) + df_emissions_raw = pd.read_sql_query(query, con) + df_emissions = df_emissions_raw.pivot_table( + values="emissions", + index=["regions", "tech", "sector", "emissions_comm"], + columns="t_periods", + ) + df_emissions.reset_index(inplace=True) + df_emissions = pd.merge( + all_emis_techs, + df_emissions, + on=["regions", "tech", "sector", "emissions_comm"], + how="left", + ) + df_emissions.rename( + columns={ + "regions": "Region", + "tech": "Technology", + "emissions_comm": "Emission Commodity", + "sector": "Sector", + }, + inplace=True, + ) + df_emissions.to_excel( + writer, + sheet_name="Emissions", + index=False, + encoding="utf-8", + startrow=1, + header=False, + ) + worksheet = writer.sheets["Emissions"] + worksheet.set_column("A:A", 10) + worksheet.set_column("B:B", 10) + worksheet.set_column("C:C", 10) + worksheet.set_column("D:D", 20) + for col, val in enumerate(df_emissions.columns.values): + worksheet.write(0, col, val, header_format) + + query = ( + "SELECT regions, tech, sector, output_name, vintage, output_cost FROM Output_Costs WHERE output_name LIKE '%V_Discounted%' AND scenario='" + + scenario + + "'" + ) + df_costs = pd.read_sql_query(query, con) + df_costs.columns = [ + "Region", + "Technology", + "Sector", + "Output Name", + "Vintage", + "Cost", + ] + df_costs.to_excel( + writer, + sheet_name="Costs", + index=False, + encoding="utf-8", + startrow=1, + header=False, + ) + worksheet = writer.sheets["Costs"] + worksheet.set_column("A:A", 10) + worksheet.set_column("B:B", 10) + worksheet.set_column("C:C", 10) + worksheet.set_column("D:D", 30) + for col, val in enumerate(df_costs.columns.values): + worksheet.write(0, col, val, header_format) + + writer.save() + + # prepare results for IamDataFrame + df_emissions_raw["scenario"] = scenario + df_emissions_raw["unit"] = "?" + df_emissions_raw["variable"] = ( + "Emissions|" + + df_emissions_raw["emissions_comm"] + + "|" + + df_emissions_raw["tech"] + ) + df_emissions_raw.rename( + columns={"t_periods": "year", "emissions": "value", "regions": "region"}, + inplace=True, + ) + + df_capacity["scenario"] = scenario + df_capacity["unit"] = "?" + df_capacity["variable"] = ( + "Capacity|" + df_capacity["sector"] + "|" + df_capacity["tech"] + ) + df_capacity.rename( + columns={"t_periods": "year", "capacity": "value", "regions": "region"}, + inplace=True, + ) + + df_activity["scenario"] = scenario + df_activity["unit"] = "?" 
+ df_activity["variable"] = ( + "Activity|" + df_activity["sector"] + "|" + df_activity["tech"] + ) + df_activity.rename( + columns={"t_periods": "year", "vflow_out": "value", "regions": "region"}, + inplace=True, + ) + + # cast results to IamDataFrame and write to xlsx + columns = ["scenario", "region", "variable", "year", "value", "unit"] + _results = pd.concat( + [df_emissions_raw[columns], df_activity[columns], df_capacity[columns]] + ) + df = IamDataFrame(_results, model="Temoa") + + emiss = df_emissions_raw["emissions_comm"].unique() + sector = df_capacity["sector"].unique() + + # adding aggregates of emissions for each species + df.aggregate([f"Emissions|{q}" for q in emiss], append=True) + + # adding aggregates of activity/capacity for each sector + prod = itertools.product(["Activity", "Capacity"], sector) + df.aggregate([f"{t}|{s}" for t, s in prod], append=True) + + # write IamDataFrame to xlsx + df.to_excel(ofile + "_pyam.xlsx") + + cur.close() + con.close() def get_data(inputs): + ifile = None + ofile = None + scenario = set() + + if inputs is None: + raise "no arguments found" + + for opt, arg in inputs.items(): + if opt in ("-i", "--input"): + ifile = arg + elif opt in ("-o", "--output"): + ofile = arg + elif opt in ("-s", "--scenario"): + scenario.add(arg) + elif opt in ("-h", "--help"): + print( + "Use as :\n python DB_to_Excel.py -i (Optional -o )\n Use -h for help." + ) + sys.exit() + + make_excel(ifile, ofile, scenario) + + +if __name__ == "__main__": + try: + argv = sys.argv[1:] + opts, args = getopt.getopt( + argv, "hi:o:s:", ["help", "input=", "output=", "scenario="] + ) + except getopt.GetoptError: + print( + "Something's Wrong. Use as :\n python DB_to_Excel.py -i (Optional -o )\n Use -h for help." + ) + sys.exit(2) + + print(opts) - ifile = None - ofile = None - scenario = set() - - if inputs is None: - raise "no arguments found" - - for opt, arg in inputs.items(): - if opt in ("-i", "--input"): - ifile = arg - elif opt in ("-o", "--output"): - ofile = arg - elif opt in ("-s", "--scenario"): - scenario.add(arg) - elif opt in ("-h", "--help") : - print("Use as :\n python DB_to_Excel.py -i (Optional -o )\n Use -h for help.") - sys.exit() - - make_excel(ifile, ofile, scenario) - -if __name__ == "__main__": - - try: - argv = sys.argv[1:] - opts, args = getopt.getopt(argv, "hi:o:s:", ["help", "input=", "output=", "scenario="]) - except getopt.GetoptError: - print("Something's Wrong. 
Use as :\n python DB_to_Excel.py -i (Optional -o )\n Use -h for help.") - sys.exit(2) - - print(opts) - - get_data( dict(opts) ) \ No newline at end of file + get_data(dict(opts)) diff --git a/data_processing/DatabaseUtil.py b/data_processing/DatabaseUtil.py index d6dba1e5..73a77819 100644 --- a/data_processing/DatabaseUtil.py +++ b/data_processing/DatabaseUtil.py @@ -6,292 +6,352 @@ class DatabaseUtil(object): - def __init__(self, databasePath, scenario=None): - self.database = os.path.abspath(databasePath) - self.scenario = scenario - if not os.path.exists(self.database): - raise ValueError("The database file path doesn't exist") - - if self.isDataBaseFile(self.database): - try: - self.con = sqlite3.connect(self.database) - self.cur = self.con.cursor() - self.con.text_factory = str # this ensures data is explored with the correct UTF-8 encoding - except Exception as e: - raise ValueError('Unable to connect to database') - elif self.database.endswith('.dat'): - self.con = None - self.cur = None - - def close(self): - if (self.cur): - self.cur.close() - if (self.con): - self.con.close() - - @staticmethod - def isDataBaseFile(file): - if file.endswith('.db') or file.endswith('.sqlite') or file.endswith('.sqlite3'): - return True - else: - return False - - def readFromDatFile(self, inp_comm, inp_tech): - if (not self.cur is None): - raise ValueError("Invalid Operation For Database file") - if inp_comm is None and inp_tech is None : - inp_comm = "\w+" - inp_tech = "\w+" - else : - if inp_comm is None : - inp_comm = "\W+" - if inp_tech is None : - inp_tech = "\W+" - - test2 = [] - eff_flag = False - with open (self.database) as f: - for line in f: - if eff_flag is False and re.search("^\s*param\s+efficiency\s*[:][=]", line, flags = re.I) : - #Search for the line param Efficiency := (The script recognizes the commodities specified in this section) - eff_flag = True - elif eff_flag: - line = re.sub("[#].*$", " ", line) - if re.search("^\s*;\s*$", line) : - break # Finish searching this section when encounter a ';' - if re.search("^\s+$", line) : - continue - line = re.sub("^\s+|\s+$", "", line) - row = re.split("\s+", line) - if not re.search(inp_comm, row[0]) and not re.search(inp_comm, row[3]) and not re.search(inp_tech, row[1]) : - continue - - test2.append(tuple(row)) - - result = pd.DataFrame(test2, columns = ['input_comm', 'tech', 'period', 'output_comm', 'flow']) - return result[['input_comm', 'tech', 'output_comm']] - - - def getTimePeridosForFlags(self, flags=[]): - if (self.cur is None): - raise ValueError("Invalid Operation For dat file") - query = '' - if (flags is None) or (not flags): - query = "SELECT t_periods FROM time_periods" - else: - flag = flags[0] - query = "SELECT t_periods FROM time_periods WHERE flag is '"+flag+"'" - for i in range(1, len(flags)): - query += " OR flag is '"+flags[i]+"'" - - self.cur.execute(query) - result = set() - for row in self.cur: - result.add(int(row[0])) - - return result - - def getTechnologiesForFlags(self, flags=[]): - if (self.cur is None): - raise ValueError("Invalid Operation For dat file") - query = '' - if (flags is None) or (not flags): - query = "SELECT tech FROM technologies" - else: - flag = flags[0] - query = "SELECT tech FROM technologies WHERE flag='"+flag+"'" - for i in range(1, len(flags)): - query += " OR flag='"+flags[i]+"'" - - result = set() - for row in self.cur.execute(query): - result.add(row[0]) - - return result - - # TODO: Merge this with next function (getExistingTechnologiesForCommodity) - def 
getCommoditiesAndTech(self, inp_comm, inp_tech, region): - if (self.cur is None): - raise ValueError("Invalid Operation For dat file") - if inp_comm is None and inp_tech is None : - inp_comm = "NOT NULL" - inp_tech = "NOT NULL" - else : - if inp_comm is None : - inp_comm = "NULL" - else : - inp_comm = "'"+inp_comm+"'" - if inp_tech is None : - inp_tech = "NULL" - else : - inp_tech = "'"+inp_tech+"'" - - if region==None: - self.cur.execute("SELECT input_comm, tech, output_comm FROM Efficiency WHERE input_comm is "+inp_comm+" or output_comm is "+inp_comm+" or tech is "+inp_tech) - else: - self.cur.execute("SELECT input_comm, tech, output_comm FROM Efficiency WHERE regions LIKE '%"+region+"%' and (input_comm is "+inp_comm+" or output_comm is "+inp_comm+" or tech is "+inp_tech+")") - return pd.DataFrame(self.cur.fetchall(), columns = ['input_comm', 'tech', 'output_comm']) - - def getExistingTechnologiesForCommodity(self, comm, region, comm_type='input'): - if (self.cur is None): - raise ValueError("Invalid Operation For dat file") - query = '' - if (comm_type == 'input'): - query = "SELECT DISTINCT tech FROM Efficiency WHERE input_comm is '"+comm+"'" - else: - query = "SELECT DISTINCT tech FROM Efficiency WHERE output_comm is '"+comm+"'" - if region: - query +=" AND regions LIKE '%" + region + "%'" - - self.cur.execute(query) - result = pd.DataFrame(self.cur.fetchall(), columns = ['tech']) - return result - - - def getCommoditiesForFlags(self, flags=[]): - if (self.cur is None): - raise ValueError("Invalid Operation For dat file") - query = '' - if (flags is None) or (not flags): - query = "SELECT comm_name FROM commodities" - else: - flag = flags[0] - query = "SELECT comm_name FROM commodities WHERE flag is '"+flag+"'" - for i in range(1, len(flags)): - query += " OR flag is '"+flags[i]+"'" - - result = set() - for row in self.cur.execute(query): - result.add(row[0]) - - return result - - # comm_type can be 'input' or 'output' - def getCommoditiesByTechnology(self, region, comm_type='input'): - if (self.cur is None): - raise ValueError("Invalid Operation For dat file") - query = '' - if (comm_type == 'input'): - query = 'SELECT DISTINCT input_comm, tech FROM Efficiency' - elif (comm_type == 'output'): - query = 'SELECT DISTINCT tech, output_comm FROM Efficiency' - else: - raise ValueError('Invalid commodity comm_type: can only be input or output') - - if region: - query += " WHERE regions LIKE '%" + region + "%'" - result = set() - for row in self.cur.execute(query): - result.add((row[0], row[1])) - - return result - - def getCapacityForTechAndPeriod(self, tech = None, period = None, region = None): - if (self.cur is None): - raise ValueError("Invalid Operation For dat file") - if self.scenario is None or self.scenario == '': - raise ValueError('For Output related queries, please set a scenario first') - - columns = [] - if tech is None: - columns.append('tech') - if period is None: - columns.append('t_periods') - columns.append('capacity') - columns.append('regions') - - query = "SELECT "+columns[0] - - for col in columns[1:]: - query += ", " + col - - query += " FROM Output_CapacityByPeriodAndTech WHERE scenario == '"+self.scenario+"'" - - if (region): - query += " AND regions LIKE '" + region + "%'" - if (tech): - query += " AND tech is '"+tech+"'" - if (period): - query += " AND t_periods == '"+str(period)+"'" - - self.cur.execute(query) - result = pd.DataFrame(self.cur.fetchall(), columns=columns) - if region is None: - mask = result['regions'].str.contains('-') - result.loc[mask, 
'capacity'] /=2 - - result.drop(columns=['regions'], inplace=True) - if (len(columns) == 2): - return result.sum() - else: - return result.groupby(by='tech').sum().reset_index() - - def getOutputFlowForPeriod(self, period, region, comm_type='input', commodity=None): - if (self.cur is None): - raise ValueError("Invalid Operation For dat file") - if self.scenario is None or self.scenario == '': - raise ValueError('For Output related queries, please set a scenario first') - columns = [] - table = '' - col = '' - if (comm_type =='input'): - table = 'Output_VFlow_In' - if (commodity is None): - columns.append('input_comm') - col = 'vflow_in' - columns.append('tech') - if (comm_type== 'output'): - table = 'Output_VFlow_Out' - if (commodity is None): - columns.append('output_comm') - col = 'vflow_out' - - query = "SELECT DISTINCT " - for c in columns: - query += c+", " - query += 'SUM('+col+") AS flow FROM "+table+" WHERE scenario is '"+self.scenario+"'" - if (region) and (comm_type=='input'): - query += " AND regions LIKE '" + region + "%'" - if (region) and (comm_type=='output'): - query += " AND regions LIKE '%" + region + "'" - query += " AND t_periods is '"+str(period)+"' " - - query2 = " GROUP BY tech" - if (not commodity is None): - query += ' AND '+comm_type+"_comm is '"+commodity+"'" - if (comm_type == 'output'): - query += " AND input_comm != 'ethos' " - else: - query2 += ", "+comm_type+'_comm' - - query += query2 - columns.append('flow') - self.cur.execute(query) - result = pd.DataFrame(self.cur.fetchall(), columns=columns) - return result - - def getEmissionsActivityForPeriod(self, period, region): - if (self.cur is None): - raise ValueError("Invalid Operation For dat file") - if self.scenario is None or self.scenario == '': - raise ValueError('For Output related queries, please set a scenario first') - query = "SELECT E.emis_comm, E.tech, SUM(E.emis_act*O.vflow_out) FROM EmissionActivity E, Output_VFlow_Out O " + \ - "WHERE E.input_comm == O.input_comm AND E.tech == O.tech AND E.vintage == O.vintage AND E.output_comm == O.output_comm AND O.scenario == '"+ self.scenario +"' " + \ - "and O.t_periods == '"+str(period) + "'" - if (region): - query += " AND E.regions LIKE '%" + region + "%'" - query +=" GROUP BY E.tech, E.emis_comm" - self.cur.execute(query) - result = pd.DataFrame(self.cur.fetchall(), columns=['emis_comm', 'tech', 'emis_activity']) - return result - - def getCommodityWiseInputAndOutputFlow(self, tech, period, region): - if (self.cur is None): - raise ValueError("Invalid Operation For dat file") - if self.scenario is None or self.scenario == '': - raise ValueError('For Output related queries, please set a scenario first') - - query = "SELECT OF.input_comm, OF.output_comm, OF.vintage, OF.regions,\ + def __init__(self, databasePath, scenario=None): + self.database = os.path.abspath(databasePath) + self.scenario = scenario + if not os.path.exists(self.database): + raise ValueError("The database file path doesn't exist") + + if self.isDataBaseFile(self.database): + try: + self.con = sqlite3.connect(self.database) + self.cur = self.con.cursor() + self.con.text_factory = ( + str # this ensures data is explored with the correct UTF-8 encoding + ) + except Exception as e: + raise ValueError("Unable to connect to database") + elif self.database.endswith(".dat"): + self.con = None + self.cur = None + + def close(self): + if self.cur: + self.cur.close() + if self.con: + self.con.close() + + @staticmethod + def isDataBaseFile(file): + if ( + file.endswith(".db") + or 
file.endswith(".sqlite") + or file.endswith(".sqlite3") + ): + return True + else: + return False + + def readFromDatFile(self, inp_comm, inp_tech): + if not self.cur is None: + raise ValueError("Invalid Operation For Database file") + if inp_comm is None and inp_tech is None: + inp_comm = "\w+" + inp_tech = "\w+" + else: + if inp_comm is None: + inp_comm = "\W+" + if inp_tech is None: + inp_tech = "\W+" + + test2 = [] + eff_flag = False + with open(self.database) as f: + for line in f: + if eff_flag is False and re.search( + "^\s*param\s+efficiency\s*[:][=]", line, flags=re.I + ): + # Search for the line param Efficiency := (The script recognizes the commodities specified in this section) + eff_flag = True + elif eff_flag: + line = re.sub("[#].*$", " ", line) + if re.search("^\s*;\s*$", line): + break # Finish searching this section when encounter a ';' + if re.search("^\s+$", line): + continue + line = re.sub("^\s+|\s+$", "", line) + row = re.split("\s+", line) + if ( + not re.search(inp_comm, row[0]) + and not re.search(inp_comm, row[3]) + and not re.search(inp_tech, row[1]) + ): + continue + + test2.append(tuple(row)) + + result = pd.DataFrame( + test2, columns=["input_comm", "tech", "period", "output_comm", "flow"] + ) + return result[["input_comm", "tech", "output_comm"]] + + def getTimePeridosForFlags(self, flags=[]): + if self.cur is None: + raise ValueError("Invalid Operation For dat file") + query = "" + if (flags is None) or (not flags): + query = "SELECT t_periods FROM time_periods" + else: + flag = flags[0] + query = "SELECT t_periods FROM time_periods WHERE flag is '" + flag + "'" + for i in range(1, len(flags)): + query += " OR flag is '" + flags[i] + "'" + + self.cur.execute(query) + result = set() + for row in self.cur: + result.add(int(row[0])) + + return result + + def getTechnologiesForFlags(self, flags=[]): + if self.cur is None: + raise ValueError("Invalid Operation For dat file") + query = "" + if (flags is None) or (not flags): + query = "SELECT tech FROM technologies" + else: + flag = flags[0] + query = "SELECT tech FROM technologies WHERE flag='" + flag + "'" + for i in range(1, len(flags)): + query += " OR flag='" + flags[i] + "'" + + result = set() + for row in self.cur.execute(query): + result.add(row[0]) + + return result + + # TODO: Merge this with next function (getExistingTechnologiesForCommodity) + def getCommoditiesAndTech(self, inp_comm, inp_tech, region): + if self.cur is None: + raise ValueError("Invalid Operation For dat file") + if inp_comm is None and inp_tech is None: + inp_comm = "NOT NULL" + inp_tech = "NOT NULL" + else: + if inp_comm is None: + inp_comm = "NULL" + else: + inp_comm = "'" + inp_comm + "'" + if inp_tech is None: + inp_tech = "NULL" + else: + inp_tech = "'" + inp_tech + "'" + + if region == None: + self.cur.execute( + "SELECT input_comm, tech, output_comm FROM Efficiency WHERE input_comm is " + + inp_comm + + " or output_comm is " + + inp_comm + + " or tech is " + + inp_tech + ) + else: + self.cur.execute( + "SELECT input_comm, tech, output_comm FROM Efficiency WHERE regions LIKE '%" + + region + + "%' and (input_comm is " + + inp_comm + + " or output_comm is " + + inp_comm + + " or tech is " + + inp_tech + + ")" + ) + return pd.DataFrame( + self.cur.fetchall(), columns=["input_comm", "tech", "output_comm"] + ) + + def getExistingTechnologiesForCommodity(self, comm, region, comm_type="input"): + if self.cur is None: + raise ValueError("Invalid Operation For dat file") + query = "" + if comm_type == "input": + query = ( + "SELECT 
DISTINCT tech FROM Efficiency WHERE input_comm is '" + + comm + + "'" + ) + else: + query = ( + "SELECT DISTINCT tech FROM Efficiency WHERE output_comm is '" + + comm + + "'" + ) + if region: + query += " AND regions LIKE '%" + region + "%'" + + self.cur.execute(query) + result = pd.DataFrame(self.cur.fetchall(), columns=["tech"]) + return result + + def getCommoditiesForFlags(self, flags=[]): + if self.cur is None: + raise ValueError("Invalid Operation For dat file") + query = "" + if (flags is None) or (not flags): + query = "SELECT comm_name FROM commodities" + else: + flag = flags[0] + query = "SELECT comm_name FROM commodities WHERE flag is '" + flag + "'" + for i in range(1, len(flags)): + query += " OR flag is '" + flags[i] + "'" + + result = set() + for row in self.cur.execute(query): + result.add(row[0]) + + return result + + # comm_type can be 'input' or 'output' + def getCommoditiesByTechnology(self, region, comm_type="input"): + if self.cur is None: + raise ValueError("Invalid Operation For dat file") + query = "" + if comm_type == "input": + query = "SELECT DISTINCT input_comm, tech FROM Efficiency" + elif comm_type == "output": + query = "SELECT DISTINCT tech, output_comm FROM Efficiency" + else: + raise ValueError("Invalid commodity comm_type: can only be input or output") + + if region: + query += " WHERE regions LIKE '%" + region + "%'" + result = set() + for row in self.cur.execute(query): + result.add((row[0], row[1])) + + return result + + def getCapacityForTechAndPeriod(self, tech=None, period=None, region=None): + if self.cur is None: + raise ValueError("Invalid Operation For dat file") + if self.scenario is None or self.scenario == "": + raise ValueError("For Output related queries, please set a scenario first") + + columns = [] + if tech is None: + columns.append("tech") + if period is None: + columns.append("t_periods") + columns.append("capacity") + columns.append("regions") + + query = "SELECT " + columns[0] + + for col in columns[1:]: + query += ", " + col + + query += ( + " FROM Output_CapacityByPeriodAndTech WHERE scenario == '" + + self.scenario + + "'" + ) + + if region: + query += " AND regions LIKE '" + region + "%'" + if tech: + query += " AND tech is '" + tech + "'" + if period: + query += " AND t_periods == '" + str(period) + "'" + + self.cur.execute(query) + result = pd.DataFrame(self.cur.fetchall(), columns=columns) + if region is None: + mask = result["regions"].str.contains("-") + result.loc[mask, "capacity"] /= 2 + + result.drop(columns=["regions"], inplace=True) + if len(columns) == 2: + return result.sum() + else: + return result.groupby(by="tech").sum().reset_index() + + def getOutputFlowForPeriod(self, period, region, comm_type="input", commodity=None): + if self.cur is None: + raise ValueError("Invalid Operation For dat file") + if self.scenario is None or self.scenario == "": + raise ValueError("For Output related queries, please set a scenario first") + columns = [] + table = "" + col = "" + if comm_type == "input": + table = "Output_VFlow_In" + if commodity is None: + columns.append("input_comm") + col = "vflow_in" + columns.append("tech") + if comm_type == "output": + table = "Output_VFlow_Out" + if commodity is None: + columns.append("output_comm") + col = "vflow_out" + + query = "SELECT DISTINCT " + for c in columns: + query += c + ", " + query += ( + "SUM(" + + col + + ") AS flow FROM " + + table + + " WHERE scenario is '" + + self.scenario + + "'" + ) + if (region) and (comm_type == "input"): + query += " AND regions LIKE '" + region + 
"%'" + if (region) and (comm_type == "output"): + query += " AND regions LIKE '%" + region + "'" + query += " AND t_periods is '" + str(period) + "' " + + query2 = " GROUP BY tech" + if not commodity is None: + query += " AND " + comm_type + "_comm is '" + commodity + "'" + if comm_type == "output": + query += " AND input_comm != 'ethos' " + else: + query2 += ", " + comm_type + "_comm" + + query += query2 + columns.append("flow") + self.cur.execute(query) + result = pd.DataFrame(self.cur.fetchall(), columns=columns) + return result + + def getEmissionsActivityForPeriod(self, period, region): + if self.cur is None: + raise ValueError("Invalid Operation For dat file") + if self.scenario is None or self.scenario == "": + raise ValueError("For Output related queries, please set a scenario first") + query = ( + "SELECT E.emis_comm, E.tech, SUM(E.emis_act*O.vflow_out) FROM EmissionActivity E, Output_VFlow_Out O " + + "WHERE E.input_comm == O.input_comm AND E.tech == O.tech AND E.vintage == O.vintage AND E.output_comm == O.output_comm AND O.scenario == '" + + self.scenario + + "' " + + "and O.t_periods == '" + + str(period) + + "'" + ) + if region: + query += " AND E.regions LIKE '%" + region + "%'" + query += " GROUP BY E.tech, E.emis_comm" + self.cur.execute(query) + result = pd.DataFrame( + self.cur.fetchall(), columns=["emis_comm", "tech", "emis_activity"] + ) + return result + + def getCommodityWiseInputAndOutputFlow(self, tech, period, region): + if self.cur is None: + raise ValueError("Invalid Operation For dat file") + if self.scenario is None or self.scenario == "": + raise ValueError("For Output related queries, please set a scenario first") + + query = ( + "SELECT OF.input_comm, OF.output_comm, OF.vintage, OF.regions,\ SUM(OF.vflow_in) vflow_in, SUM(OFO.vflow_out) vflow_out, OC.capacity \ FROM (SELECT regions, scenario, sector, t_periods, input_comm, tech, vintage, output_comm, sum(vflow_in) AS vflow_in \ FROM Output_VFlow_In GROUP BY regions, scenario, sector, t_periods, input_comm, tech, vintage, output_comm) AS OF \ @@ -313,17 +373,36 @@ def getCommodityWiseInputAndOutputFlow(self, tech, period, region): OF.tech = OC.tech AND \ OF.vintage = OC.vintage \ WHERE \ - OF.t_periods ='"+ str(period) + "' AND \ - OF.tech is '" + tech+ "' AND \ - OF.scenario is '" + self.scenario + "'" - - if (region): - query += " AND OF.regions LIKE '%" + region + "%'" - - query +=" GROUP BY OF.regions, OF.vintage, OF.input_comm, OF.output_comm" - - self.cur.execute(query) - result = pd.DataFrame(self.cur.fetchall(), columns=['input_comm', 'output_comm', 'vintage', 'regions','flow_in', 'flow_out', 'capacity']) - result = pd.DataFrame(result.groupby(['input_comm', 'output_comm', 'vintage']).sum().reset_index()) - return result - + OF.t_periods ='" + + str(period) + + "' AND \ + OF.tech is '" + + tech + + "' AND \ + OF.scenario is '" + + self.scenario + + "'" + ) + + if region: + query += " AND OF.regions LIKE '%" + region + "%'" + + query += " GROUP BY OF.regions, OF.vintage, OF.input_comm, OF.output_comm" + + self.cur.execute(query) + result = pd.DataFrame( + self.cur.fetchall(), + columns=[ + "input_comm", + "output_comm", + "vintage", + "regions", + "flow_in", + "flow_out", + "capacity", + ], + ) + result = pd.DataFrame( + result.groupby(["input_comm", "output_comm", "vintage"]).sum().reset_index() + ) + return result diff --git a/data_processing/GraphVizFormats.py b/data_processing/GraphVizFormats.py index c70e7d15..799363b4 100644 --- a/data_processing/GraphVizFormats.py +++ 
b/data_processing/GraphVizFormats.py @@ -285,4 +285,4 @@ {rank = same; %(snodes)s} } -""" \ No newline at end of file +""" diff --git a/data_processing/GraphVizUtil.py b/data_processing/GraphVizUtil.py index 2dcc5964..d9b9cb65 100644 --- a/data_processing/GraphVizUtil.py +++ b/data_processing/GraphVizUtil.py @@ -6,73 +6,176 @@ def processInput(args): - parser = argparse.ArgumentParser(description="Generate Output Plot") - parser.add_argument('-i', '--input', action="store", dest="ifile", help="Input Database Filename ", required=True) - parser.add_argument('-f', '--format', action="store", dest="image_format", help="Graphviz output format (Default: svg)", default='svg') - parser.add_argument('-c', '--show_capacity', action="store_true", dest="show_capacity", - help="Whether capacity shows up in subgraphs (Default: not shown)", default=False) - parser.add_argument('-v', '--splinevar', action="store_true", dest="splinevar", help="Whether subgraph edges to be straight or curved (Default: Straight)", default=False) - parser.add_argument('-t', '--graph_type', action="store", dest="graph_type", help="Type of subgraph (Default: separate_vintages)", - choices = ['separate_vintages', 'explicit_vintages'], default='separate_vintages') - parser.add_argument('-g', '--gray', action="store_true", dest="grey_flag", help="If specified, generates graph in graycale", default=False) - parser.add_argument('-n', '--name', action="store", dest="quick_name", help="Specify the extension you wish to give your quick run") - parser.add_argument('-o', '--output', action="store", dest="res_dir", help='Optional output file path (to dump the images folder)', default='./') - - group1 = parser.add_mutually_exclusive_group() - group1.add_argument('-b', '--technology', action="store", dest="inp_technology", help="Technology for which graph to be generated") - group1.add_argument('-a', '--commodity', action="store", dest="inp_commodity", help="Commodity for which graph to be generated") - - parser.add_argument('-s', '--scenario', action="store", dest="scenario_name", help="Model run scenario name", default=None) - parser.add_argument('-y', '--year', action="store", dest="period", type=int, help="The period for which the graph is to be generated (Used only for output plots)") - parser.add_argument('-r', '--region', action="store", dest="region", help="The region for which the graph is to be generated", default=None) - - options = parser.parse_args(args) - - if (bool(options.scenario_name) ^ bool(options.period)): - parser.print_help() - raise ValueError("Scenario and input year must both be present or not present together") - - return vars(options) + parser = argparse.ArgumentParser(description="Generate Output Plot") + parser.add_argument( + "-i", + "--input", + action="store", + dest="ifile", + help="Input Database Filename ", + required=True, + ) + parser.add_argument( + "-f", + "--format", + action="store", + dest="image_format", + help="Graphviz output format (Default: svg)", + default="svg", + ) + parser.add_argument( + "-c", + "--show_capacity", + action="store_true", + dest="show_capacity", + help="Whether capacity shows up in subgraphs (Default: not shown)", + default=False, + ) + parser.add_argument( + "-v", + "--splinevar", + action="store_true", + dest="splinevar", + help="Whether subgraph edges to be straight or curved (Default: Straight)", + default=False, + ) + parser.add_argument( + "-t", + "--graph_type", + action="store", + dest="graph_type", + help="Type of subgraph (Default: separate_vintages)", + 
choices=["separate_vintages", "explicit_vintages"], + default="separate_vintages", + ) + parser.add_argument( + "-g", + "--gray", + action="store_true", + dest="grey_flag", + help="If specified, generates graph in graycale", + default=False, + ) + parser.add_argument( + "-n", + "--name", + action="store", + dest="quick_name", + help="Specify the extension you wish to give your quick run", + ) + parser.add_argument( + "-o", + "--output", + action="store", + dest="res_dir", + help="Optional output file path (to dump the images folder)", + default="./", + ) + + group1 = parser.add_mutually_exclusive_group() + group1.add_argument( + "-b", + "--technology", + action="store", + dest="inp_technology", + help="Technology for which graph to be generated", + ) + group1.add_argument( + "-a", + "--commodity", + action="store", + dest="inp_commodity", + help="Commodity for which graph to be generated", + ) + + parser.add_argument( + "-s", + "--scenario", + action="store", + dest="scenario_name", + help="Model run scenario name", + default=None, + ) + parser.add_argument( + "-y", + "--year", + action="store", + dest="period", + type=int, + help="The period for which the graph is to be generated (Used only for output plots)", + ) + parser.add_argument( + "-r", + "--region", + action="store", + dest="region", + help="The region for which the graph is to be generated", + default=None, + ) + + options = parser.parse_args(args) + + if bool(options.scenario_name) ^ bool(options.period): + parser.print_help() + raise ValueError( + "Scenario and input year must both be present or not present together" + ) + + return vars(options) + def getColorConfig(grey_flag): - grey_flag = not (grey_flag) - kwargs = dict( - tech_color = 'darkseagreen' if grey_flag else 'black', - commodity_color = 'lightsteelblue' if grey_flag else 'black', - unused_color = 'powderblue' if grey_flag else 'gray75', - arrowheadout_color = 'forestgreen' if grey_flag else 'black', - arrowheadin_color = 'firebrick' if grey_flag else 'black', - usedfont_color = 'black', - unusedfont_color = 'chocolate' if grey_flag else 'gray75', - menu_color = 'hotpink', - home_color = 'gray75', - font_color = 'black' if grey_flag else 'white', - fill_color = 'lightsteelblue' if grey_flag else 'white', - - #MODELDETAILED, - md_tech_color = 'hotpink', - - sb_incom_color = 'lightsteelblue' if grey_flag else 'black', - sb_outcom_color = 'lawngreen' if grey_flag else 'black', - sb_vpbackg_color = 'lightgrey', - sb_vp_color = 'white', - sb_arrow_color = 'forestgreen' if grey_flag else 'black', - - #SUBGRAPH 1 ARROW COLORS - color_list = ('red', 'orange', 'gold', 'green', 'blue', 'purple', - 'hotpink', 'cyan', 'burlywood', 'coral', 'limegreen', - 'black', 'brown') if grey_flag else ('black', 'black'), - ) - return kwargs - -def _getLen ( key ): - def wrapped ( obj ): - return len(obj[ key ]) - return wrapped - - -def create_text_nodes ( nodes, indent=1 ): - """\ + grey_flag = not (grey_flag) + kwargs = dict( + tech_color="darkseagreen" if grey_flag else "black", + commodity_color="lightsteelblue" if grey_flag else "black", + unused_color="powderblue" if grey_flag else "gray75", + arrowheadout_color="forestgreen" if grey_flag else "black", + arrowheadin_color="firebrick" if grey_flag else "black", + usedfont_color="black", + unusedfont_color="chocolate" if grey_flag else "gray75", + menu_color="hotpink", + home_color="gray75", + font_color="black" if grey_flag else "white", + fill_color="lightsteelblue" if grey_flag else "white", + # MODELDETAILED, + 
md_tech_color="hotpink", + sb_incom_color="lightsteelblue" if grey_flag else "black", + sb_outcom_color="lawngreen" if grey_flag else "black", + sb_vpbackg_color="lightgrey", + sb_vp_color="white", + sb_arrow_color="forestgreen" if grey_flag else "black", + # SUBGRAPH 1 ARROW COLORS + color_list=( + "red", + "orange", + "gold", + "green", + "blue", + "purple", + "hotpink", + "cyan", + "burlywood", + "coral", + "limegreen", + "black", + "brown", + ) + if grey_flag + else ("black", "black"), + ) + return kwargs + + +def _getLen(key): + def wrapped(obj): + return len(obj[key]) + + return wrapped + + +def create_text_nodes(nodes, indent=1): + """\ Return a set of text nodes in Graphviz DOT format, optimally padded for easier reading and debugging. @@ -81,32 +184,33 @@ def create_text_nodes ( nodes, indent=1 ): indent: integer, number of tabs with which to indent all Dot node lines """ - if not nodes: return '// no nodes in this section' + if not nodes: + return "// no nodes in this section" - # guarantee basic structure of nodes arg - assert( len(nodes) == sum( 1 for a, b in nodes ) ) + # guarantee basic structure of nodes arg + assert len(nodes) == sum(1 for a, b in nodes) - # Step 1: for alignment, get max item length in node list - maxl = max(map(_getLen(0), nodes)) + 2 # account for two extra quotes + # Step 1: for alignment, get max item length in node list + maxl = max(map(_getLen(0), nodes)) + 2 # account for two extra quotes - # Step 2: prepare a text format based on max node size that pads all - # lines with attributes - nfmt_attr = '{0:<%d} [ {1} ] ;' % maxl # node text format - nfmt_noa = '{0} ;' + # Step 2: prepare a text format based on max node size that pads all + # lines with attributes + nfmt_attr = "{0:<%d} [ {1} ] ;" % maxl # node text format + nfmt_noa = "{0} ;" - # Step 3: create each node, and place string representation in a set to - # guarantee uniqueness - q = '"%s"' # enforce quoting for all nodes - gviz = set( nfmt_attr.format( q % n, a ) for n, a in nodes if a ) - gviz.update( nfmt_noa.format( q % n ) for n, a in nodes if not a ) + # Step 3: create each node, and place string representation in a set to + # guarantee uniqueness + q = '"%s"' # enforce quoting for all nodes + gviz = set(nfmt_attr.format(q % n, a) for n, a in nodes if a) + gviz.update(nfmt_noa.format(q % n) for n, a in nodes if not a) - # Step 4: return a sorted version of nodes, as a single string - indent = '\n' + '\t' *indent - return indent.join(sorted( gviz )) + # Step 4: return a sorted version of nodes, as a single string + indent = "\n" + "\t" * indent + return indent.join(sorted(gviz)) -def create_text_edges ( edges, indent=1 ): - """\ +def create_text_edges(edges, indent=1): + """\ Return a set of text edge definitions in Graphviz DOT format, optimally padded for easier reading and debugging. @@ -115,26 +219,27 @@ def create_text_edges ( edges, indent=1 ): indent: integer, number of tabs with which to indent all Dot edge lines """ - if not edges: return '// no edges in this section' - - # guarantee basic structure of edges arg - assert( len(edges) == sum( 1 for a, b, c in edges ) ) - - # Step 1: for alignment, get max length of items on left and right side of - # graph operator token ('->') - maxl, maxr = max(map(_getLen(0), edges)), max(map(_getLen(1), edges)) - maxl += 2 # account for additional two quotes - maxr += 2 # account for additional two quotes - - # Step 2: prepare format to be "\n\tinp+PADDING -> out+PADDING [..." 
- efmt_attr = '{0:<%d} -> {1:<%d} [ {2} ] ;' % (maxl, maxr) # with attributes - efmt_noa = '{0:<%d} -> {1} ;' % maxl # no attributes - - # Step 3: add each edge to a set (to guarantee unique entries only) - q = '"%s"' # enforce quoting for all tokens - gviz = set( efmt_attr.format( q % i, q % t, a ) for i, t, a in edges if a ) - gviz.update( efmt_noa.format( q % i, q % t ) for i, t, a in edges if not a ) - - # Step 4: return a sorted version of the edges, as a single string - indent = '\n' + '\t' *indent - return indent.join(sorted( gviz )) \ No newline at end of file + if not edges: + return "// no edges in this section" + + # guarantee basic structure of edges arg + assert len(edges) == sum(1 for a, b, c in edges) + + # Step 1: for alignment, get max length of items on left and right side of + # graph operator token ('->') + maxl, maxr = max(map(_getLen(0), edges)), max(map(_getLen(1), edges)) + maxl += 2 # account for additional two quotes + maxr += 2 # account for additional two quotes + + # Step 2: prepare format to be "\n\tinp+PADDING -> out+PADDING [..." + efmt_attr = "{0:<%d} -> {1:<%d} [ {2} ] ;" % (maxl, maxr) # with attributes + efmt_noa = "{0:<%d} -> {1} ;" % maxl # no attributes + + # Step 3: add each edge to a set (to guarantee unique entries only) + q = '"%s"' # enforce quoting for all tokens + gviz = set(efmt_attr.format(q % i, q % t, a) for i, t, a in edges if a) + gviz.update(efmt_noa.format(q % i, q % t) for i, t, a in edges if not a) + + # Step 4: return a sorted version of the edges, as a single string + indent = "\n" + "\t" * indent + return indent.join(sorted(gviz)) diff --git a/data_processing/MakeGraphviz.py b/data_processing/MakeGraphviz.py index 700f0aef..27be79bd 100644 --- a/data_processing/MakeGraphviz.py +++ b/data_processing/MakeGraphviz.py @@ -8,393 +8,497 @@ class GraphvizDiagramGenerator(object): - def __init__(self, dbFile, scenario=None, region=None, outDir='.', verbose = 1): - self.dbFile = dbFile - self.qName = os.path.splitext(os.path.basename(self.dbFile))[0] - self.scenario = scenario - self.region = region - self.outDir = outDir - self.folder = {'results' : 'whole_system', 'tech' : 'processes', 'comm' : 'commodities'} - self.verbose = verbose - self.colors = {} - - def connect(self): - self.dbUtil = DatabaseUtil(self.dbFile, self.scenario) - self.logger = open(os.path.join(self.outDir, 'graphviz.log'), 'w') - self.setGraphicOptions(False, False) - self.__log__('--------------------------------------') - self.__log__('GraphvizDiagramGenerator: connected') - if (self.scenario): - outDir = self.qName + '_' + self.scenario + '_graphviz' - else: - outDir = self.qName + '_input_graphviz' - - self.outDir = os.path.join(self.outDir, outDir) - if (not os.path.exists(self.outDir)): - os.mkdir(self.outDir) - #os.chdir(self.outDir) - - def close(self): - self.dbUtil.close() - self.__log__('GraphvizDiagramGenerator: disconnected') - self.__log__('--------------------------------------') - self.logger.close() - #os.chdir('..') - - def __log__(self, msg): - if (self.verbose == 1): - print(msg) - self.logger.write(msg + '\n') - - def __generateGraph__(self, dotFormat, dotArgs, outputName, outputFormat): - dotArgs.update(self.colors) - with open(outputName + '.dot', 'w') as f: - f.write(dotFormat % dotArgs) - cmd = ('dot', '-T' + outputFormat, '-o' + outputName +'.' 
+ outputFormat, outputName+'.dot') - call(cmd) - - def setGraphicOptions(self, greyFlag=None, splinevar=None): - if (not greyFlag is None): - self.greyFlag = greyFlag - self.colors.update(getColorConfig(self.greyFlag)) - if (not splinevar is None): - self.colors['splinevar'] = splinevar - self.__log__('setGraphicOption: updated greyFlag = ' + str(self.greyFlag) + ' and splinevar = '+ str(self.colors['splinevar'])) - - def CreateMainResultsDiagram (self, period, region, outputFormat='svg'): - self.__log__('CreateMainResultsDiagram: started with period = ' + str(period)) - - if (not os.path.exists( os.path.join(self.outDir, self.folder['results']) )): - os.makedirs( os.path.join(self.outDir, self.folder['results']) ) - - outputName = os.path.join(self.folder['results'], 'results%s' % period) - if (self.region): - outputName += '_' + self.region - outputName = os.path.join(self.outDir, outputName) - if (self.greyFlag): - outputName += '.grey' - #if (os.path.exists(outputName + '.' + outputFormat)): - # self.__log__('CreateMainResultsDiagram: graph already exists at path, returning') - # return self.outDir, outputName + '.' + outputFormat - - time_exist = self.dbUtil.getTimePeridosForFlags(flags=['e']) - time_future = self.dbUtil.getTimePeridosForFlags(flags=['f']) - time_optimize = set(sorted(time_future)[:-1]) - - tech_all = self.dbUtil.getTechnologiesForFlags(flags=['r','p','pb','ps']) - - commodity_carrier = self.dbUtil.getCommoditiesForFlags(flags=['d','p']) - commodity_emissions = self.dbUtil.getCommoditiesForFlags(flags=['e']) - - Efficiency_Input = self.dbUtil.getCommoditiesByTechnology(region, comm_type='input') - Efficiency_Output = self.dbUtil.getCommoditiesByTechnology(region, comm_type='output') - - V_Cap2 = self.dbUtil.getCapacityForTechAndPeriod(period=period, region=region) - - EI2 = self.dbUtil.getOutputFlowForPeriod(period=period, region=region, comm_type='input') - EO2 = self.dbUtil.getOutputFlowForPeriod(period=period, region=region, comm_type='output') - - EmiO2 = self.dbUtil.getEmissionsActivityForPeriod(period=period, region=region) - - self.__log__('CreateMainResultsDiagram: database fetched successfully') - - tech_attr_fmt = 'label="%s\\nCapacity: %.2f", href="#", onclick="loadNextGraphvizGraph(\'results\', \'%s\', \'%s\')"' - #tech_attr_fmt = 'label="%%s\\nCapacity: %%.2f", href="results_%%s_%%s.%s"' - # tech_attr_fmt %= outputFormat - # commodity_fmt = 'href="../commodities/rc_%%s_%%s.%s"' % outputFormat - commodity_fmt = 'href="#", onclick="loadNextGraphvizGraph(\'results\', \'%s\', \'%s\')"' - flow_fmt = 'label="%.2f"' - - epsilon = 0.005 - - etechs, dtechs, ecarriers, xnodes = set(), set(), set(), set() - eemissions = set() - eflowsi, eflowso, dflows = set(), set(), set() # edges - usedc, usede = set(), set() # used carriers, used emissions - - V_Cap2.index = V_Cap2.tech - for tech in set(tech_all) - set(V_Cap2.tech): - dtechs.add((tech, None)) - - for i in range(len(V_Cap2)): - row = V_Cap2.iloc[i] - etechs.add( (row['tech'], tech_attr_fmt % (row['tech'], row['capacity'], row['tech'], period)) ) - # etechs.add( (row['tech'], tech_attr_fmt % (row['tech'], row['capacity'], row['tech'], period)) ) - - udflows = set() - for i in range(len(EI2)): - row = EI2.iloc[i] - if (row['input_comm'] != 'ethos'): - eflowsi.add((row['input_comm'], row['tech'], flow_fmt % row['flow'])) - ecarriers.add((row['input_comm'], commodity_fmt % (row['input_comm'], period))) - usedc.add(row['input_comm']) - else: - cap = V_Cap2.loc[row['tech']].capacity - xnodes.add((row['tech'], 
tech_attr_fmt % (row['tech'], cap, row['tech'], period))) - udflows.add((row['input_comm'], row['tech'])) - - for row in set(Efficiency_Input) - udflows: - if row[0] != 'ethos': - dflows.add((row[0], row[1], None)) - else: - xnodes.add((row[1], None)) - - udflows = set() - for i in range(len(EO2)): - row = EO2.iloc[i] - eflowso.add((row['tech'], row['output_comm'], flow_fmt % row['flow'])) - ecarriers.add((row['output_comm'], commodity_fmt % (row['output_comm'], period))) - usedc.add(row['output_comm']) - udflows.add((row['tech'], row['output_comm'])) - - for row in set(Efficiency_Output) - udflows: - dflows.add((row[0], row[1], None)) - - for i in range(len(EmiO2)): - row = EmiO2.iloc[i] - if (row['emis_activity'] >= epsilon): - eflowso.add((row['tech'], row['emis_comm'], flow_fmt % row['emis_activity'])) - eemissions.add((row['emis_comm'], None)) - usede.add(row['emis_comm']) - - dcarriers = set() - demissions = set() - for cc in commodity_carrier: - if cc not in usedc and cc != 'ethos' : - dcarriers.add((cc, None)) - for ee in commodity_emissions: - if ee not in usede: - demissions.add((ee, None)) - - self.__log__('CreateMainResultsDiagram: creating diagrams') - args = dict( - period = period, - splinevar = self.colors['splinevar'], - dtechs = create_text_nodes( dtechs, indent=2 ), - etechs = create_text_nodes( etechs, indent=2 ), - xnodes = create_text_nodes( xnodes, indent=2 ), - dcarriers = create_text_nodes( dcarriers, indent=2 ), - ecarriers = create_text_nodes( ecarriers, indent=2 ), - demissions = create_text_nodes( demissions, indent=2 ), - eemissions = create_text_nodes( eemissions, indent=2 ), - dflows = create_text_edges( dflows, indent=2 ), - eflowsi = create_text_edges( eflowsi, indent=3 ), - eflowso = create_text_edges( eflowso, indent=3 )) - - self.__generateGraph__(results_dot_fmt, args, outputName, outputFormat) - self.__log__('CreateMainResultsDiagram: graph generated, returning') - return self.outDir, outputName + '.'+ outputFormat - - # Needs some small fixing - cases where no input but output is there. # Check sample graphs - def CreateTechResultsDiagrams (self, period, region, tech, outputFormat='svg'): # tech results - self.__log__('CreateTechResultsDiagrams: started with period = ' + str(period) + ' and tech = '+str(tech)) - - if (not os.path.exists(os.path.join(self.outDir, self.folder['tech']))): - os.makedirs( os.path.join(self.outDir, self.folder['tech']) ) - - outputName = os.path.join(self.folder['tech'], 'results_%s_%s' % (tech, period)) - if (self.region): - outputName += '_' + self.region - outputName = os.path.join(self.outDir, outputName) - if (self.greyFlag): - outputName += '.grey' - #if (os.path.exists(outputName + '.' + outputFormat)): - # self.__log__('CreateTechResultsDiagrams: graph already exists at path, returning') - # return self.outDir, outputName + '.' 
+ outputFormat - - # enode_attr_fmt = 'href="../commodities/rc_%%s_%%s.%s"' % outputFormat - # vnode_attr_fmt = 'href="results_%%s_p%%sv%%s_segments.%s", ' % outputFormat - # vnode_attr_fmt += 'label="%s\\nCap: %.2f"' - enode_attr_fmt = 'href="#", onclick="loadNextGraphvizGraph(\'results\', \'%s\', \'%s\')"' - vnode_attr_fmt = 'href="#", onclick="loadNextGraphvizGraph(\'%s\', \'%s\', \'%s\')"' - vnode_attr_fmt += 'label="%s\\nCap: %.2f"' - - total_cap = self.dbUtil.getCapacityForTechAndPeriod(tech, period, region) - flows = self.dbUtil.getCommodityWiseInputAndOutputFlow(tech, period, region) - - self.__log__('CreateTechResultsDiagrams: database fetched successfully') - - enodes, vnodes, iedges, oedges = set(), set(), set(), set() - for i in range(len(flows)): - row = flows.iloc[i] - vnode = str(row['vintage']) - vnodes.add( (vnode, vnode_attr_fmt % - (tech, period, row['vintage'], row['vintage'], row['capacity']) ) ) - - if row['input_comm'] != 'ethos': - enodes.add( (row['input_comm'], enode_attr_fmt % (row['input_comm'], period)) ) - iedges.add( (row['input_comm'], vnode, 'label="%.2f"' % row['flow_in']) ) - enodes.add( (row['output_comm'], enode_attr_fmt % (row['output_comm'], period)) ) - oedges.add( (vnode, row['output_comm'], 'label="%.2f"' % row['flow_out']) ) - - #cluster_vintage_url = "results%s.%s" % (period, outputFormat) - cluster_vintage_url = "#" - - if vnodes: - self.__log__("CreateTechResultsDiagrams: creating diagrams") - args = dict( - cluster_vintage_url = cluster_vintage_url, - total_cap = total_cap, - inp_technology = tech, - period = period, - vnodes = create_text_nodes( vnodes, indent=2 ), - enodes = create_text_nodes( enodes, indent=2 ), - iedges = create_text_edges( iedges, indent=2 ), - oedges = create_text_edges( oedges, indent=2 )) - self.__generateGraph__(tech_results_dot_fmt, args, outputName, outputFormat) - else: - self.__log__("CreateTechResultsDiagrams: nothing to create") - - self.__log__('CreateTechResultsDiagrams: graph generated, returning') - return self.outDir, outputName + '.'+ outputFormat - - def CreateCommodityPartialResults (self, period, region, comm, outputFormat='svg'): - self.__log__('CreateCommodityPartialResults: started with period = ' + str(period) + ' and comm = '+str(comm)) - - if (not os.path.exists( os.path.join(self.outDir, self.folder['comm']) )): - os.makedirs( os.path.join(self.outDir, self.folder['comm']) ) - - outputName = os.path.join(self.folder['comm'], 'rc_%s_%s' % (comm, period)) - if (self.region): - outputName += '_' + self.region - outputName = os.path.join(self.outDir, outputName) - if (self.greyFlag): - outputName += '.grey' - #if (os.path.exists(outputName + '.' + outputFormat)): - # self.__log__('CreateCommodityPartialResults: graph already exists at path, returning') - # return self.outDir, outputName + '.' 
+ outputFormat - - input_total = set(self.dbUtil.getExistingTechnologiesForCommodity(comm, region, 'output')['tech']) - output_total = set(self.dbUtil.getExistingTechnologiesForCommodity(comm, region, 'input')['tech']) - - flow_in = self.dbUtil.getOutputFlowForPeriod(period, region, 'input', comm) - otechs = set(flow_in['tech']) - - flow_out = self.dbUtil.getOutputFlowForPeriod(period, region, 'output', comm) - itechs = set(flow_out['tech']) - - self.__log__('CreateCommodityPartialResults: database fetched successfully') - - period_results_url_fmt = '../results/results%%s.%s' % outputFormat - # node_attr_fmt = 'href="../results/results_%%s_%%s.%s"' % outputFormat - # rc_node_fmt = 'color="%s", href="%s", shape="circle", fillcolor="%s", fontcolor="black"' - - node_attr_fmt = 'href="#", onclick="loadNextGraphvizGraph(\'results\', \'%s\', \'%s\')"' - rc_node_fmt = 'color="%s", href="%s", shape="circle", fillcolor="%s", fontcolor="black"' - - # url = period_results_url_fmt % period - url = '#' - enodes, dnodes, eedges, dedges = set(), set(), set(), set() - - rcnode = ((comm, rc_node_fmt % (self.colors['commodity_color'], url, self.colors['fill_color'])),) - - for i in range(len(flow_in)): - t = flow_in.iloc[i]['tech'] - f = flow_in.iloc[i]['flow'] - enodes.add( (t, node_attr_fmt % (t, period)) ) - eedges.add( (comm, t, 'label="%.2f"' % f) ) - for t in output_total - otechs: - dnodes.add( (t, None) ) - dedges.add( (comm, t, None) ) - for i in range(len(flow_out)): - t = flow_out.iloc[i]['tech'] - f = flow_out.iloc[i]['flow'] - enodes.add( (t, node_attr_fmt % (t, period)) ) - eedges.add( (t, comm, 'label="%.2f"' % f) ) - for t in input_total - itechs: - dnodes.add( (t, None) ) - dedges.add( (t, comm, None) ) - - self.__log__("CreateCommodityPartialResults: creating diagrams") - args = dict( - inp_commodity = comm, - period = period, - resource_node = create_text_nodes( rcnode ), - used_nodes = create_text_nodes( enodes, indent=2 ), - unused_nodes = create_text_nodes( dnodes, indent=2 ), - used_edges = create_text_edges( eedges, indent=2 ), - unused_edges = create_text_edges( dedges, indent=2 )) - self.__generateGraph__(commodity_dot_fmt, args, outputName, outputFormat) - self.__log__ ("CreateCommodityPartialResults: graph generated, returning") - return self.outDir, outputName + '.'+ outputFormat - - # Function for generating the Input Graph - def createCompleteInputGraph(self, region, inp_tech=None, inp_comm=None, outputFormat='svg') : - self.__log__('createCompleteInputGraph: started with inp_tech = ' + str(inp_tech)+ ' and inp_comm = ' + str(inp_comm)) - outputName = self.qName - - if (inp_tech): - outputName += "_"+str(inp_tech) - if (not os.path.exists(os.path.join(self.outDir, self.folder['tech']))): - os.makedirs( os.path.join(self.outDir, self.folder['tech']) ) - outputName = os.path.join(self.folder['tech'], outputName) - elif (inp_comm): - outputName += "_"+str(inp_comm) - if (not os.path.exists(os.path.join(self.outDir, self.folder['comm']))): - os.makedirs( os.path.join(self.outDir, self.folder['comm']) ) - outputName = os.path.join(self.folder['comm'], outputName) - else: - if (not os.path.exists(os.path.join(self.outDir, self.folder['results']))): - os.makedirs( os.path.join(self.outDir, self.folder['results']) ) - outputName = os.path.join(self.folder['results'], outputName) - - if (self.region): - outputName += '_' + self.region - - outputName = os.path.join(self.outDir, outputName) - if (self.greyFlag): - outputName += '.grey' - #if (os.path.exists(outputName + '.' 
+ outputFormat)): - # self.__log__('createCompleteInputGraph: graph already exists at path, returning') - # return self.outDir, outputName + '.' + outputFormat - - nodes, tech, ltech, to_tech, from_tech = set(), set(), set(), set(), set() - - if DatabaseUtil.isDataBaseFile(self.dbFile): - res = self.dbUtil.getCommoditiesAndTech(inp_comm, inp_tech, region) - else: - res = self.dbUtil.readFromDatFile(inp_comm, inp_tech) - - self.__log__('createCompleteInputGraph: database fetched successfully') - # Create nodes and edges using the data frames from database - for i in range(len(res)): - row = res.iloc[i] - if row['input_comm'] != 'ethos': - nodes.add(row['input_comm']) - else : - ltech.add(row['tech']) - nodes.add(row['output_comm']) - tech.add(row['tech']) - - if row['input_comm'] != 'ethos': - to_tech.add('"%s"' % row['input_comm'] + '\t->\t"%s"' % row['tech']) - from_tech.add('"%s"' % row['tech'] + '\t->\t"%s"' % row['output_comm']) - - self.__log__("createCompleteInputGraph: creating diagrams") - - args = dict( - enodes = "".join('"%s";\n\t\t' % x for x in nodes), - tnodes = "".join('"%s";\n\t\t' % x for x in tech), - iedges = "".join('%s;\n\t\t' % x for x in to_tech), - oedges = "".join('%s;\n\t\t' % x for x in from_tech), - snodes = ";".join('"%s"' %x for x in ltech), - ) - self.__generateGraph__(quick_run_dot_fmt, args, outputName, outputFormat) - self.__log__ ("createCompleteInputGraph: graph generated, returning") - return self.outDir, outputName + '.' + outputFormat - -if __name__ == '__main__': - input = processInput(sys.argv[1:]) - graphGen = GraphvizDiagramGenerator(input['ifile'], input['scenario_name'], input['region']) - graphGen.connect() - graphGen.setGraphicOptions(greyFlag = input['grey_flag'], splinevar = input['splinevar']) - if (input['scenario_name'] is None): - res = graphGen.createCompleteInputGraph(input['region'], input['inp_technology'], input['inp_commodity']) - elif (input['inp_technology'] is None and input['inp_commodity'] is None): - res = graphGen.CreateMainResultsDiagram(input['period'], input['region']) - elif (input['inp_commodity'] is None): - res = graphGen.CreateTechResultsDiagrams(input['period'], input['region'], input['inp_technology']) - elif (input['inp_technology'] is None): - res = graphGen.CreateCommodityPartialResults(input['period'], input['region'], input['inp_commodity']) - graphGen.close() - print('Check graph generated at ', res[1], ' and all results at ', res[0]) + def __init__(self, dbFile, scenario=None, region=None, outDir=".", verbose=1): + self.dbFile = dbFile + self.qName = os.path.splitext(os.path.basename(self.dbFile))[0] + self.scenario = scenario + self.region = region + self.outDir = outDir + self.folder = { + "results": "whole_system", + "tech": "processes", + "comm": "commodities", + } + self.verbose = verbose + self.colors = {} + + def connect(self): + self.dbUtil = DatabaseUtil(self.dbFile, self.scenario) + self.logger = open(os.path.join(self.outDir, "graphviz.log"), "w") + self.setGraphicOptions(False, False) + self.__log__("--------------------------------------") + self.__log__("GraphvizDiagramGenerator: connected") + if self.scenario: + outDir = self.qName + "_" + self.scenario + "_graphviz" + else: + outDir = self.qName + "_input_graphviz" + + self.outDir = os.path.join(self.outDir, outDir) + if not os.path.exists(self.outDir): + os.mkdir(self.outDir) + # os.chdir(self.outDir) + + def close(self): + self.dbUtil.close() + self.__log__("GraphvizDiagramGenerator: disconnected") + 
self.__log__("--------------------------------------") + self.logger.close() + # os.chdir('..') + + def __log__(self, msg): + if self.verbose == 1: + print(msg) + self.logger.write(msg + "\n") + + def __generateGraph__(self, dotFormat, dotArgs, outputName, outputFormat): + dotArgs.update(self.colors) + with open(outputName + ".dot", "w") as f: + f.write(dotFormat % dotArgs) + cmd = ( + "dot", + "-T" + outputFormat, + "-o" + outputName + "." + outputFormat, + outputName + ".dot", + ) + call(cmd) + + def setGraphicOptions(self, greyFlag=None, splinevar=None): + if not greyFlag is None: + self.greyFlag = greyFlag + self.colors.update(getColorConfig(self.greyFlag)) + if not splinevar is None: + self.colors["splinevar"] = splinevar + self.__log__( + "setGraphicOption: updated greyFlag = " + + str(self.greyFlag) + + " and splinevar = " + + str(self.colors["splinevar"]) + ) + + def CreateMainResultsDiagram(self, period, region, outputFormat="svg"): + self.__log__("CreateMainResultsDiagram: started with period = " + str(period)) + + if not os.path.exists(os.path.join(self.outDir, self.folder["results"])): + os.makedirs(os.path.join(self.outDir, self.folder["results"])) + + outputName = os.path.join(self.folder["results"], "results%s" % period) + if self.region: + outputName += "_" + self.region + outputName = os.path.join(self.outDir, outputName) + if self.greyFlag: + outputName += ".grey" + # if (os.path.exists(outputName + '.' + outputFormat)): + # self.__log__('CreateMainResultsDiagram: graph already exists at path, returning') + # return self.outDir, outputName + '.' + outputFormat + + time_exist = self.dbUtil.getTimePeridosForFlags(flags=["e"]) + time_future = self.dbUtil.getTimePeridosForFlags(flags=["f"]) + time_optimize = set(sorted(time_future)[:-1]) + + tech_all = self.dbUtil.getTechnologiesForFlags(flags=["r", "p", "pb", "ps"]) + + commodity_carrier = self.dbUtil.getCommoditiesForFlags(flags=["d", "p"]) + commodity_emissions = self.dbUtil.getCommoditiesForFlags(flags=["e"]) + + Efficiency_Input = self.dbUtil.getCommoditiesByTechnology( + region, comm_type="input" + ) + Efficiency_Output = self.dbUtil.getCommoditiesByTechnology( + region, comm_type="output" + ) + + V_Cap2 = self.dbUtil.getCapacityForTechAndPeriod(period=period, region=region) + + EI2 = self.dbUtil.getOutputFlowForPeriod( + period=period, region=region, comm_type="input" + ) + EO2 = self.dbUtil.getOutputFlowForPeriod( + period=period, region=region, comm_type="output" + ) + + EmiO2 = self.dbUtil.getEmissionsActivityForPeriod(period=period, region=region) + + self.__log__("CreateMainResultsDiagram: database fetched successfully") + + tech_attr_fmt = "label=\"%s\\nCapacity: %.2f\", href=\"#\", onclick=\"loadNextGraphvizGraph('results', '%s', '%s')\"" + # tech_attr_fmt = 'label="%%s\\nCapacity: %%.2f", href="results_%%s_%%s.%s"' + # tech_attr_fmt %= outputFormat + # commodity_fmt = 'href="../commodities/rc_%%s_%%s.%s"' % outputFormat + commodity_fmt = ( + "href=\"#\", onclick=\"loadNextGraphvizGraph('results', '%s', '%s')\"" + ) + flow_fmt = 'label="%.2f"' + + epsilon = 0.005 + + etechs, dtechs, ecarriers, xnodes = set(), set(), set(), set() + eemissions = set() + eflowsi, eflowso, dflows = set(), set(), set() # edges + usedc, usede = set(), set() # used carriers, used emissions + + V_Cap2.index = V_Cap2.tech + for tech in set(tech_all) - set(V_Cap2.tech): + dtechs.add((tech, None)) + + for i in range(len(V_Cap2)): + row = V_Cap2.iloc[i] + etechs.add( + ( + row["tech"], + tech_attr_fmt % (row["tech"], row["capacity"], 
row["tech"], period), + ) + ) + # etechs.add( (row['tech'], tech_attr_fmt % (row['tech'], row['capacity'], row['tech'], period)) ) + + udflows = set() + for i in range(len(EI2)): + row = EI2.iloc[i] + if row["input_comm"] != "ethos": + eflowsi.add((row["input_comm"], row["tech"], flow_fmt % row["flow"])) + ecarriers.add( + (row["input_comm"], commodity_fmt % (row["input_comm"], period)) + ) + usedc.add(row["input_comm"]) + else: + cap = V_Cap2.loc[row["tech"]].capacity + xnodes.add( + ( + row["tech"], + tech_attr_fmt % (row["tech"], cap, row["tech"], period), + ) + ) + udflows.add((row["input_comm"], row["tech"])) + + for row in set(Efficiency_Input) - udflows: + if row[0] != "ethos": + dflows.add((row[0], row[1], None)) + else: + xnodes.add((row[1], None)) + + udflows = set() + for i in range(len(EO2)): + row = EO2.iloc[i] + eflowso.add((row["tech"], row["output_comm"], flow_fmt % row["flow"])) + ecarriers.add( + (row["output_comm"], commodity_fmt % (row["output_comm"], period)) + ) + usedc.add(row["output_comm"]) + udflows.add((row["tech"], row["output_comm"])) + + for row in set(Efficiency_Output) - udflows: + dflows.add((row[0], row[1], None)) + + for i in range(len(EmiO2)): + row = EmiO2.iloc[i] + if row["emis_activity"] >= epsilon: + eflowso.add( + (row["tech"], row["emis_comm"], flow_fmt % row["emis_activity"]) + ) + eemissions.add((row["emis_comm"], None)) + usede.add(row["emis_comm"]) + + dcarriers = set() + demissions = set() + for cc in commodity_carrier: + if cc not in usedc and cc != "ethos": + dcarriers.add((cc, None)) + for ee in commodity_emissions: + if ee not in usede: + demissions.add((ee, None)) + + self.__log__("CreateMainResultsDiagram: creating diagrams") + args = dict( + period=period, + splinevar=self.colors["splinevar"], + dtechs=create_text_nodes(dtechs, indent=2), + etechs=create_text_nodes(etechs, indent=2), + xnodes=create_text_nodes(xnodes, indent=2), + dcarriers=create_text_nodes(dcarriers, indent=2), + ecarriers=create_text_nodes(ecarriers, indent=2), + demissions=create_text_nodes(demissions, indent=2), + eemissions=create_text_nodes(eemissions, indent=2), + dflows=create_text_edges(dflows, indent=2), + eflowsi=create_text_edges(eflowsi, indent=3), + eflowso=create_text_edges(eflowso, indent=3), + ) + + self.__generateGraph__(results_dot_fmt, args, outputName, outputFormat) + self.__log__("CreateMainResultsDiagram: graph generated, returning") + return self.outDir, outputName + "." + outputFormat + + # Needs some small fixing - cases where no input but output is there. # Check sample graphs + def CreateTechResultsDiagrams( + self, period, region, tech, outputFormat="svg" + ): # tech results + self.__log__( + "CreateTechResultsDiagrams: started with period = " + + str(period) + + " and tech = " + + str(tech) + ) + + if not os.path.exists(os.path.join(self.outDir, self.folder["tech"])): + os.makedirs(os.path.join(self.outDir, self.folder["tech"])) + + outputName = os.path.join(self.folder["tech"], "results_%s_%s" % (tech, period)) + if self.region: + outputName += "_" + self.region + outputName = os.path.join(self.outDir, outputName) + if self.greyFlag: + outputName += ".grey" + # if (os.path.exists(outputName + '.' + outputFormat)): + # self.__log__('CreateTechResultsDiagrams: graph already exists at path, returning') + # return self.outDir, outputName + '.' 
+ outputFormat + + # enode_attr_fmt = 'href="../commodities/rc_%%s_%%s.%s"' % outputFormat + # vnode_attr_fmt = 'href="results_%%s_p%%sv%%s_segments.%s", ' % outputFormat + # vnode_attr_fmt += 'label="%s\\nCap: %.2f"' + enode_attr_fmt = ( + "href=\"#\", onclick=\"loadNextGraphvizGraph('results', '%s', '%s')\"" + ) + vnode_attr_fmt = ( + "href=\"#\", onclick=\"loadNextGraphvizGraph('%s', '%s', '%s')\"" + ) + vnode_attr_fmt += 'label="%s\\nCap: %.2f"' + + total_cap = self.dbUtil.getCapacityForTechAndPeriod(tech, period, region) + flows = self.dbUtil.getCommodityWiseInputAndOutputFlow(tech, period, region) + + self.__log__("CreateTechResultsDiagrams: database fetched successfully") + + enodes, vnodes, iedges, oedges = set(), set(), set(), set() + for i in range(len(flows)): + row = flows.iloc[i] + vnode = str(row["vintage"]) + vnodes.add( + ( + vnode, + vnode_attr_fmt + % (tech, period, row["vintage"], row["vintage"], row["capacity"]), + ) + ) + + if row["input_comm"] != "ethos": + enodes.add( + (row["input_comm"], enode_attr_fmt % (row["input_comm"], period)) + ) + iedges.add((row["input_comm"], vnode, 'label="%.2f"' % row["flow_in"])) + enodes.add( + (row["output_comm"], enode_attr_fmt % (row["output_comm"], period)) + ) + oedges.add((vnode, row["output_comm"], 'label="%.2f"' % row["flow_out"])) + + # cluster_vintage_url = "results%s.%s" % (period, outputFormat) + cluster_vintage_url = "#" + + if vnodes: + self.__log__("CreateTechResultsDiagrams: creating diagrams") + args = dict( + cluster_vintage_url=cluster_vintage_url, + total_cap=total_cap, + inp_technology=tech, + period=period, + vnodes=create_text_nodes(vnodes, indent=2), + enodes=create_text_nodes(enodes, indent=2), + iedges=create_text_edges(iedges, indent=2), + oedges=create_text_edges(oedges, indent=2), + ) + self.__generateGraph__(tech_results_dot_fmt, args, outputName, outputFormat) + else: + self.__log__("CreateTechResultsDiagrams: nothing to create") + + self.__log__("CreateTechResultsDiagrams: graph generated, returning") + return self.outDir, outputName + "." + outputFormat + + def CreateCommodityPartialResults(self, period, region, comm, outputFormat="svg"): + self.__log__( + "CreateCommodityPartialResults: started with period = " + + str(period) + + " and comm = " + + str(comm) + ) + + if not os.path.exists(os.path.join(self.outDir, self.folder["comm"])): + os.makedirs(os.path.join(self.outDir, self.folder["comm"])) + + outputName = os.path.join(self.folder["comm"], "rc_%s_%s" % (comm, period)) + if self.region: + outputName += "_" + self.region + outputName = os.path.join(self.outDir, outputName) + if self.greyFlag: + outputName += ".grey" + # if (os.path.exists(outputName + '.' + outputFormat)): + # self.__log__('CreateCommodityPartialResults: graph already exists at path, returning') + # return self.outDir, outputName + '.' 
+ outputFormat + + input_total = set( + self.dbUtil.getExistingTechnologiesForCommodity(comm, region, "output")[ + "tech" + ] + ) + output_total = set( + self.dbUtil.getExistingTechnologiesForCommodity(comm, region, "input")[ + "tech" + ] + ) + + flow_in = self.dbUtil.getOutputFlowForPeriod(period, region, "input", comm) + otechs = set(flow_in["tech"]) + + flow_out = self.dbUtil.getOutputFlowForPeriod(period, region, "output", comm) + itechs = set(flow_out["tech"]) + + self.__log__("CreateCommodityPartialResults: database fetched successfully") + + period_results_url_fmt = "../results/results%%s.%s" % outputFormat + # node_attr_fmt = 'href="../results/results_%%s_%%s.%s"' % outputFormat + # rc_node_fmt = 'color="%s", href="%s", shape="circle", fillcolor="%s", fontcolor="black"' + + node_attr_fmt = ( + "href=\"#\", onclick=\"loadNextGraphvizGraph('results', '%s', '%s')\"" + ) + rc_node_fmt = ( + 'color="%s", href="%s", shape="circle", fillcolor="%s", fontcolor="black"' + ) + + # url = period_results_url_fmt % period + url = "#" + enodes, dnodes, eedges, dedges = set(), set(), set(), set() + + rcnode = ( + ( + comm, + rc_node_fmt + % (self.colors["commodity_color"], url, self.colors["fill_color"]), + ), + ) + + for i in range(len(flow_in)): + t = flow_in.iloc[i]["tech"] + f = flow_in.iloc[i]["flow"] + enodes.add((t, node_attr_fmt % (t, period))) + eedges.add((comm, t, 'label="%.2f"' % f)) + for t in output_total - otechs: + dnodes.add((t, None)) + dedges.add((comm, t, None)) + for i in range(len(flow_out)): + t = flow_out.iloc[i]["tech"] + f = flow_out.iloc[i]["flow"] + enodes.add((t, node_attr_fmt % (t, period))) + eedges.add((t, comm, 'label="%.2f"' % f)) + for t in input_total - itechs: + dnodes.add((t, None)) + dedges.add((t, comm, None)) + + self.__log__("CreateCommodityPartialResults: creating diagrams") + args = dict( + inp_commodity=comm, + period=period, + resource_node=create_text_nodes(rcnode), + used_nodes=create_text_nodes(enodes, indent=2), + unused_nodes=create_text_nodes(dnodes, indent=2), + used_edges=create_text_edges(eedges, indent=2), + unused_edges=create_text_edges(dedges, indent=2), + ) + self.__generateGraph__(commodity_dot_fmt, args, outputName, outputFormat) + self.__log__("CreateCommodityPartialResults: graph generated, returning") + return self.outDir, outputName + "." 
+ outputFormat + + # Function for generating the Input Graph + def createCompleteInputGraph( + self, region, inp_tech=None, inp_comm=None, outputFormat="svg" + ): + self.__log__( + "createCompleteInputGraph: started with inp_tech = " + + str(inp_tech) + + " and inp_comm = " + + str(inp_comm) + ) + outputName = self.qName + + if inp_tech: + outputName += "_" + str(inp_tech) + if not os.path.exists(os.path.join(self.outDir, self.folder["tech"])): + os.makedirs(os.path.join(self.outDir, self.folder["tech"])) + outputName = os.path.join(self.folder["tech"], outputName) + elif inp_comm: + outputName += "_" + str(inp_comm) + if not os.path.exists(os.path.join(self.outDir, self.folder["comm"])): + os.makedirs(os.path.join(self.outDir, self.folder["comm"])) + outputName = os.path.join(self.folder["comm"], outputName) + else: + if not os.path.exists(os.path.join(self.outDir, self.folder["results"])): + os.makedirs(os.path.join(self.outDir, self.folder["results"])) + outputName = os.path.join(self.folder["results"], outputName) + + if self.region: + outputName += "_" + self.region + + outputName = os.path.join(self.outDir, outputName) + if self.greyFlag: + outputName += ".grey" + # if (os.path.exists(outputName + '.' + outputFormat)): + # self.__log__('createCompleteInputGraph: graph already exists at path, returning') + # return self.outDir, outputName + '.' + outputFormat + + nodes, tech, ltech, to_tech, from_tech = set(), set(), set(), set(), set() + + if DatabaseUtil.isDataBaseFile(self.dbFile): + res = self.dbUtil.getCommoditiesAndTech(inp_comm, inp_tech, region) + else: + res = self.dbUtil.readFromDatFile(inp_comm, inp_tech) + + self.__log__("createCompleteInputGraph: database fetched successfully") + # Create nodes and edges using the data frames from database + for i in range(len(res)): + row = res.iloc[i] + if row["input_comm"] != "ethos": + nodes.add(row["input_comm"]) + else: + ltech.add(row["tech"]) + nodes.add(row["output_comm"]) + tech.add(row["tech"]) + + if row["input_comm"] != "ethos": + to_tech.add('"%s"' % row["input_comm"] + '\t->\t"%s"' % row["tech"]) + from_tech.add('"%s"' % row["tech"] + '\t->\t"%s"' % row["output_comm"]) + + self.__log__("createCompleteInputGraph: creating diagrams") + + args = dict( + enodes="".join('"%s";\n\t\t' % x for x in nodes), + tnodes="".join('"%s";\n\t\t' % x for x in tech), + iedges="".join("%s;\n\t\t" % x for x in to_tech), + oedges="".join("%s;\n\t\t" % x for x in from_tech), + snodes=";".join('"%s"' % x for x in ltech), + ) + self.__generateGraph__(quick_run_dot_fmt, args, outputName, outputFormat) + self.__log__("createCompleteInputGraph: graph generated, returning") + return self.outDir, outputName + "." 
+ outputFormat + + +if __name__ == "__main__": + input = processInput(sys.argv[1:]) + graphGen = GraphvizDiagramGenerator( + input["ifile"], input["scenario_name"], input["region"] + ) + graphGen.connect() + graphGen.setGraphicOptions( + greyFlag=input["grey_flag"], splinevar=input["splinevar"] + ) + if input["scenario_name"] is None: + res = graphGen.createCompleteInputGraph( + input["region"], input["inp_technology"], input["inp_commodity"] + ) + elif input["inp_technology"] is None and input["inp_commodity"] is None: + res = graphGen.CreateMainResultsDiagram(input["period"], input["region"]) + elif input["inp_commodity"] is None: + res = graphGen.CreateTechResultsDiagrams( + input["period"], input["region"], input["inp_technology"] + ) + elif input["inp_technology"] is None: + res = graphGen.CreateCommodityPartialResults( + input["period"], input["region"], input["inp_commodity"] + ) + graphGen.close() + print("Check graph generated at ", res[1], " and all results at ", res[0]) diff --git a/data_processing/MakeOutputPlots.py b/data_processing/MakeOutputPlots.py index 416c10a2..a4c265ec 100644 --- a/data_processing/MakeOutputPlots.py +++ b/data_processing/MakeOutputPlots.py @@ -1,6 +1,7 @@ import sqlite3, sys import matplotlib -matplotlib.use('Agg') + +matplotlib.use("Agg") from matplotlib import pyplot as plt, cm as cmx, colors from IPython import embed as IP import numpy as np @@ -11,326 +12,438 @@ class OutputPlotGenerator: + def __init__(self, path_to_db, region, scenario, super_categories=False): + self.db_path = os.path.abspath(path_to_db) + if region == "global": + self.region = "%" + else: + self.region = region + self.scenario = scenario + self.folder_name = ( + os.path.splitext(os.path.basename(path_to_db))[0] + + "_" + + region + + "_" + + scenario + + "_plots" + ) + # self.extractFromDatabase() + + def extractFromDatabase(self, type): + """ + Based on the type of the plot being generated, extract data from the corresponding table from database + """ + con = sqlite3.connect(self.db_path) + cur = con.cursor() + if type == 1: + cur.execute( + "SELECT sector, t_periods, tech, capacity FROM Output_CapacityByPeriodAndTech WHERE scenario == '" + + self.scenario + + "' AND regions LIKE '" + + self.region + + "'" + ) + self.capacity_output = cur.fetchall() + self.capacity_output = [list(elem) for elem in self.capacity_output] + elif type == 2: + cur.execute( + "SELECT sector, t_periods, tech, SUM(vflow_out) FROM Output_VFlow_Out WHERE scenario == '" + + self.scenario + + "' AND regions LIKE '" + + self.region + + "' GROUP BY sector, t_periods, tech" + ) + self.output_vflow = cur.fetchall() + self.output_vflow = [list(elem) for elem in self.output_vflow] + elif type == 3: + cur.execute( + "SELECT sector, t_periods, emissions_comm, SUM(emissions) FROM Output_Emissions WHERE scenario == '" + + self.scenario + + "' AND regions LIKE '" + + self.region + + "' GROUP BY sector, t_periods, emissions_comm" + ) + self.output_emissions = cur.fetchall() + self.output_emissions = [list(elem) for elem in self.output_emissions] + + cur.execute("SELECT tech, tech_category FROM technologies") + self.tech_categories = cur.fetchall() + self.tech_categories = [ + [str(word) for word in tuple] for tuple in self.tech_categories + ] + con.close() + + def getSectors(self, type): + """ + Based on the type of the plot being generated, returns a list of sectors available in the database + """ + self.extractFromDatabase(type) + sectors = set() + + data = None + + if type == 1: + data = self.capacity_output + elif 
type == 2: + data = self.output_vflow + elif type == 3: + data = self.output_emissions + + for row in data: + sectors.add(row[0]) + + res = list(sectors) + res.insert(0, "all") + return res + + def processData(self, inputData, sector, super_categories=False): + """ + Processes data for a particular sector to make it ready for plotting purposes + """ + periods = set() + techs = set() + + for row in inputData: + row[0] = str(row[0]) + row[1] = int(row[1]) + row[2] = str(row[2]) + row[3] = float(row[3]) + + tech_dict = dict(self.tech_categories) + if super_categories: + for row in inputData: + row[2] = tech_dict.get(row[2], row[2]) + + for row in inputData: + if row[0] == sector or sector == "all": + periods.add(row[1]) # Reminder: indexing starts at 0 + techs.add(row[2]) + + periods = list(periods) + techs = list(techs) + periods.sort() + + output_values = dict() # Each row in a dictionary is a list + for tech in techs: + if tech == "None" or tech == "": + continue + output_values[tech] = [0] * len(periods) # this just creates a blank table + for row in inputData: + if row[2] == "None" or row[2] == "": + continue + if row[0] == sector or sector == "all": + output_values[row[2]][periods.index(row[1])] += row[-1] + + output_values["periods"] = periods + return output_values + + def handleOutputPath(self, plot_type, sector, super_categories, output_dir): + outfile = plot_type + "_" + sector # +'_'+str(int(time.time()*1000))+'.png' + if super_categories: + outfile += "_merged" + outfile += ".png" + outfile2 = os.path.join(self.folder_name, outfile) + output_dir = os.path.join(output_dir, self.folder_name) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + self.output_file_name = os.path.join(output_dir, outfile) + + self.output_file_name = self.output_file_name.replace(" ", "") + return outfile2 + + def generatePlotForCapacity(self, sector, super_categories=False, output_dir="."): + """ + Generates Plot for Capacity of a given sector + """ + + outfile2 = self.handleOutputPath( + "capacity", sector, super_categories, output_dir + ) + if os.path.exists(self.output_file_name): + print("not generating new capacity plot") + return outfile2 + + sectors = self.getSectors(1) + + if not (sector in sectors): + return "" + + output_values = self.processData(self.capacity_output, sector, super_categories) + + if self.region == "%": + title = "Capacity Plot for " + sector + " across all regions" + else: + title = "Capacity Plot for " + sector + " sector in region " + self.region + + self.makeStackedBarPlot(output_values, "Years", "Capacity ", "periods", title) + + return outfile2 + + def generatePlotForOutputFlow(self, sector, super_categories=False, output_dir="."): + """ + Generates Plot for Output Flow of a given sector + """ + outfile2 = self.handleOutputPath("flow", sector, super_categories, output_dir) + if os.path.exists(self.output_file_name): + print("not generating new flow plot") + return outfile2 + + sectors = self.getSectors(2) + if not (sector in sectors): + return "" + + output_values = self.processData(self.output_vflow, sector, super_categories) + + if self.region == "%": + title = "Output Flow Plot for " + sector + " across all regions" + else: + title = ( + "Output Flow Plot for " + sector + " sector in region " + self.region + ) + + self.makeStackedBarPlot(output_values, "Years", "Activity ", "periods", title) + + return outfile2 + + def generatePlotForEmissions(self, sector, super_categories=False, output_dir="."): + """ + Generates Plot for Emissions of a given sector + 
""" + outfile2 = self.handleOutputPath( + "emissions", sector, super_categories, output_dir + ) + if os.path.exists(self.output_file_name): + print("not generating new emissions plot") + return outfile2 + + sectors = self.getSectors(3) + if not (sector in sectors): + return "" + + output_values = self.processData( + self.output_emissions, sector, super_categories + ) + + if self.region == "%": + title = "Emissions Plot for " + sector + " across all regions" + else: + title = "Emissions Plot for " + sector + " sector in region " + self.region + + self.make_line_plot(output_values.copy(), "Emissions", title) + + return outfile2 - def __init__(self, path_to_db, region, scenario, super_categories=False): - self.db_path = os.path.abspath(path_to_db) - if region == 'global': - self.region = '%' - else: - self.region = region - self.scenario = scenario - self.folder_name =os.path.splitext(os.path.basename(path_to_db))[0] + "_" + region + "_" + scenario + "_plots" - # self.extractFromDatabase() - - def extractFromDatabase(self, type): - ''' - Based on the type of the plot being generated, extract data from the corresponding table from database - ''' - con = sqlite3.connect(self.db_path) - cur = con.cursor() - if (type == 1): - cur.execute("SELECT sector, t_periods, tech, capacity FROM Output_CapacityByPeriodAndTech WHERE scenario == '"+self.scenario+"' AND regions LIKE '"+self.region+"'") - self.capacity_output = cur.fetchall() - self.capacity_output = [list(elem) for elem in self.capacity_output] - elif (type == 2): - cur.execute("SELECT sector, t_periods, tech, SUM(vflow_out) FROM Output_VFlow_Out WHERE scenario == '"+self.scenario+"' AND regions LIKE '"+self.region+"' GROUP BY sector, t_periods, tech") - self.output_vflow = cur.fetchall() - self.output_vflow = [list(elem) for elem in self.output_vflow] - elif (type == 3): - cur.execute("SELECT sector, t_periods, emissions_comm, SUM(emissions) FROM Output_Emissions WHERE scenario == '"+self.scenario+"' AND regions LIKE '"+self.region+"' GROUP BY sector, t_periods, emissions_comm") - self.output_emissions = cur.fetchall() - self.output_emissions = [list(elem) for elem in self.output_emissions] - - cur.execute("SELECT tech, tech_category FROM technologies") - self.tech_categories = cur.fetchall() - self.tech_categories = [[str(word) for word in tuple] for tuple in self.tech_categories] - con.close() - - - def getSectors(self, type): - ''' - Based on the type of the plot being generated, returns a list of sectors available in the database - ''' - self.extractFromDatabase(type) - sectors = set() - - data = None - - if (type == 1): - data = self.capacity_output - elif (type == 2): - data = self.output_vflow - elif (type == 3): - data = self.output_emissions - - for row in data: - sectors.add(row[0]) - - res = list(sectors) - res.insert(0,'all') - return res - - def processData(self,inputData, sector, super_categories=False): - ''' - Processes data for a particular sector to make it ready for plotting purposes - ''' - periods = set() - techs = set() - - for row in inputData: - row[0] = str(row[0]) - row[1] = int(row[1]) - row[2] = str(row[2]) - row[3] = float(row[3]) - - tech_dict = dict(self.tech_categories) - if (super_categories): - for row in inputData: - row[2] = tech_dict.get(row[2],row[2]) - - for row in inputData: - if (row[0] == sector or sector=='all'): - periods.add(row[1]) # Reminder: indexing starts at 0 - techs.add(row[2]) - - periods = list(periods) - techs = list(techs) - periods.sort() - - output_values = dict() # Each row in a 
dictionary is a list - for tech in techs: - if tech == 'None' or tech == '': - continue - output_values[tech] = [0]*len(periods) #this just creates a blank table - for row in inputData: - if row[2] == 'None' or row[2] == '': - continue - if (row[0] == sector or sector=='all'): - output_values[row[2]][periods.index(row[1])] += row[-1] - - output_values['periods']=periods - return output_values - - def handleOutputPath(self, plot_type, sector, super_categories, output_dir): - outfile = plot_type+'_'+sector#+'_'+str(int(time.time()*1000))+'.png' - if (super_categories): - outfile += '_merged' - outfile += '.png' - outfile2 = os.path.join(self.folder_name, outfile) - output_dir = os.path.join(output_dir, self.folder_name) - if (not os.path.exists(output_dir)): - os.makedirs(output_dir) - - self.output_file_name = os.path.join(output_dir, outfile) - - self.output_file_name = self.output_file_name.replace(" ", "") - return outfile2 - - - def generatePlotForCapacity(self,sector, super_categories=False, output_dir = '.'): - ''' - Generates Plot for Capacity of a given sector - ''' - - outfile2 = self.handleOutputPath('capacity', sector, super_categories, output_dir) - if (os.path.exists(self.output_file_name)): - print("not generating new capacity plot") - return outfile2 - - sectors = self.getSectors(1) - - if (not (sector in sectors)): - return "" - - output_values = self.processData(self.capacity_output, sector, super_categories) - - if self.region == '%': - title = 'Capacity Plot for ' + sector + ' across all regions' - else: - title = 'Capacity Plot for ' + sector + ' sector in region ' + self.region - - self.makeStackedBarPlot(output_values, "Years", "Capacity ", 'periods', title) - - return outfile2 - - def generatePlotForOutputFlow(self, sector, super_categories=False, output_dir = '.'): - ''' - Generates Plot for Output Flow of a given sector - ''' - outfile2 = self.handleOutputPath('flow', sector, super_categories, output_dir) - if (os.path.exists(self.output_file_name)): - print("not generating new flow plot") - return outfile2 - - sectors = self.getSectors(2) - if (not (sector in sectors)): - return "" - - output_values = self.processData(self.output_vflow, sector, super_categories) - - if self.region == '%': - title = 'Output Flow Plot for ' + sector + ' across all regions' - else: - title = 'Output Flow Plot for ' + sector + ' sector in region ' + self.region - - self.makeStackedBarPlot(output_values, "Years", "Activity ", 'periods', title) - - return outfile2; - - def generatePlotForEmissions(self, sector, super_categories=False, output_dir = '.'): - ''' - Generates Plot for Emissions of a given sector - ''' - outfile2 = self.handleOutputPath('emissions', sector, super_categories, output_dir) - if (os.path.exists(self.output_file_name)): - print("not generating new emissions plot") - return outfile2 - - sectors = self.getSectors(3) - if (not (sector in sectors)): - return "" - - output_values = self.processData(self.output_emissions, sector, super_categories) - - if self.region == '%': - title = 'Emissions Plot for ' + sector + ' across all regions' - else: - title = 'Emissions Plot for ' + sector + ' sector in region ' + self.region - - self.make_line_plot(output_values.copy(), 'Emissions', title) - - return outfile2; - - - ''' + """ --------------------------- Plot Generation related functions -------------------------------------- - ''' - def get_random_color(self, pastel_factor = 0.5): - return [(x+pastel_factor)/(1.0+pastel_factor) for x in [random.uniform(0,1.0) for i in 
[1,2,3]]] - - def color_distance(self, c1,c2): - return sum([abs(x[0]-x[1]) for x in zip(c1,c2)]) - - def get_cmap(self, N): - '''Returns a function that maps each index in 0, 1, ... N-1 to a distinct - RGB color.''' - color_norm = colors.Normalize(vmin=0, vmax=N-1) - # More colormaps: https://matplotlib.org/examples/color/colormaps_reference.html - scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='viridis') - def map_index_to_rgb_color(index): - return scalar_map.to_rgba(index) - return map_index_to_rgb_color - - def generate_new_color(self, existing_colors,pastel_factor = 0.5): - max_distance = None - best_color = None - for i in range(0,100): - color = self.get_random_color(pastel_factor = pastel_factor) - if not existing_colors: - return color - best_distance = min([self.color_distance(color,c) for c in existing_colors]) - if not max_distance or best_distance > max_distance: - max_distance = best_distance - best_color = color - return best_color - - def makeStackedBarPlot(self, data, xlabel, ylabel, xvar, title): - random.seed(10) - - handles = list() - xaxis=data[xvar] - data.pop('c',0) - data.pop(xvar,0) - stackedBars = data.keys() - colorMapForBars=dict() - colors = [] - plt.figure() - - cmap = self.get_cmap( len(stackedBars) ) - for i in range(0,len(stackedBars)): - # colors.append(self.generate_new_color(colors,pastel_factor = 0.9)) - # colorMapForBars[data.keys()[i]]=colors[i] - colorMapForBars[list(data.keys())[i]]=cmap(i) - - width = min([xaxis[i+1] - xaxis[i] for i in range(0, len(xaxis)-1)])/2.0 - b = [0]*len(xaxis) - - #plt.figure() - - for bar in stackedBars: - h = plt.bar(xaxis, data[bar], width, bottom = b, color = colorMapForBars[bar]) - handles.append(h) - b = [b[j] + data[bar][j] for j in range(0, len(b))] - - plt.xlabel(xlabel) - plt.ylabel(ylabel) - # plt.xticks([width*0.5 + i for i in xaxis], [str(i) for i in xaxis]) - plt.xticks([i for i in xaxis], [str(i) for i in xaxis]) - plt.title(title) - lgd = plt.legend([h[0] for h in handles], stackedBars, bbox_to_anchor = (1.2, 1),fontsize=7.5) - #plt.show() - plt.savefig(self.output_file_name, bbox_extra_artists=(lgd,), bbox_inches='tight') - - def make_line_plot(self, plot_var, label, title): - handles = list() - periods=plot_var['periods'] - plot_var.pop('periods',0) - techs = plot_var.keys() - random.seed(10) - color_map=dict() - colors = [] - width = 1.5 - plt.figure() - - cmap = self.get_cmap( len(techs) ) - for i in range(0,len(techs)): - # colors.append(self.generate_new_color(colors,pastel_factor = 0.9)) - # color_map[plot_var.keys()[i]]=colors[i] - color_map[plot_var.keys()[i]]=cmap(i) - - b = [0]*len(periods) - for tech in techs: - h = plt.plot(periods, plot_var[tech],color = color_map[tech], linestyle='--', marker='o') - handles.append(h) - - plt.xlabel("Years") - plt.ylabel(label) - #plt.xticks([i + width*0.5 for i in periods], [str(i) for i in periods]) - plt.xticks(periods) - plt.title(title) - lgd = plt.legend([h[0] for h in handles], techs, bbox_to_anchor = (1.2, 1),fontsize=7.5) - #plt.show() - plt.savefig(self.output_file_name, bbox_extra_artists=(lgd,), bbox_inches='tight') - + """ + + def get_random_color(self, pastel_factor=0.5): + return [ + (x + pastel_factor) / (1.0 + pastel_factor) + for x in [random.uniform(0, 1.0) for i in [1, 2, 3]] + ] + + def color_distance(self, c1, c2): + return sum([abs(x[0] - x[1]) for x in zip(c1, c2)]) + + def get_cmap(self, N): + """Returns a function that maps each index in 0, 1, ... 
N-1 to a distinct
+        RGB color."""
+        color_norm = colors.Normalize(vmin=0, vmax=N - 1)
+        # More colormaps: https://matplotlib.org/examples/color/colormaps_reference.html
+        scalar_map = cmx.ScalarMappable(norm=color_norm, cmap="viridis")
+
+        def map_index_to_rgb_color(index):
+            return scalar_map.to_rgba(index)
+
+        return map_index_to_rgb_color
+
+    def generate_new_color(self, existing_colors, pastel_factor=0.5):
+        max_distance = None
+        best_color = None
+        for i in range(0, 100):
+            color = self.get_random_color(pastel_factor=pastel_factor)
+            if not existing_colors:
+                return color
+            best_distance = min(
+                [self.color_distance(color, c) for c in existing_colors]
+            )
+            if not max_distance or best_distance > max_distance:
+                max_distance = best_distance
+                best_color = color
+        return best_color
+
+    def makeStackedBarPlot(self, data, xlabel, ylabel, xvar, title):
+        random.seed(10)
+
+        handles = list()
+        xaxis = data[xvar]
+        data.pop("c", 0)
+        data.pop(xvar, 0)
+        stackedBars = data.keys()
+        colorMapForBars = dict()
+        colors = []
+        plt.figure()
+
+        cmap = self.get_cmap(len(stackedBars))
+        for i in range(0, len(stackedBars)):
+            # colors.append(self.generate_new_color(colors,pastel_factor = 0.9))
+            # colorMapForBars[data.keys()[i]]=colors[i]
+            colorMapForBars[list(data.keys())[i]] = cmap(i)
+
+        width = min([xaxis[i + 1] - xaxis[i] for i in range(0, len(xaxis) - 1)]) / 2.0
+        b = [0] * len(xaxis)
+
+        # plt.figure()
+
+        for bar in stackedBars:
+            h = plt.bar(xaxis, data[bar], width, bottom=b, color=colorMapForBars[bar])
+            handles.append(h)
+            b = [b[j] + data[bar][j] for j in range(0, len(b))]
+
+        plt.xlabel(xlabel)
+        plt.ylabel(ylabel)
+        # plt.xticks([width*0.5 + i for i in xaxis], [str(i) for i in xaxis])
+        plt.xticks([i for i in xaxis], [str(i) for i in xaxis])
+        plt.title(title)
+        lgd = plt.legend(
+            [h[0] for h in handles], stackedBars, bbox_to_anchor=(1.2, 1), fontsize=7.5
+        )
+        # plt.show()
+        plt.savefig(
+            self.output_file_name, bbox_extra_artists=(lgd,), bbox_inches="tight"
+        )
+
+    def make_line_plot(self, plot_var, label, title):
+        handles = list()
+        periods = plot_var["periods"]
+        plot_var.pop("periods", 0)
+        techs = plot_var.keys()
+        random.seed(10)
+        color_map = dict()
+        colors = []
+        width = 1.5
+        plt.figure()
+
+        cmap = self.get_cmap(len(techs))
+        for i in range(0, len(techs)):
+            # colors.append(self.generate_new_color(colors,pastel_factor = 0.9))
+            # color_map[plot_var.keys()[i]]=colors[i]
+            # dict views are not subscriptable in Python 3, so wrap keys() in list()
+            color_map[list(plot_var.keys())[i]] = cmap(i)
+
+        b = [0] * len(periods)
+        for tech in techs:
+            h = plt.plot(
+                periods,
+                plot_var[tech],
+                color=color_map[tech],
+                linestyle="--",
+                marker="o",
+            )
+            handles.append(h)
+
+        plt.xlabel("Years")
+        plt.ylabel(label)
+        # plt.xticks([i + width*0.5 for i in periods], [str(i) for i in periods])
+        plt.xticks(periods)
+        plt.title(title)
+        lgd = plt.legend(
+            [h[0] for h in handles], techs, bbox_to_anchor=(1.2, 1), fontsize=7.5
+        )
+        # plt.show()
+        plt.savefig(
+            self.output_file_name, bbox_extra_artists=(lgd,), bbox_inches="tight"
+        )


 # Function used for command line purposes. Parses arguments and then calls relevant functions. 
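 # For example, assuming a hypothetical SQLite database "temoa_utopia.sqlite"
 # holding a scenario named "test_run", a capacity plot for an "electric" sector
 # could be generated with:
 #     python data_processing/MakeOutputPlots.py -i temoa_utopia.sqlite \
 #         -r global -s test_run -p capacity -c electric -o ./plots --super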
def GeneratePlot(args): - parser = argparse.ArgumentParser(description="Generate Output Plot") - parser.add_argument('-i', '--input', action="store", dest="input", help="Input Database Filename ", required=True) - parser.add_argument('-r', '--region', action="store", dest="region", help="Region name, input 'global' if global results are desired", required=True) - parser.add_argument('-s', '--scenario', action="store", dest="scenario", help="Model run scenario name", required=True) - parser.add_argument('-p', '--plot-type', action="store", dest="type", help="Type of Plot to be generated", choices=['capacity', 'flow', 'emissions'], required=True) - parser.add_argument('-c', '--sector', action="store", dest="sector", help="Sector for which plot to be generated", required=True) - parser.add_argument('-o', '--output', action="store", dest="output_dir", help='Output plot location', default='./') - parser.add_argument('--super', action="store_true", dest="super_categories", help="Merge Technologies or not", default=False) - - options = parser.parse_args(args) - - result = OutputPlotGenerator(options.input, options.region, options.scenario, options.super_categories) - error = '' - if (options.type == 'capacity'): - error = result.generatePlotForCapacity(options.sector, options.super_categories, options.output_dir) - elif (options.type == 'flow'): - error = result.generatePlotForOutputFlow(options.sector, options.super_categories, options.output_dir) - elif (options.type == 'emissions'): - error = result.generatePlotForEmissions(options.sector, options.super_categories, options.output_dir) - - if (error == ''): - print("Error: The sector doesn't exist for the selected plot type and database") - else: - print("Done. Look for output plot images in directory:"+os.path.join(options.output_dir,error)) - - -if __name__ == '__main__': - GeneratePlot(sys.argv[1:]) \ No newline at end of file + parser = argparse.ArgumentParser(description="Generate Output Plot") + parser.add_argument( + "-i", + "--input", + action="store", + dest="input", + help="Input Database Filename ", + required=True, + ) + parser.add_argument( + "-r", + "--region", + action="store", + dest="region", + help="Region name, input 'global' if global results are desired", + required=True, + ) + parser.add_argument( + "-s", + "--scenario", + action="store", + dest="scenario", + help="Model run scenario name", + required=True, + ) + parser.add_argument( + "-p", + "--plot-type", + action="store", + dest="type", + help="Type of Plot to be generated", + choices=["capacity", "flow", "emissions"], + required=True, + ) + parser.add_argument( + "-c", + "--sector", + action="store", + dest="sector", + help="Sector for which plot to be generated", + required=True, + ) + parser.add_argument( + "-o", + "--output", + action="store", + dest="output_dir", + help="Output plot location", + default="./", + ) + parser.add_argument( + "--super", + action="store_true", + dest="super_categories", + help="Merge Technologies or not", + default=False, + ) + + options = parser.parse_args(args) + + result = OutputPlotGenerator( + options.input, options.region, options.scenario, options.super_categories + ) + error = "" + if options.type == "capacity": + error = result.generatePlotForCapacity( + options.sector, options.super_categories, options.output_dir + ) + elif options.type == "flow": + error = result.generatePlotForOutputFlow( + options.sector, options.super_categories, options.output_dir + ) + elif options.type == "emissions": + error = 
result.generatePlotForEmissions(
+            options.sector, options.super_categories, options.output_dir
+        )
+
+    # an empty return value means the requested sector was not found
+    if error == "":
+        print("Error: The sector doesn't exist for the selected plot type and database")
+    else:
+        print(
+            "Done. Look for output plot images in directory:"
+            + os.path.join(options.output_dir, error)
+        )
+
+
+if __name__ == "__main__":
+    GeneratePlot(sys.argv[1:])
diff --git a/temoa_model/ReferenceModel.py b/temoa_model/ReferenceModel.py
index 3e640644..7f46796b 120000
--- a/temoa_model/ReferenceModel.py
+++ b/temoa_model/ReferenceModel.py
@@ -1 +1 @@
-temoa_stochastic.py
\ No newline at end of file
+temoa_stochastic.py
diff --git a/temoa_model/__main__.py b/temoa_model/__main__.py
index 160b27b3..18290cd1 100644
--- a/temoa_model/__main__.py
+++ b/temoa_model/__main__.py
@@ -22,10 +22,9 @@
 # This script is invoked when either the 'temoa_model' folder or the 'temoa.py'
 # zipped archive is called from the command line:
 # $ python temoa_model/ path/to/dat/file
-# or 
+# or
 # $ python temoa.py path/to/dat/file

 from temoa_model import *

 runModel()
-
diff --git a/temoa_model/get_region.py b/temoa_model/get_region.py
index 976e81ac..5e2642c1 100644
--- a/temoa_model/get_region.py
+++ b/temoa_model/get_region.py
@@ -5,8 +5,12 @@
 def get_region_list(db_file):
     region_list = {}
     con = sqlite3.connect(db_file)
-    cur = con.cursor() # a database cursor is a control structure that enables traversal over the records in a database
-    con.text_factory = str # this ensures data is explored with the correct UTF-8 encoding
+    cur = (
+        con.cursor()
+    )  # a database cursor is a control structure that enables traversal over the records in a database
+    con.text_factory = (
+        str  # this ensures data is explored with the correct UTF-8 encoding
+    )

     cur.execute("SELECT DISTINCT regions FROM regions")

     for row in cur:
@@ -16,4 +20,4 @@ def get_region_list(db_file):
     cur.close()
     con.close()

-    return OrderedDict(sorted(region_list.items(), key=lambda x: x[1]))
\ No newline at end of file
+    return OrderedDict(sorted(region_list.items(), key=lambda x: x[1]))
diff --git a/temoa_model/pformat_results.py b/temoa_model/pformat_results.py
index f42ae825..fa8b5b6a 100644
--- a/temoa_model/pformat_results.py
+++ b/temoa_model/pformat_results.py
@@ -20,13 +20,13 @@
 """
 # ---------------------------------------------------------------------------
-# This module processes model output data, which can be sent to three possible 
+# This module processes model output data, which can be sent to three possible
 # locations: the shell, a user-specified database, or an Excel file. Users can
 # configure the available outputs. 
# --------------------------------------------------------------------------- -__all__ = ('pformat_results', 'stringify_data') +__all__ = ("pformat_results", "stringify_data") from collections import defaultdict from sys import stderr as SE, stdout as SO @@ -41,7 +41,7 @@ from temoa_config import TemoaConfig # Need line below to import DB_to_Excel.py from data_processing -sys.path.append(os.path.join(os.getcwd(), 'data_processing')) +sys.path.append(os.path.join(os.getcwd(), "data_processing")) from DB_to_Excel import make_excel # Ensure compatibility with Python 2.7 and 3 @@ -53,719 +53,957 @@ from pyomo.core import value -def stringify_data ( data, ostream=SO, format='plain' ): - # data is a list of tuples of ('var_name[index]', value) - # data must be a list, as this function replaces each row, - # format is currently unused, but will be utilized to implement things like - # csv - - # This padding code is what makes the display of the output values - # line up on the decimal point. - for i, (v, val) in enumerate( data ): - ipart, fpart = repr(f"{val:.6f}").split('.') - data[i] = (ipart, fpart, v) - cell_lengths = ( map(len, l[:-1] ) for l in data ) - max_lengths = map(max, zip(*cell_lengths)) # max length of each column - fmt = u' {{:>{:d}}}.{{:<{:d}}} {{}}\n'.format( *max_lengths ) - - for row in data: - ostream.write( fmt.format(*row) ) - - -def pformat_results ( pyomo_instance, pyomo_result, options ): - from pyomo.core import Objective, Var, Constraint - - output = StringIO() - - m = pyomo_instance # lazy typist - result = pyomo_result - - soln = result['Solution'] - solv = result['Solver'] # currently unused, but may want it later - prob = result['Problem'] # currently unused, but may want it later - - optimal_solutions = ( - 'feasible', 'globallyOptimal', 'locallyOptimal', 'optimal' - ) - if str(soln.Status) not in optimal_solutions: - output.write( 'No solution found.' ) - return output - - objs = list(m.component_data_objects( Objective )) - if len( objs ) > 1: - msg = '\nWarning: More than one objective. 
Using first objective.\n' - SE.write( msg ) - - Cons = soln.Constraint - - - def collect_result_data( cgroup, clist, epsilon): - # cgroup = "Component group"; i.e., Vars or Cons - # clist = "Component list"; i.e., where to store the data - # epsilon = absolute value below which to ignore a result - results = defaultdict(list) - for name, data in cgroup.items(): - if 'Value' not in data.keys() or (abs( data['Value'] ) < epsilon ) : continue - - # name looks like "Something[some,index]" - group, index = name[:-1].split('[') - results[ group ].append( (name.replace("'", ''), data['Value']) ) - clist.extend( t for i in sorted( results ) for t in sorted(results[i])) - - supp_outputs_df = pd.DataFrame.from_dict(cgroup, orient='index') - supp_outputs_df = supp_outputs_df.loc[(supp_outputs_df != 0).any(axis=1)] - if 'Dual' in supp_outputs_df.columns: - duals = supp_outputs_df['Dual'].copy() - duals = duals[abs(duals)>1e-5] - duals.index.name = 'constraint_name' - duals = duals.to_frame() - if (hasattr(options, 'scenario')) & (len(duals)>0): - duals.loc[:,'scenario'] = options.scenario - return duals - else: - return [] - - #Create a dictionary in which to store "solved" variable values - svars = defaultdict( lambda: defaultdict( float )) - - con_info = list() - epsilon = 1e-9 # threshold for "so small it's zero" - - emission_keys = { (r, i, t, v, o) : set() for r, e, i, t, v, o in m.EmissionActivity } - for r, e, i, t, v, o in m.EmissionActivity: - emission_keys[(r, i, t, v, o)].add(e) - P_0 = min( m.time_optimize ) - P_e = m.time_future.last() - GDR = value( m.GlobalDiscountRate ) - MPL = m.ModelProcessLife - LLN = m.LifetimeLoanProcess - x = 1 + GDR # convenience variable, nothing more - - if hasattr(options, 'file_location') and os.path.join('temoa_model', 'config_sample_myopic') in options.file_location: - original_dbpath = options.output - con = sqlite3.connect(original_dbpath) - cur = con.cursor() - time_periods = cur.execute("SELECT t_periods FROM time_periods WHERE flag='f'").fetchall() - P_0 = time_periods[0][0] - P_e = time_periods[-1][0] - # We need to know if a myopic run is the last run or not. 
- P_e_time_optimize = time_periods[-2][0] - P_e_current = int(options.file_location.split("_")[-1]) - con.commit() - con.close() - - # Extract optimal decision variable values related to commodity flow: - for r, p, s, d, t, v in m.V_StorageLevel: - val = value( m.V_StorageLevel[r, p, s, d, t, v] ) - if abs(val) < epsilon: continue - - svars['V_StorageLevel'][r, p, s, d, t, v] = val - - # vflow_in is defined only for storage techs - for r, p, s, d, i, t, v, o in m.V_FlowIn: - val_in = value( m.V_FlowIn[r, p, s, d, i, t, v, o] ) - if abs(val_in) < epsilon: continue - - svars['V_FlowIn'][r, p, s, d, i, t, v, o] = val_in - - for r, p, s, d, i, t, v, o in m.V_FlowOut: - val_out = value( m.V_FlowOut[r, p, s, d, i, t, v, o] ) - if abs(val_out) < epsilon: continue - - svars['V_FlowOut'][r, p, s, d, i, t, v, o] = val_out - - if t not in m.tech_storage: - val_in = value( m.V_FlowOut[r, p, s, d, i, t, v, o] ) / value(m.Efficiency[r, i, t, v, o]) - svars['V_FlowIn'][r, p, s, d, i, t, v, o] = val_in - - if (r, i, t, v, o) not in emission_keys: continue - - emissions = emission_keys[r, i, t, v, o] - for e in emissions: - evalue = val_out * m.EmissionActivity[r, e, i, t, v, o] - svars[ 'V_EmissionActivityByPeriodAndProcess' ][r, p, e, t, v] += evalue - - for r, p, i, t, v, o in m.V_FlowOutAnnual: - for s in m.time_season: - for d in m.time_of_day: - val_out = value( m.V_FlowOutAnnual[r, p, i, t, v, o] ) * value( m.SegFrac[s , d ]) - if abs(val_out) < epsilon: continue - svars['V_FlowOut'][r, p, s, d, i, t, v, o] = val_out - svars['V_FlowIn'][r, p, s, d, i, t, v, o] = val_out / value(m.Efficiency[r, i, t, v, o]) - if (r, i, t, v, o) not in emission_keys: continue - emissions = emission_keys[r, i, t, v, o] - for e in emissions: - evalue = val_out * m.EmissionActivity[r, e, i, t, v, o] - svars[ 'V_EmissionActivityByPeriodAndProcess' ][r, p, e, t, v] += evalue - - for r, p, s, d, i, t, v, o in m.V_Curtailment: - val = value( m.V_Curtailment[r, p, s, d, i, t, v, o] ) - if abs(val) < epsilon: continue - svars['V_Curtailment'][r, p, s, d, i, t, v, o] = val - svars['V_FlowIn'][r, p, s, d, i, t, v, o] = (val + value( m.V_FlowOut[r, p, s, d, i, t, v, o] )) / value(m.Efficiency[r, i, t, v, o]) - - if (r, i, t, v, o) not in emission_keys: continue - - emissions = emission_keys[r, i, t, v, o] - for e in emissions: - evalue = val * m.EmissionActivity[r, e, i, t, v, o] - svars[ 'V_EmissionActivityByPeriodAndProcess' ][r, p, e, t, v] += evalue - - for r, p, i, t, v, o in m.V_FlexAnnual: - for s in m.time_season: - for d in m.time_of_day: - val_out = value( m.V_FlexAnnual[r, p, i, t, v, o] ) * value( m.SegFrac[s , d ]) - if abs(val_out) < epsilon: continue - svars['V_Curtailment'][r, p, s, d, i, t, v, o] = val_out - svars['V_FlowOut'][r, p, s, d, i, t, v, o] -= val_out - - - for r, p, s, d, i, t, v, o in m.V_Flex: - val_out = value( m.V_Flex[r, p, s, d, i, t, v, o] ) - if abs(val_out) < epsilon: continue - svars['V_Curtailment'][r, p, s, d, i, t, v, o] = val_out - svars['V_FlowOut'][r, p, s, d, i, t, v, o] -= val_out - - # Extract optimal decision variable values related to capacity: - if hasattr(options, 'file_location') and os.path.join('temoa_model', 'config_sample_myopic') not in options.file_location: - for r, t, v in m.V_Capacity: - val = value( m.V_Capacity[r, t, v] ) - if abs(val) < epsilon: continue - svars['V_Capacity'][r, t, v] = val - else: - for r, t, v in m.V_Capacity: - if v in m.time_optimize: - val = value( m.V_Capacity[r, t, v] ) - if abs(val) < epsilon: continue - svars['V_Capacity'][r, t, v] = val - - 
for r, p, t in m.V_CapacityAvailableByPeriodAndTech: - val = value( m.V_CapacityAvailableByPeriodAndTech[r, p, t] ) - if abs(val) < epsilon: continue - svars['V_CapacityAvailableByPeriodAndTech'][r, p, t] = val - - # Calculate model costs: - if hasattr(options, 'file_location') and os.path.join('temoa_model', 'config_sample_myopic') not in options.file_location: - # This is a generic workaround. Not sure how else to automatically discover - # the objective name - obj_name, obj_value = objs[0].getname(True), value( objs[0] ) - svars[ 'Objective' ]["('"+obj_name+"')"] = obj_value - - for r, t, v in m.CostInvest.sparse_iterkeys(): # Returns only non-zero values - - icost = value( m.V_Capacity[r, t, v] ) - if abs(icost) < epsilon: continue - icost *= value( m.CostInvest[r, t, v] )*( - ( - 1 - x**( -min( value(m.LifetimeProcess[r, t, v]), P_e - v ) ) - )/( - 1 - x**( -value( m.LifetimeProcess[r, t, v] ) ) - ) - ) - svars[ 'Costs' ][ 'V_UndiscountedInvestmentByProcess', r, t, v] += icost - - icost *= value( m.LoanAnnualize[r, t, v] ) - icost *= ( - value( LLN[r, t, v] ) if not GDR else - (x **(P_0 - v + 1) * (1 - x **(-value( LLN[r, t, v] ))) / GDR) - ) - - svars[ 'Costs' ][ 'V_DiscountedInvestmentByProcess', r, t, v] += icost - - - for r, p, t, v in m.CostFixed.sparse_iterkeys(): - fcost = value( m.V_Capacity[r, t, v] ) - if abs(fcost) < epsilon: continue - - fcost *= value( m.CostFixed[r, p, t, v] ) - svars[ 'Costs' ][ 'V_UndiscountedFixedCostsByProcess', r, t, v] += fcost * value( MPL[r, p, t, v] ) - - fcost *= ( - value( MPL[r, p, t, v] ) if not GDR else - (x **(P_0 - p + 1) * (1 - x **(-value( MPL[r, p, t, v] ))) / GDR) - ) - - svars[ 'Costs' ][ 'V_DiscountedFixedCostsByProcess', r, t, v] += fcost - - for r, p, t, v in m.CostVariable.sparse_iterkeys(): - if t not in m.tech_annual: - vcost = sum( - value (m.V_FlowOut[r, p, S_s, S_d, S_i, t, v, S_o]) - for S_i in m.processInputs[r, p, t, v] - for S_o in m.ProcessOutputsByInput[r, p, t, v, S_i] - for S_s in m.time_season - for S_d in m.time_of_day - ) - else: - vcost = sum( - value (m.V_FlowOutAnnual[r, p, S_i, t, v, S_o]) - for S_i in m.processInputs[r, p, t, v] - for S_o in m.ProcessOutputsByInput[r, p, t, v, S_i] - ) - if abs(vcost) < epsilon: continue - - vcost *= value( m.CostVariable[r, p, t, v] ) - svars[ 'Costs' ][ 'V_UndiscountedVariableCostsByProcess', r, t, v] += vcost * value( MPL[r, p, t, v] ) - vcost *= ( - value( MPL[r, p, t, v] ) if not GDR else - (x **(P_0 - p + 1) * (1 - x **(-value( MPL[r, p, t, v] ))) / GDR) - ) - svars[ 'Costs' ][ 'V_DiscountedVariableCostsByProcess', r, t, v] += vcost - - - - #update the costs of exchange technologies. - #Assumption 1: If Ri-Rj appears in the cost tables but Rj-Ri does not, - #then the total costs are distributed between the regions - #Ri and Rj proportional to their use of the exchange technology connecting the - #regions. - #Assumption 2: If both the directional entries appear in the cost tables, - #Assumption 1 is no longer applied and the costs are calculated as they - #are entered in the cost tables. - # assumption 3: Unlike other output tables in which Ri-Rj and Rj-Ri entries - # are allowed in the region column, for the Output_Costs table the region - #to the right of the hyphen sign gets the costs. 
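The cost loops above collapse a levelized stream of annual payments into a present value at the first optimization year P_0, using the convenience variable x = 1 + GDR. A worked sketch of that annuity factor (the function and its names are illustrative, not part of the model):

    def present_value(annual_cost, base_year, start_year, length, gdr):
        # Present value at base_year of `annual_cost` paid each year for
        # `length` years beginning in start_year; gdr == 0 falls back to an
        # undiscounted sum, matching the `if not GDR` branches above.
        if not gdr:
            return annual_cost * length
        x = 1 + gdr
        return annual_cost * x ** (base_year - start_year + 1) * (1 - x ** (-length)) / gdr

    # 100/yr for 10 years starting in 2025, discounted to 2020 at 5%:
    # present_value(100, 2020, 2025, 10, 0.05)  ->  ~635.3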
- for i in m.RegionalExchangeCapacityConstraint_rrtv.iterkeys(): - reg_dir1 = i[0]+"-"+i[1] - reg_dir2 = i[1]+"-"+i[0] - tech = i[2] - vintage = i[3] - key = (reg_dir1, tech, vintage) - try: - act_dir1 = value (sum(m.V_FlowOut[reg_dir1, p, s, d, S_i, tech, vintage, S_o] - for p in m.time_optimize if (p < vintage + value(m.LifetimeProcess[reg_dir1, tech, vintage])) and (p >= vintage) - for s in m.time_season - for d in m.time_of_day - for S_i in m.processInputs[reg_dir1, p, tech, vintage] - for S_o in m.ProcessOutputsByInput[reg_dir1, p, tech, vintage, S_i] - )) - act_dir2 = value (sum(m.V_FlowOut[reg_dir2, p, s, d, S_i, tech, vintage, S_o] - for p in m.time_optimize if (p < vintage + value(m.LifetimeProcess[reg_dir1, tech, vintage])) and (p >= vintage) - for s in m.time_season - for d in m.time_of_day - for S_i in m.processInputs[reg_dir2, p, tech, vintage] - for S_o in m.ProcessOutputsByInput[reg_dir2, p, tech, vintage, S_i] - )) - except: - act_dir1 = value (sum(m.V_FlowOutAnnual[reg_dir1, p, S_i, tech, vintage, S_o] - for p in m.time_optimize if (p < vintage + value(m.LifetimeProcess[reg_dir1, tech, vintage])) and (p >= vintage) - for S_i in m.processInputs[reg_dir1, p, tech, vintage] - for S_o in m.ProcessOutputsByInput[reg_dir1, p, tech, vintage, S_i] - )) - act_dir2 = value (sum(m.V_FlowOutAnnual[reg_dir2, p, S_i, tech, vintage, S_o] - for p in m.time_optimize if (p < vintage + value(m.LifetimeProcess[reg_dir1, tech, vintage])) and (p >= vintage) - for S_i in m.processInputs[reg_dir2, p, tech, vintage] - for S_o in m.ProcessOutputsByInput[reg_dir2, p, tech, vintage, S_i] - )) - - for item in list(svars[ 'Costs' ]): - if item[2] == tech: - opposite_dir = item[1][item[1].find("-")+1:]+"-"+item[1][:item[1].find("-")] - if (item[0],opposite_dir,item[2],item[3]) in svars[ 'Costs' ].keys(): - continue #if both directional entries are already in svars[ 'Costs' ], they're left intact. - if item[1] == reg_dir1: - if (act_dir1+act_dir2)>0: - svars[ 'Costs' ][(item[0],reg_dir2,item[2],item[3])] = svars[ 'Costs' ][item] * act_dir2 / (act_dir1 + act_dir2) - svars[ 'Costs' ][item] = svars[ 'Costs' ][item] * act_dir1 / (act_dir1 + act_dir2) - - - #Remove Ri-Rj entries from being populated in the Outputs_Costs. 
Ri-Rj means a cost - #for region Rj - for item in list(svars[ 'Costs' ]): - if item[2] in m.tech_exchange: - svars[ 'Costs' ][(item[0],item[1][item[1].find("-")+1:],item[2],item[3])] = svars[ 'Costs' ][item] - del svars[ 'Costs' ][item] - - if options.saveDUALS: - duals = collect_result_data( Cons, con_info, epsilon=1e-9 ) - - msg = ( 'Model name: %s\n' - 'Objective function value (%s): %s\n' - 'Non-zero variable values:\n' - ) - if hasattr(options, 'file_location') and os.path.join('temoa_model', 'config_sample_myopic') not in options.file_location: - output.write( msg % (m.name, obj_name, obj_value) ) - - def make_var_list ( variables ): - var_list = [] - for vgroup, values in sorted( variables.items() ): - for vindex, val in sorted( values.items() ): - if isinstance( vindex, tuple ): - vindex = ','.join( str(i) for i in vindex ) - var_list.append(( '{}[{}]'.format(vgroup, vindex), val )) - return var_list - - if svars: - stringify_data( make_var_list(svars), output ) - else: - output.write( '\nAll variables have a zero (0) value.\n' ) - - if len( con_info ) > 0: - output.write( '\nBinding constraint values:\n' ) - stringify_data( con_info, output ) - del con_info - else: - # Since not all Coopr solvers give constraint results, must check - msg = '\nSelected Coopr solver plugin does not give constraint data.\n' - output.write( msg ) - - output.write( '\n\nIf you use these results for a published article, ' - "please run Temoa with the '--how_to_cite' command line argument for " - 'citation information.\n') - - # ----------------------------------------------------------------- - # Write outputs stored in dictionary to the user-specified database - # ----------------------------------------------------------------- - - # Table dictionary below maps variable names to database table names - tables = { "V_FlowIn" : "Output_VFlow_In", \ - "V_FlowOut" : "Output_VFlow_Out", \ - "V_Curtailment" : "Output_Curtailment", \ - "V_Capacity" : "Output_V_Capacity", \ - "V_CapacityAvailableByPeriodAndTech" : "Output_CapacityByPeriodAndTech", \ - "V_EmissionActivityByPeriodAndProcess" : "Output_Emissions", \ - "Objective" : "Output_Objective", \ - "Costs" : "Output_Costs" - } - - db_tables = ['time_periods', 'time_season', 'time_of_day', 'technologies', 'commodities',\ - 'LifetimeTech', 'LifetimeProcess', 'Efficiency', 'EmissionActivity', 'ExistingCapacity'] - - if isinstance(options, TemoaConfig): - if not options.output: - if options.saveTEXTFILE or options.keepPyomoLP: - for inpu in options.dot_dat: - print(inpu) - file_ty = re.search(r"\b([\w-]+)\.(\w+)\b", inpu) - new_dir = options.path_to_data+os.sep+file_ty.group(1)+'_'+options.scenario+'_model' - if os.path.exists( new_dir ): - rmtree( new_dir ) - os.mkdir(new_dir) - print("No Output File specified.") - return output - - if not os.path.exists(options.output) : - print("Please put the "+options.output+" file in the right Directory") - return output - - - con = sqlite3.connect(options.output) - cur = con.cursor() # A database cursor enables traversal over DB records - con.text_factory = str # This ensures data is explored with UTF-8 encoding - - ### Copy tables from Input File to DB file. - # IF output file is empty database. - cur.execute("SELECT * FROM technologies") - is_db_empty = False #False for empty db file - for elem in cur: - is_db_empty = True #True for non-empty db file - break - - - if is_db_empty: #This file could be schema with populated results from previous run. Or it could be a normal db file. 
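Note that is_db_empty above is set to True when the technologies table returns at least one row, so despite its name the True branch handles a populated database. A clearer spelling of the same single-row probe (a sketch, not the code used here):

    import sqlite3

    def table_has_rows(con, table="technologies"):
        # Fetch at most one row; None means the table is empty.
        # (Interpolating the table name is safe only for trusted input.)
        return con.execute(f"SELECT 1 FROM {table} LIMIT 1").fetchone() is not None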
- cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='input_file';") - does_input_file_table_exist = False - for i in cur: # This means that the 'input_file' table exists in db. - does_input_file_table_exist = True - if does_input_file_table_exist: #This block distinguishes normal database from schema. - #This is schema file. - cur.execute("SELECT file FROM input_file WHERE id is '1';") - for i in cur: - tagged_file = i[0] - tagged_file = re.sub('["]', "", tagged_file) - - if tagged_file == options.dot_dat[0]: - #If Input_file name matches, add output and check tech/comm - dat_to_db(options.dot_dat[0], con) - else: - #If not a match, delete output tables and update input_file. Call dat_to_db - for i in db_tables: - cur.execute("DELETE FROM "+i+";") - cur.execute("VACUUM;") - - for i in tables.keys(): - cur.execute("DELETE FROM "+tables[i]+";") - cur.execute("VACUUM;") - - for i in options.dot_dat: - cur.execute("DELETE FROM input_file WHERE id=1;") - cur.execute("INSERT INTO input_file VALUES(1, '"+i+"');") - break - dat_to_db(i, con) - - else: #empty schema db file - cur.execute("CREATE TABLE IF NOT EXISTS input_file ( id integer PRIMARY KEY, file varchar(30));") - - for i in tables.keys(): - cur.execute("DELETE FROM "+tables[i]+";") - cur.execute("VACUUM;") - - for i in options.dot_dat: - cur.execute("DELETE FROM input_file WHERE id=1;") - cur.execute("INSERT INTO input_file(id, file) VALUES(?, ?);", (1, '"'+i+'"')) - break - dat_to_db(i, con) - - - for table in svars.keys() : - if table in tables : - cur.execute("SELECT DISTINCT scenario FROM '"+tables[table]+"'") - for val in cur : - # If scenario exists, delete unless it's a myopic run (for myopic, the scenario results are deleted - # before the run in temoa_config.py) - if hasattr(options, 'file_location') and options.scenario == val[0] and os.path.join('temoa_model', 'config_sample_myopic') not in options.file_location: - cur.execute("DELETE FROM "+tables[table]+" \ - WHERE scenario is '"+options.scenario+"'") - if table == 'Objective' : # Only table without sector info - for key in svars[table].keys(): - key_str = str(key) # only 1 row to write - key_str = key_str[1:-1] # Remove parentheses - cur.execute("INSERT INTO "+tables[table]+" \ - VALUES('"+options.scenario+"',"+key_str+", \ - "+str(svars[table][key])+");") - else : # First add 'NULL' for sector then update - for key in svars[table].keys() : # Need to loop over keys (rows) - key_str = str(key) - key_str = key_str[1:-1] # Remove parentheses - if table != 'Costs': - cur.execute("INSERT INTO "+tables[table]+ \ - " VALUES('"+str(key[0])+"', '"+options.scenario+"','NULL', \ - "+key_str[key_str.find(',')+1:]+","+str(svars[table][key])+");") - else: - key_str = str((key[0],key[2],key[3])) - key_str = key_str[1:-1] # Remove parentheses - cur.execute("INSERT INTO "+tables[table]+ \ - " VALUES('"+str(key[1])+"', '"+options.scenario+"','NULL', \ - "+key_str+","+str(svars[table][key])+");") - cur.execute("UPDATE "+tables[table]+" SET sector = \ +def stringify_data(data, ostream=SO, format="plain"): + # data is a list of tuples of ('var_name[index]', value) + # data must be a list, as this function replaces each row, + # format is currently unused, but will be utilized to implement things like + # csv + + # This padding code is what makes the display of the output values + # line up on the decimal point. 
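As the comment above explains, the loop that follows splits each value at the decimal point, then right-justifies the integer parts and left-justifies the fractional parts so the points line up. The same technique in a standalone form (sample rows invented):

    rows = [("V_Capacity[a]", 1.5), ("V_FlowOut[b]", 123.25)]
    parts = [(f"{val:.6f}".split("."), name) for name, val in rows]
    int_w = max(len(ip) for (ip, _), _ in parts)
    frac_w = max(len(fp) for (_, fp), _ in parts)
    for (ip, fp), name in parts:
        print(f" {ip:>{int_w}}.{fp:<{frac_w}} {name}")
    #   1.500000 V_Capacity[a]
    # 123.250000 V_FlowOut[b]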
+    for i, (v, val) in enumerate(data):
+        ipart, fpart = f"{val:.6f}".split(".")
+        data[i] = (ipart, fpart, v)
+    cell_lengths = (map(len, l[:-1]) for l in data)
+    max_lengths = map(max, zip(*cell_lengths))  # max length of each column
+    fmt = " {{:>{:d}}}.{{:<{:d}}} {{}}\n".format(*max_lengths)
+
+    for row in data:
+        ostream.write(fmt.format(*row))
+
+
+def pformat_results(pyomo_instance, pyomo_result, options):
+    from pyomo.core import Objective, Var, Constraint
+
+    output = StringIO()
+
+    m = pyomo_instance  # lazy typist
+    result = pyomo_result
+
+    soln = result["Solution"]
+    solv = result["Solver"]  # currently unused, but may want it later
+    prob = result["Problem"]  # currently unused, but may want it later
+
+    optimal_solutions = ("feasible", "globallyOptimal", "locallyOptimal", "optimal")
+    if str(soln.Status) not in optimal_solutions:
+        output.write("No solution found.")
+        return output
+
+    objs = list(m.component_data_objects(Objective))
+    if len(objs) > 1:
+        msg = "\nWarning: More than one objective. Using first objective.\n"
+        SE.write(msg)
+
+    Cons = soln.Constraint
+
+    def collect_result_data(cgroup, clist, epsilon):
+        # cgroup = "Component group"; i.e., Vars or Cons
+        # clist = "Component list"; i.e., where to store the data
+        # epsilon = absolute value below which to ignore a result
+        results = defaultdict(list)
+        for name, data in cgroup.items():
+            if "Value" not in data.keys() or (abs(data["Value"]) < epsilon):
+                continue
+
+            # name looks like "Something[some,index]"
+            group, index = name[:-1].split("[")
+            results[group].append((name.replace("'", ""), data["Value"]))
+        clist.extend(t for i in sorted(results) for t in sorted(results[i]))
+
+        supp_outputs_df = pd.DataFrame.from_dict(cgroup, orient="index")
+        supp_outputs_df = supp_outputs_df.loc[(supp_outputs_df != 0).any(axis=1)]
+        if "Dual" in supp_outputs_df.columns:
+            duals = supp_outputs_df["Dual"].copy()
+            duals = duals[abs(duals) > 1e-5]
+            duals.index.name = "constraint_name"
+            duals = duals.to_frame()
+            if hasattr(options, "scenario") and (len(duals) > 0):
+                duals.loc[:, "scenario"] = options.scenario
+            return duals
+        else:
+            return []
+
+    # Create a dictionary in which to store "solved" variable values
+    svars = defaultdict(lambda: defaultdict(float))
+
+    con_info = list()
+    epsilon = 1e-9  # threshold for "so small it's zero"
+
+    emission_keys = {(r, i, t, v, o): set() for r, e, i, t, v, o in m.EmissionActivity}
+    for r, e, i, t, v, o in m.EmissionActivity:
+        emission_keys[(r, i, t, v, o)].add(e)
+    P_0 = min(m.time_optimize)
+    P_e = m.time_future.last()
+    GDR = value(m.GlobalDiscountRate)
+    MPL = m.ModelProcessLife
+    LLN = m.LifetimeLoanProcess
+    x = 1 + GDR  # convenience variable, nothing more
+
+    if (
+        hasattr(options, "file_location")
+        and os.path.join("temoa_model", "config_sample_myopic") in options.file_location
+    ):
+        original_dbpath = options.output
+        con = sqlite3.connect(original_dbpath)
+        cur = con.cursor()
+        time_periods = cur.execute(
+            "SELECT t_periods FROM time_periods WHERE flag='f'"
+        ).fetchall()
+        P_0 = time_periods[0][0]
+        P_e = time_periods[-1][0]
+        # We need to know if a myopic run is the last run or not.
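The comment above flags the question the next two assignments answer: in a myopic run, the second-to-last future period marks the last period actually optimized, and the numeric suffix of the config file name identifies the current window. A minimal sketch of the same lookup (an ORDER BY is added here for safety; the path is illustrative):

    import sqlite3

    con = sqlite3.connect("temoa.sqlite")  # illustrative path
    periods = [p for (p,) in con.execute(
        "SELECT t_periods FROM time_periods WHERE flag='f' ORDER BY t_periods"
    )]
    P_0, P_e = periods[0], periods[-1]  # first and last future period
    P_e_time_optimize = periods[-2]     # last period actually optimized
    con.close()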
+ P_e_time_optimize = time_periods[-2][0] + P_e_current = int(options.file_location.split("_")[-1]) + con.commit() + con.close() + + # Extract optimal decision variable values related to commodity flow: + for r, p, s, d, t, v in m.V_StorageLevel: + val = value(m.V_StorageLevel[r, p, s, d, t, v]) + if abs(val) < epsilon: + continue + + svars["V_StorageLevel"][r, p, s, d, t, v] = val + + # vflow_in is defined only for storage techs + for r, p, s, d, i, t, v, o in m.V_FlowIn: + val_in = value(m.V_FlowIn[r, p, s, d, i, t, v, o]) + if abs(val_in) < epsilon: + continue + + svars["V_FlowIn"][r, p, s, d, i, t, v, o] = val_in + + for r, p, s, d, i, t, v, o in m.V_FlowOut: + val_out = value(m.V_FlowOut[r, p, s, d, i, t, v, o]) + if abs(val_out) < epsilon: + continue + + svars["V_FlowOut"][r, p, s, d, i, t, v, o] = val_out + + if t not in m.tech_storage: + val_in = value(m.V_FlowOut[r, p, s, d, i, t, v, o]) / value( + m.Efficiency[r, i, t, v, o] + ) + svars["V_FlowIn"][r, p, s, d, i, t, v, o] = val_in + + if (r, i, t, v, o) not in emission_keys: + continue + + emissions = emission_keys[r, i, t, v, o] + for e in emissions: + evalue = val_out * m.EmissionActivity[r, e, i, t, v, o] + svars["V_EmissionActivityByPeriodAndProcess"][r, p, e, t, v] += evalue + + for r, p, i, t, v, o in m.V_FlowOutAnnual: + for s in m.time_season: + for d in m.time_of_day: + val_out = value(m.V_FlowOutAnnual[r, p, i, t, v, o]) * value( + m.SegFrac[s, d] + ) + if abs(val_out) < epsilon: + continue + svars["V_FlowOut"][r, p, s, d, i, t, v, o] = val_out + svars["V_FlowIn"][r, p, s, d, i, t, v, o] = val_out / value( + m.Efficiency[r, i, t, v, o] + ) + if (r, i, t, v, o) not in emission_keys: + continue + emissions = emission_keys[r, i, t, v, o] + for e in emissions: + evalue = val_out * m.EmissionActivity[r, e, i, t, v, o] + svars["V_EmissionActivityByPeriodAndProcess"][ + r, p, e, t, v + ] += evalue + + for r, p, s, d, i, t, v, o in m.V_Curtailment: + val = value(m.V_Curtailment[r, p, s, d, i, t, v, o]) + if abs(val) < epsilon: + continue + svars["V_Curtailment"][r, p, s, d, i, t, v, o] = val + svars["V_FlowIn"][r, p, s, d, i, t, v, o] = ( + val + value(m.V_FlowOut[r, p, s, d, i, t, v, o]) + ) / value(m.Efficiency[r, i, t, v, o]) + + if (r, i, t, v, o) not in emission_keys: + continue + + emissions = emission_keys[r, i, t, v, o] + for e in emissions: + evalue = val * m.EmissionActivity[r, e, i, t, v, o] + svars["V_EmissionActivityByPeriodAndProcess"][r, p, e, t, v] += evalue + + for r, p, i, t, v, o in m.V_FlexAnnual: + for s in m.time_season: + for d in m.time_of_day: + val_out = value(m.V_FlexAnnual[r, p, i, t, v, o]) * value( + m.SegFrac[s, d] + ) + if abs(val_out) < epsilon: + continue + svars["V_Curtailment"][r, p, s, d, i, t, v, o] = val_out + svars["V_FlowOut"][r, p, s, d, i, t, v, o] -= val_out + + for r, p, s, d, i, t, v, o in m.V_Flex: + val_out = value(m.V_Flex[r, p, s, d, i, t, v, o]) + if abs(val_out) < epsilon: + continue + svars["V_Curtailment"][r, p, s, d, i, t, v, o] = val_out + svars["V_FlowOut"][r, p, s, d, i, t, v, o] -= val_out + + # Extract optimal decision variable values related to capacity: + if ( + hasattr(options, "file_location") + and os.path.join("temoa_model", "config_sample_myopic") + not in options.file_location + ): + for r, t, v in m.V_Capacity: + val = value(m.V_Capacity[r, t, v]) + if abs(val) < epsilon: + continue + svars["V_Capacity"][r, t, v] = val + else: + for r, t, v in m.V_Capacity: + if v in m.time_optimize: + val = value(m.V_Capacity[r, t, v]) + if abs(val) < epsilon: + continue + 
svars["V_Capacity"][r, t, v] = val + + for r, p, t in m.V_CapacityAvailableByPeriodAndTech: + val = value(m.V_CapacityAvailableByPeriodAndTech[r, p, t]) + if abs(val) < epsilon: + continue + svars["V_CapacityAvailableByPeriodAndTech"][r, p, t] = val + + # Calculate model costs: + if ( + hasattr(options, "file_location") + and os.path.join("temoa_model", "config_sample_myopic") + not in options.file_location + ): + # This is a generic workaround. Not sure how else to automatically discover + # the objective name + obj_name, obj_value = objs[0].getname(True), value(objs[0]) + svars["Objective"]["('" + obj_name + "')"] = obj_value + + for r, t, v in m.CostInvest.sparse_iterkeys(): # Returns only non-zero values + icost = value(m.V_Capacity[r, t, v]) + if abs(icost) < epsilon: + continue + icost *= value(m.CostInvest[r, t, v]) * ( + (1 - x ** (-min(value(m.LifetimeProcess[r, t, v]), P_e - v))) + / (1 - x ** (-value(m.LifetimeProcess[r, t, v]))) + ) + svars["Costs"]["V_UndiscountedInvestmentByProcess", r, t, v] += icost + + icost *= value(m.LoanAnnualize[r, t, v]) + icost *= ( + value(LLN[r, t, v]) + if not GDR + else (x ** (P_0 - v + 1) * (1 - x ** (-value(LLN[r, t, v]))) / GDR) + ) + + svars["Costs"]["V_DiscountedInvestmentByProcess", r, t, v] += icost + + for r, p, t, v in m.CostFixed.sparse_iterkeys(): + fcost = value(m.V_Capacity[r, t, v]) + if abs(fcost) < epsilon: + continue + + fcost *= value(m.CostFixed[r, p, t, v]) + svars["Costs"][ + "V_UndiscountedFixedCostsByProcess", r, t, v + ] += fcost * value(MPL[r, p, t, v]) + + fcost *= ( + value(MPL[r, p, t, v]) + if not GDR + else (x ** (P_0 - p + 1) * (1 - x ** (-value(MPL[r, p, t, v]))) / GDR) + ) + + svars["Costs"]["V_DiscountedFixedCostsByProcess", r, t, v] += fcost + + for r, p, t, v in m.CostVariable.sparse_iterkeys(): + if t not in m.tech_annual: + vcost = sum( + value(m.V_FlowOut[r, p, S_s, S_d, S_i, t, v, S_o]) + for S_i in m.processInputs[r, p, t, v] + for S_o in m.ProcessOutputsByInput[r, p, t, v, S_i] + for S_s in m.time_season + for S_d in m.time_of_day + ) + else: + vcost = sum( + value(m.V_FlowOutAnnual[r, p, S_i, t, v, S_o]) + for S_i in m.processInputs[r, p, t, v] + for S_o in m.ProcessOutputsByInput[r, p, t, v, S_i] + ) + if abs(vcost) < epsilon: + continue + + vcost *= value(m.CostVariable[r, p, t, v]) + svars["Costs"][ + "V_UndiscountedVariableCostsByProcess", r, t, v + ] += vcost * value(MPL[r, p, t, v]) + vcost *= ( + value(MPL[r, p, t, v]) + if not GDR + else (x ** (P_0 - p + 1) * (1 - x ** (-value(MPL[r, p, t, v]))) / GDR) + ) + svars["Costs"]["V_DiscountedVariableCostsByProcess", r, t, v] += vcost + + # update the costs of exchange technologies. + # Assumption 1: If Ri-Rj appears in the cost tables but Rj-Ri does not, + # then the total costs are distributed between the regions + # Ri and Rj proportional to their use of the exchange technology connecting the + # regions. + # Assumption 2: If both the directional entries appear in the cost tables, + # Assumption 1 is no longer applied and the costs are calculated as they + # are entered in the cost tables. + # assumption 3: Unlike other output tables in which Ri-Rj and Rj-Ri entries + # are allowed in the region column, for the Output_Costs table the region + # to the right of the hyphen sign gets the costs. 
+ for i in m.RegionalExchangeCapacityConstraint_rrtv.iterkeys(): + reg_dir1 = i[0] + "-" + i[1] + reg_dir2 = i[1] + "-" + i[0] + tech = i[2] + vintage = i[3] + key = (reg_dir1, tech, vintage) + try: + act_dir1 = value( + sum( + m.V_FlowOut[reg_dir1, p, s, d, S_i, tech, vintage, S_o] + for p in m.time_optimize + if ( + p + < vintage + + value(m.LifetimeProcess[reg_dir1, tech, vintage]) + ) + and (p >= vintage) + for s in m.time_season + for d in m.time_of_day + for S_i in m.processInputs[reg_dir1, p, tech, vintage] + for S_o in m.ProcessOutputsByInput[ + reg_dir1, p, tech, vintage, S_i + ] + ) + ) + act_dir2 = value( + sum( + m.V_FlowOut[reg_dir2, p, s, d, S_i, tech, vintage, S_o] + for p in m.time_optimize + if ( + p + < vintage + + value(m.LifetimeProcess[reg_dir1, tech, vintage]) + ) + and (p >= vintage) + for s in m.time_season + for d in m.time_of_day + for S_i in m.processInputs[reg_dir2, p, tech, vintage] + for S_o in m.ProcessOutputsByInput[ + reg_dir2, p, tech, vintage, S_i + ] + ) + ) + except: + act_dir1 = value( + sum( + m.V_FlowOutAnnual[reg_dir1, p, S_i, tech, vintage, S_o] + for p in m.time_optimize + if ( + p + < vintage + + value(m.LifetimeProcess[reg_dir1, tech, vintage]) + ) + and (p >= vintage) + for S_i in m.processInputs[reg_dir1, p, tech, vintage] + for S_o in m.ProcessOutputsByInput[ + reg_dir1, p, tech, vintage, S_i + ] + ) + ) + act_dir2 = value( + sum( + m.V_FlowOutAnnual[reg_dir2, p, S_i, tech, vintage, S_o] + for p in m.time_optimize + if ( + p + < vintage + + value(m.LifetimeProcess[reg_dir1, tech, vintage]) + ) + and (p >= vintage) + for S_i in m.processInputs[reg_dir2, p, tech, vintage] + for S_o in m.ProcessOutputsByInput[ + reg_dir2, p, tech, vintage, S_i + ] + ) + ) + + for item in list(svars["Costs"]): + if item[2] == tech: + opposite_dir = ( + item[1][item[1].find("-") + 1 :] + + "-" + + item[1][: item[1].find("-")] + ) + if (item[0], opposite_dir, item[2], item[3]) in svars[ + "Costs" + ].keys(): + continue # if both directional entries are already in svars[ 'Costs' ], they're left intact. + if item[1] == reg_dir1: + if (act_dir1 + act_dir2) > 0: + svars["Costs"][(item[0], reg_dir2, item[2], item[3])] = ( + svars["Costs"][item] * act_dir2 / (act_dir1 + act_dir2) + ) + svars["Costs"][item] = ( + svars["Costs"][item] * act_dir1 / (act_dir1 + act_dir2) + ) + + # Remove Ri-Rj entries from being populated in the Outputs_Costs. 
Ri-Rj means a cost + # for region Rj + for item in list(svars["Costs"]): + if item[2] in m.tech_exchange: + svars["Costs"][ + (item[0], item[1][item[1].find("-") + 1 :], item[2], item[3]) + ] = svars["Costs"][item] + del svars["Costs"][item] + + if options.saveDUALS: + duals = collect_result_data(Cons, con_info, epsilon=1e-9) + + msg = ( + "Model name: %s\n" + "Objective function value (%s): %s\n" + "Non-zero variable values:\n" + ) + if ( + hasattr(options, "file_location") + and os.path.join("temoa_model", "config_sample_myopic") + not in options.file_location + ): + output.write(msg % (m.name, obj_name, obj_value)) + + def make_var_list(variables): + var_list = [] + for vgroup, values in sorted(variables.items()): + for vindex, val in sorted(values.items()): + if isinstance(vindex, tuple): + vindex = ",".join(str(i) for i in vindex) + var_list.append(("{}[{}]".format(vgroup, vindex), val)) + return var_list + + if svars: + stringify_data(make_var_list(svars), output) + else: + output.write("\nAll variables have a zero (0) value.\n") + + if len(con_info) > 0: + output.write("\nBinding constraint values:\n") + stringify_data(con_info, output) + del con_info + else: + # Since not all Coopr solvers give constraint results, must check + msg = "\nSelected Coopr solver plugin does not give constraint data.\n" + output.write(msg) + + output.write( + "\n\nIf you use these results for a published article, " + "please run Temoa with the '--how_to_cite' command line argument for " + "citation information.\n" + ) + + # ----------------------------------------------------------------- + # Write outputs stored in dictionary to the user-specified database + # ----------------------------------------------------------------- + + # Table dictionary below maps variable names to database table names + tables = { + "V_FlowIn": "Output_VFlow_In", + "V_FlowOut": "Output_VFlow_Out", + "V_Curtailment": "Output_Curtailment", + "V_Capacity": "Output_V_Capacity", + "V_CapacityAvailableByPeriodAndTech": "Output_CapacityByPeriodAndTech", + "V_EmissionActivityByPeriodAndProcess": "Output_Emissions", + "Objective": "Output_Objective", + "Costs": "Output_Costs", + } + + db_tables = [ + "time_periods", + "time_season", + "time_of_day", + "technologies", + "commodities", + "LifetimeTech", + "LifetimeProcess", + "Efficiency", + "EmissionActivity", + "ExistingCapacity", + ] + + if isinstance(options, TemoaConfig): + if not options.output: + if options.saveTEXTFILE or options.keepPyomoLP: + for inpu in options.dot_dat: + print(inpu) + file_ty = re.search(r"\b([\w-]+)\.(\w+)\b", inpu) + new_dir = ( + options.path_to_data + + os.sep + + file_ty.group(1) + + "_" + + options.scenario + + "_model" + ) + if os.path.exists(new_dir): + rmtree(new_dir) + os.mkdir(new_dir) + print("No Output File specified.") + return output + + if not os.path.exists(options.output): + print("Please put the " + options.output + " file in the right Directory") + return output + + con = sqlite3.connect(options.output) + cur = con.cursor() # A database cursor enables traversal over DB records + con.text_factory = str # This ensures data is explored with UTF-8 encoding + + ### Copy tables from Input File to DB file. + # IF output file is empty database. + cur.execute("SELECT * FROM technologies") + is_db_empty = False # False for empty db file + for elem in cur: + is_db_empty = True # True for non-empty db file + break + + if ( + is_db_empty + ): # This file could be schema with populated results from previous run. Or it could be a normal db file. 
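The branch opened above tells a results schema apart from an ordinary database by looking for an input_file table, which is what the sqlite_master query just below does. The same check as a small helper (a sketch; the code here inlines it):

    def table_exists(con, name):
        # sqlite_master lists every table defined in the database file.
        row = con.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
            (name,),
        ).fetchone()
        return row is not None

    # table_exists(con, "input_file")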
+ cur.execute( + "SELECT name FROM sqlite_master WHERE type='table' AND name='input_file';" + ) + does_input_file_table_exist = False + for i in cur: # This means that the 'input_file' table exists in db. + does_input_file_table_exist = True + if ( + does_input_file_table_exist + ): # This block distinguishes normal database from schema. + # This is schema file. + cur.execute("SELECT file FROM input_file WHERE id is '1';") + for i in cur: + tagged_file = i[0] + tagged_file = re.sub('["]', "", tagged_file) + + if tagged_file == options.dot_dat[0]: + # If Input_file name matches, add output and check tech/comm + dat_to_db(options.dot_dat[0], con) + else: + # If not a match, delete output tables and update input_file. Call dat_to_db + for i in db_tables: + cur.execute("DELETE FROM " + i + ";") + cur.execute("VACUUM;") + + for i in tables.keys(): + cur.execute("DELETE FROM " + tables[i] + ";") + cur.execute("VACUUM;") + + for i in options.dot_dat: + cur.execute("DELETE FROM input_file WHERE id=1;") + cur.execute("INSERT INTO input_file VALUES(1, '" + i + "');") + break + dat_to_db(i, con) + + else: # empty schema db file + cur.execute( + "CREATE TABLE IF NOT EXISTS input_file ( id integer PRIMARY KEY, file varchar(30));" + ) + + for i in tables.keys(): + cur.execute("DELETE FROM " + tables[i] + ";") + cur.execute("VACUUM;") + + for i in options.dot_dat: + cur.execute("DELETE FROM input_file WHERE id=1;") + cur.execute( + "INSERT INTO input_file(id, file) VALUES(?, ?);", (1, '"' + i + '"') + ) + break + dat_to_db(i, con) + + for table in svars.keys(): + if table in tables: + cur.execute("SELECT DISTINCT scenario FROM '" + tables[table] + "'") + for val in cur: + # If scenario exists, delete unless it's a myopic run (for myopic, the scenario results are deleted + # before the run in temoa_config.py) + if ( + hasattr(options, "file_location") + and options.scenario == val[0] + and os.path.join("temoa_model", "config_sample_myopic") + not in options.file_location + ): + cur.execute( + "DELETE FROM " + + tables[table] + + " \ + WHERE scenario is '" + + options.scenario + + "'" + ) + if table == "Objective": # Only table without sector info + for key in svars[table].keys(): + key_str = str(key) # only 1 row to write + key_str = key_str[1:-1] # Remove parentheses + cur.execute( + "INSERT INTO " + + tables[table] + + " \ + VALUES('" + + options.scenario + + "'," + + key_str + + ", \ + " + + str(svars[table][key]) + + ");" + ) + else: # First add 'NULL' for sector then update + for key in svars[table].keys(): # Need to loop over keys (rows) + key_str = str(key) + key_str = key_str[1:-1] # Remove parentheses + if table != "Costs": + cur.execute( + "INSERT INTO " + + tables[table] + + " VALUES('" + + str(key[0]) + + "', '" + + options.scenario + + "','NULL', \ + " + + key_str[key_str.find(",") + 1 :] + + "," + + str(svars[table][key]) + + ");" + ) + else: + key_str = str((key[0], key[2], key[3])) + key_str = key_str[1:-1] # Remove parentheses + cur.execute( + "INSERT INTO " + + tables[table] + + " VALUES('" + + str(key[1]) + + "', '" + + options.scenario + + "','NULL', \ + " + + key_str + + "," + + str(svars[table][key]) + + ");" + ) + cur.execute( + "UPDATE " + + tables[table] + + " SET sector = \ (SELECT technologies.sector FROM technologies \ - WHERE "+tables[table]+".tech = technologies.tech);") - - #WRITE DUALS RESULTS - if (options.saveDUALS): - if (len(duals)!=0): - overwrite_keys = [str(tuple(x)) for x in duals.reset_index()[['constraint_name','scenario']].to_records(index=False)] - #delete 
records that will be overwritten by new duals dataframe - cur.execute("DELETE FROM Output_Duals WHERE (constraint_name, scenario) IN (VALUES " + ','.join(overwrite_keys) + ")") - #write new records from new duals dataframe - duals.to_sql('Output_Duals',con, if_exists='append') - - con.commit() - con.close() - - if options.saveEXCEL or options.saveTEXTFILE or options.keepPyomoLP: - for inpu in options.dot_dat: - file_ty = re.search(r"\b([\w-]+)\.(\w+)\b", inpu) - new_dir = options.path_to_data+os.sep+file_ty.group(1)+'_'+options.scenario+'_model' - if os.path.exists( new_dir ): - rmtree( new_dir ) - os.mkdir(new_dir) - - if options.saveEXCEL: - file_type = re.search(r"([\w-]+)\.(\w+)\b", options.output) - file_n = file_type.group(1) - temp_scenario = set() - temp_scenario.add(options.scenario) - #make_excel function imported near the top - make_excel(options.output, new_dir+os.sep+options.scenario, temp_scenario) - #os.system("python data_processing"+os.sep+"DB_to_Excel.py -i \ - # ""+options.output+" \ - # " -o data_files"+os.sep+options.scenario+" -s "+options.scenario) - - return output - -def dat_to_db(input_file, output_schema, run_partial=False): + WHERE " + + tables[table] + + ".tech = technologies.tech);" + ) + + # WRITE DUALS RESULTS + if options.saveDUALS: + if len(duals) != 0: + overwrite_keys = [ + str(tuple(x)) + for x in duals.reset_index()[ + ["constraint_name", "scenario"] + ].to_records(index=False) + ] + # delete records that will be overwritten by new duals dataframe + cur.execute( + "DELETE FROM Output_Duals WHERE (constraint_name, scenario) IN (VALUES " + + ",".join(overwrite_keys) + + ")" + ) + # write new records from new duals dataframe + duals.to_sql("Output_Duals", con, if_exists="append") + + con.commit() + con.close() + + if options.saveEXCEL or options.saveTEXTFILE or options.keepPyomoLP: + for inpu in options.dot_dat: + file_ty = re.search(r"\b([\w-]+)\.(\w+)\b", inpu) + new_dir = ( + options.path_to_data + + os.sep + + file_ty.group(1) + + "_" + + options.scenario + + "_model" + ) + if os.path.exists(new_dir): + rmtree(new_dir) + os.mkdir(new_dir) + + if options.saveEXCEL: + file_type = re.search(r"([\w-]+)\.(\w+)\b", options.output) + file_n = file_type.group(1) + temp_scenario = set() + temp_scenario.add(options.scenario) + # make_excel function imported near the top + make_excel( + options.output, new_dir + os.sep + options.scenario, temp_scenario + ) + # os.system("python data_processing"+os.sep+"DB_to_Excel.py -i \ + # ""+options.output+" \ + # " -o data_files"+os.sep+options.scenario+" -s "+options.scenario) + + return output + - def traverse_dat(dat_filename, search_tablename): - - result_string = "" - table_found_flag = False - - with open(dat_filename) as f: - for line in f: - line = re.sub("[#].*$", " ", line) - - if table_found_flag: - result_string += line - if re.search(";\s*$", line): - break - - if re.search(""+search_tablename+"\s*[:][=]", line): - result_string += line - table_found_flag = True - if re.search(";\s*$", line): - break - - return result_string - - #####Code Starts here - tables_single_value = [ 'time_exist', 'time_future', 'time_season', 'time_of_day', \ - 'tech_baseload', 'tech_resource', 'tech_production', 'tech_storage', \ - 'commodity_physical', 'commodity_demand', 'commodity_emissions'] - - partial_run_tech = ['tech_baseload', 'tech_resource', 'tech_production', 'tech_storage'] - - partial_run_comm = ['commodity_physical', 'commodity_demand', 'commodity_emissions'] - - tables_multiple_value = ['ExistingCapacity', 
'Efficiency', 'LifetimeTech', \ - 'LifetimeProcess', 'EmissionActivity'] - - parsed_data = {} - - #if db_or_dat_flag: #This is an input db file - # import pdb; pdb.set_trace() - # output_schema.execute("ATTACH DATABASE ? AS db2;", "'"+input_file+"'") - # for i in db_tables: - # output_schema.execute("INSERT INTO "+i+" SELECT * FROM db2."+i+";") - - if run_partial: - comm_set = set() - tech_set = set() - for i in partial_run_comm: - raw_string = traverse_dat(input_file, i) - raw_string = re.sub("\s+", " ", raw_string) - raw_string = re.sub("^.*[:][=]", "", raw_string) - raw_string = re.sub(";\s*$", "", raw_string) - raw_string = re.sub("^\s+|\s+$", "", raw_string) - parsed_data[i] = re.split(" ", raw_string) - for datas in parsed_data[i]: - if datas == '': - continue - comm_set.add(datas) - - for i in partial_run_tech: - raw_string = traverse_dat(input_file, i) - raw_string = re.sub("\s+", " ", raw_string) - raw_string = re.sub("^.*[:][=]", "", raw_string) - raw_string = re.sub(";\s*$", "", raw_string) - raw_string = re.sub("^\s+|\s+$", "", raw_string) - parsed_data[i] = re.split(" ", raw_string) - for datas in parsed_data[i]: - if datas == '': - continue - tech_set.add(datas) - - return comm_set, tech_set - - #This is an input dat file - for i in tables_single_value: - raw_string = traverse_dat(input_file, i) - raw_string = re.sub("\s+", " ", raw_string) - raw_string = re.sub("^.*[:][=]", "", raw_string) - raw_string = re.sub(";\s*$", "", raw_string) - raw_string = re.sub("^\s+|\s+$", "", raw_string) - parsed_data[i] = re.split(" ", raw_string) - - for i in tables_multiple_value: - raw_string = traverse_dat(input_file, i) - raw_string = re.sub("\n", ",", raw_string) - raw_string = re.sub("\s+", " ", raw_string) - raw_string = re.sub("^.*[:][=]\s*,", "", raw_string) - raw_string = re.sub(",?;\s*,?$", "", raw_string) - raw_string = re.sub("^\s+|\s+$", "", raw_string) - raw_string = re.sub("\s?,\s?", ",", raw_string) - raw_string = re.sub(",+", ",", raw_string) - parsed_data[i] = re.split(",", raw_string) - - #Fill time_periods - for i in parsed_data['time_exist']: - if i is '': - continue - output_schema.execute("INSERT OR REPLACE INTO time_periods VALUES("+i+", 'e');") - for i in parsed_data['time_future']: - if i is '': - continue - output_schema.execute("INSERT OR REPLACE INTO time_periods VALUES("+i+", 'f');") - - #Fill time_season - for i in parsed_data['time_season']: - if i is '': - continue - output_schema.execute("INSERT OR REPLACE INTO time_season VALUES('"+i+"');") - - #Fill time_of_day - for i in parsed_data['time_of_day']: - if i is '': - continue - output_schema.execute("INSERT OR REPLACE INTO time_of_day VALUES('"+i+"');") - - #Fill technologies - for i in parsed_data['tech_baseload']: - if i is '': - continue - output_schema.execute("INSERT OR REPLACE INTO technologies VALUES('"+i+"', 'pb', '', '');") - for i in parsed_data['tech_storage']: - if i is '': - continue - output_schema.execute("INSERT OR REPLACE INTO technologies VALUES('"+i+"', 'ph', '', '');") - for i in parsed_data['tech_production']: - if i is '': - continue - if i in parsed_data['tech_storage']: - continue - if i in parsed_data['tech_baseload']: - continue - output_schema.execute("INSERT OR REPLACE INTO technologies VALUES('"+i+"', 'p', '', '');") - for i in parsed_data['tech_resource']: - if i is '': - continue - output_schema.execute("INSERT OR REPLACE INTO technologies VALUES('"+i+"', 'r', '', '');") - - #Fill commodities - for i in parsed_data['commodity_demand']: - if i is '': - continue - 
output_schema.execute("INSERT OR REPLACE INTO commodities VALUES('"+i+"', 'd', '');") - for i in parsed_data['commodity_physical']: - if i is '': - continue - output_schema.execute("INSERT OR REPLACE INTO commodities VALUES('"+i+"', 'p', '');") - for i in parsed_data['commodity_emissions']: - if i is '': - continue - output_schema.execute("INSERT OR REPLACE INTO commodities VALUES('"+i+"', 'e', '');") - - - #Fill ExistingCapacity - for i in parsed_data['ExistingCapacity']: - if i is '': - continue - row_data = re.split(" ", i) - row_data.append('') - row_data.append('') - output_schema.execute("INSERT OR REPLACE INTO ExistingCapacity VALUES(?, ?, ?, ?, ?);", row_data) - - #Fill Efficiency - for i in parsed_data['Efficiency']: - if i is '': - continue - row_data = re.split(" ", i) - row_data.append('') - output_schema.execute("INSERT OR REPLACE INTO Efficiency VALUES(?, ?, ?, ?, ?, ?);", row_data) - - #Fill LifetimeTech - for i in parsed_data['LifetimeTech']: - if i is '': - continue - row_data = re.split(" ", i) - row_data.append('') - output_schema.execute("INSERT OR REPLACE INTO LifetimeTech VALUES(?, ?, ?);", row_data) - - #Fill LifetimeProcess - for i in parsed_data['LifetimeProcess']: - if i is '': - continue - row_data = re.split(" ", i) - row_data.append('') - output_schema.execute("INSERT OR REPLACE INTO LifetimeProcess VALUES(?, ?, ?, ?);", row_data) - - #Fill EmissionActivity - for i in parsed_data['EmissionActivity']: - if i is '': - continue - row_data = re.split(" ", i) - row_data.append('') - if len(row_data) is 7: - row_data.append('') - output_schema.execute("INSERT OR REPLACE INTO EmissionActivity VALUES(?, ?, ?, ?, ?, ?, ?, ?);", row_data) - +def dat_to_db(input_file, output_schema, run_partial=False): + def traverse_dat(dat_filename, search_tablename): + result_string = "" + table_found_flag = False + + with open(dat_filename) as f: + for line in f: + line = re.sub("[#].*$", " ", line) + + if table_found_flag: + result_string += line + if re.search(";\s*$", line): + break + + if re.search("" + search_tablename + "\s*[:][=]", line): + result_string += line + table_found_flag = True + if re.search(";\s*$", line): + break + + return result_string + + #####Code Starts here + tables_single_value = [ + "time_exist", + "time_future", + "time_season", + "time_of_day", + "tech_baseload", + "tech_resource", + "tech_production", + "tech_storage", + "commodity_physical", + "commodity_demand", + "commodity_emissions", + ] + + partial_run_tech = [ + "tech_baseload", + "tech_resource", + "tech_production", + "tech_storage", + ] + + partial_run_comm = ["commodity_physical", "commodity_demand", "commodity_emissions"] + + tables_multiple_value = [ + "ExistingCapacity", + "Efficiency", + "LifetimeTech", + "LifetimeProcess", + "EmissionActivity", + ] + + parsed_data = {} + + # if db_or_dat_flag: #This is an input db file + # import pdb; pdb.set_trace() + # output_schema.execute("ATTACH DATABASE ? 
AS db2;", "'"+input_file+"'") + # for i in db_tables: + # output_schema.execute("INSERT INTO "+i+" SELECT * FROM db2."+i+";") + + if run_partial: + comm_set = set() + tech_set = set() + for i in partial_run_comm: + raw_string = traverse_dat(input_file, i) + raw_string = re.sub("\s+", " ", raw_string) + raw_string = re.sub("^.*[:][=]", "", raw_string) + raw_string = re.sub(";\s*$", "", raw_string) + raw_string = re.sub("^\s+|\s+$", "", raw_string) + parsed_data[i] = re.split(" ", raw_string) + for datas in parsed_data[i]: + if datas == "": + continue + comm_set.add(datas) + + for i in partial_run_tech: + raw_string = traverse_dat(input_file, i) + raw_string = re.sub("\s+", " ", raw_string) + raw_string = re.sub("^.*[:][=]", "", raw_string) + raw_string = re.sub(";\s*$", "", raw_string) + raw_string = re.sub("^\s+|\s+$", "", raw_string) + parsed_data[i] = re.split(" ", raw_string) + for datas in parsed_data[i]: + if datas == "": + continue + tech_set.add(datas) + + return comm_set, tech_set + + # This is an input dat file + for i in tables_single_value: + raw_string = traverse_dat(input_file, i) + raw_string = re.sub("\s+", " ", raw_string) + raw_string = re.sub("^.*[:][=]", "", raw_string) + raw_string = re.sub(";\s*$", "", raw_string) + raw_string = re.sub("^\s+|\s+$", "", raw_string) + parsed_data[i] = re.split(" ", raw_string) + + for i in tables_multiple_value: + raw_string = traverse_dat(input_file, i) + raw_string = re.sub("\n", ",", raw_string) + raw_string = re.sub("\s+", " ", raw_string) + raw_string = re.sub("^.*[:][=]\s*,", "", raw_string) + raw_string = re.sub(",?;\s*,?$", "", raw_string) + raw_string = re.sub("^\s+|\s+$", "", raw_string) + raw_string = re.sub("\s?,\s?", ",", raw_string) + raw_string = re.sub(",+", ",", raw_string) + parsed_data[i] = re.split(",", raw_string) + + # Fill time_periods + for i in parsed_data["time_exist"]: + if i is "": + continue + output_schema.execute( + "INSERT OR REPLACE INTO time_periods VALUES(" + i + ", 'e');" + ) + for i in parsed_data["time_future"]: + if i is "": + continue + output_schema.execute( + "INSERT OR REPLACE INTO time_periods VALUES(" + i + ", 'f');" + ) + + # Fill time_season + for i in parsed_data["time_season"]: + if i is "": + continue + output_schema.execute("INSERT OR REPLACE INTO time_season VALUES('" + i + "');") + + # Fill time_of_day + for i in parsed_data["time_of_day"]: + if i is "": + continue + output_schema.execute("INSERT OR REPLACE INTO time_of_day VALUES('" + i + "');") + + # Fill technologies + for i in parsed_data["tech_baseload"]: + if i is "": + continue + output_schema.execute( + "INSERT OR REPLACE INTO technologies VALUES('" + i + "', 'pb', '', '');" + ) + for i in parsed_data["tech_storage"]: + if i is "": + continue + output_schema.execute( + "INSERT OR REPLACE INTO technologies VALUES('" + i + "', 'ph', '', '');" + ) + for i in parsed_data["tech_production"]: + if i is "": + continue + if i in parsed_data["tech_storage"]: + continue + if i in parsed_data["tech_baseload"]: + continue + output_schema.execute( + "INSERT OR REPLACE INTO technologies VALUES('" + i + "', 'p', '', '');" + ) + for i in parsed_data["tech_resource"]: + if i is "": + continue + output_schema.execute( + "INSERT OR REPLACE INTO technologies VALUES('" + i + "', 'r', '', '');" + ) + + # Fill commodities + for i in parsed_data["commodity_demand"]: + if i is "": + continue + output_schema.execute( + "INSERT OR REPLACE INTO commodities VALUES('" + i + "', 'd', '');" + ) + for i in parsed_data["commodity_physical"]: + if i is "": + 
continue + output_schema.execute( + "INSERT OR REPLACE INTO commodities VALUES('" + i + "', 'p', '');" + ) + for i in parsed_data["commodity_emissions"]: + if i is "": + continue + output_schema.execute( + "INSERT OR REPLACE INTO commodities VALUES('" + i + "', 'e', '');" + ) + + # Fill ExistingCapacity + for i in parsed_data["ExistingCapacity"]: + if i is "": + continue + row_data = re.split(" ", i) + row_data.append("") + row_data.append("") + output_schema.execute( + "INSERT OR REPLACE INTO ExistingCapacity VALUES(?, ?, ?, ?, ?);", row_data + ) + + # Fill Efficiency + for i in parsed_data["Efficiency"]: + if i is "": + continue + row_data = re.split(" ", i) + row_data.append("") + output_schema.execute( + "INSERT OR REPLACE INTO Efficiency VALUES(?, ?, ?, ?, ?, ?);", row_data + ) + + # Fill LifetimeTech + for i in parsed_data["LifetimeTech"]: + if i is "": + continue + row_data = re.split(" ", i) + row_data.append("") + output_schema.execute( + "INSERT OR REPLACE INTO LifetimeTech VALUES(?, ?, ?);", row_data + ) + + # Fill LifetimeProcess + for i in parsed_data["LifetimeProcess"]: + if i is "": + continue + row_data = re.split(" ", i) + row_data.append("") + output_schema.execute( + "INSERT OR REPLACE INTO LifetimeProcess VALUES(?, ?, ?, ?);", row_data + ) + + # Fill EmissionActivity + for i in parsed_data["EmissionActivity"]: + if i is "": + continue + row_data = re.split(" ", i) + row_data.append("") + if len(row_data) is 7: + row_data.append("") + output_schema.execute( + "INSERT OR REPLACE INTO EmissionActivity VALUES(?, ?, ?, ?, ?, ?, ?, ?);", + row_data, + ) diff --git a/temoa_model/temoa_config.py b/temoa_model/temoa_config.py index 29760d76..d2c1a9ac 100644 --- a/temoa_model/temoa_config.py +++ b/temoa_model/temoa_config.py @@ -23,488 +23,581 @@ import re + def db_2_dat(ifile, ofile, options): - # Adapted from DB_to_DAT.py - import sqlite3 - import sys - import re - import getopt - - def write_tech_mga(f): - cur.execute("SELECT tech FROM technologies") - f.write("set tech_mga :=\n") - for row in cur: - f.write(row[0] + '\n') - f.write(';\n\n') - - def write_tech_sector(f): - sectors = set() - cur.execute("SELECT sector FROM technologies") - for row in cur: - sectors.add(row[0]) - for s in sectors: - cur.execute("SELECT tech FROM technologies WHERE sector == '" + s + "'") - f.write("set tech_" + s + " :=\n") - for row in cur: - f.write(row[0] + '\n') - f.write(';\n\n') - - def query_table (t_properties, f): - t_type = t_properties[0] #table type (set or param) - t_name = t_properties[1] #table name - t_dtname = t_properties[2] #DAT table name when DB table must be subdivided - t_flag = t_properties[3] #table flag, if any - t_index = t_properties[4] #table column index after which '#' should be specified - if type(t_flag) is list: #tech production table has a list for flags; this is currently hard-wired - db_query = "SELECT * FROM " + t_name + " WHERE flag=='p' OR flag=='pb' OR flag=='ps'" - cur.execute(db_query) - if cur.fetchone() is None: - return - if t_type == "set": - f.write("set " + t_dtname + " := \n") - else: - f.write("param " + t_dtname + " := \n") - elif t_flag != '': #check to see if flag is empty, if not use it to make table - db_query = "SELECT * FROM " + t_name + " WHERE flag=='" + t_flag + "'" - cur.execute(db_query) - if cur.fetchone() is None: - return - if t_type == "set": - f.write("set " + t_dtname + " := \n") - else: - f.write("param " + t_dtname + " := \n") - else: #Only other possible case is empty flag, then 1-to-1 correspodence between DB and DAT table 
names - db_query = "SELECT * FROM " + t_name - cur.execute(db_query) - if cur.fetchone() is None: - return - if t_type == "set": - f.write("set " + t_name + " := \n") - else: - f.write("param " + t_name + " := \n") - cur.execute(db_query) - if t_index == 0: #make sure that units and descriptions are commented out in DAT file - for line in cur: - str_row = str(line[0]) + "\n" - f.write(str_row) - print(str_row) - else: - for line in cur: - before_comments = line[:t_index+1] - before_comments = re.sub('[(]', '', str(before_comments)) - before_comments = re.sub('[\',)]', ' ', str(before_comments)) - after_comments = line[t_index+2:] - after_comments = re.sub('[(]', '', str(after_comments)) - after_comments = re.sub('[\',)]', ' ', str(after_comments)) - search_afcom = re.search(r'^\W+$', str(after_comments)) #Search if after_comments is empty. - if not search_afcom : - str_row = before_comments + "# " + after_comments + "\n" - else : - str_row = before_comments + "\n" - f.write(str_row) - print(str_row) - f.write(';\n\n') - - #[set or param, table_name, DAT fieldname, flag (if any), index (where to insert '#') - table_list = [ - ['set', 'time_periods', 'time_exist', 'e', 0], - ['set', 'time_periods', 'time_future', 'f', 0], - ['set', 'time_season', '', '', 0], - ['set', 'time_of_day', '', '', 0], - ['set', 'regions', '', '', 0], - ['set', 'tech_curtailment', '', '', 0], - ['set', 'tech_flex', '', '', 0], - ['set', 'tech_reserve', '', '', 0], - ['set', 'technologies', 'tech_resource', 'r', 0], - ['set', 'technologies', 'tech_production', ['p','pb','ps'], 0], - ['set', 'technologies', 'tech_baseload', 'pb', 0], - ['set', 'technologies', 'tech_storage', 'ps', 0], - ['set', 'tech_ramping', '', '', 0], - ['set', 'tech_exchange', '', '', 0], - ['set', 'commodities', 'commodity_physical', 'p', 0], - ['set', 'commodities', 'commodity_emissions', 'e', 0], - ['set', 'commodities', 'commodity_demand', 'd', 0], - ['set', 'tech_groups', '', '', 0], - ['set', 'tech_annual', '', '', 0], - ['set', 'tech_variable', '', '', 0], - ['set', 'groups', '', '', 0], - ['param','MinGenGroupTarget', '', '', 2], - ['param','MinGenGroupWeight', '', '', 3], - ['param','LinkedTechs', '', '', 3], - ['param','SegFrac', '', '', 2], - ['param','DemandSpecificDistribution','', '', 4], - ['param','CapacityToActivity', '', '', 2], - ['param','PlanningReserveMargin', '', '', 2], - ['param','GlobalDiscountRate', '', '', 0], - ['param','MyopicBaseyear', '', '', 0], - ['param','DiscountRate', '', '', 3], - ['param','EmissionActivity', '', '', 6], - ['param','EmissionLimit', '', '', 3], - ['param','Demand', '', '', 3], - ['param','TechOutputSplit', '', '', 4], - ['param','TechInputSplit', '', '', 4], - ['param','TechInputSplitAverage', '', '', 4], - ['param','MinCapacity', '', '', 3], - ['param','MaxCapacity', '', '', 3], - ['param','MaxActivity', '', '', 3], - ['param','MinActivity', '', '', 3], - ['param','MaxResource', '', '', 2], - ['param','GrowthRateMax', '', '', 2], - ['param','GrowthRateSeed', '', '', 2], - ['param','LifetimeTech', '', '', 2], - ['param','LifetimeProcess', '', '', 3], - ['param','LifetimeLoanTech', '', '', 2], - ['param','CapacityFactorTech', '', '', 4], - ['param','CapacityFactorProcess', '', '', 5], - ['param','Efficiency', '', '', 5], - ['param','ExistingCapacity', '', '', 3], - ['param','CostInvest', '', '', 3], - ['param','CostFixed', '', '', 4], - ['param','CostVariable', '', '', 4], - ['param','CapacityCredit', '', '', 4], - ['param','RampUp', '', '', 2], - ['param','RampDown', '', '', 2], - 
['param','StorageInitFrac', '', '', 3], - ['param','StorageDuration', '', '', 2]] - - with open(ofile, 'w') as f: - f.write('data ;\n\n') - #connect to the database - con = sqlite3.connect(ifile, isolation_level=None) - cur = con.cursor() # a database cursor is a control structure that enables traversal over the records in a database - con.text_factory = str #this ensures data is explored with the correct UTF-8 encoding - - # Return the full list of existing tables. - table_exist = cur.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall() - table_exist = [i[0] for i in table_exist] - - for table in table_list: - if table[1] in table_exist: - query_table(table, f) - if options.mga_weight == 'integer': - write_tech_mga(f) - if options.mga_weight == 'normalized': - write_tech_sector(f) - - # Making sure the database is empty from the begining for a myopic solve - if options.myopic: - cur.execute("DELETE FROM Output_CapacityByPeriodAndTech WHERE scenario="+"'"+str(options.scenario)+"'") - cur.execute("DELETE FROM Output_Emissions WHERE scenario="+"'"+str(options.scenario)+"'") - cur.execute("DELETE FROM Output_Costs WHERE scenario="+"'"+str(options.scenario)+"'") - cur.execute("DELETE FROM Output_Objective WHERE scenario="+"'"+str(options.scenario)+"'") - cur.execute("DELETE FROM Output_VFlow_In WHERE scenario="+"'"+str(options.scenario)+"'") - cur.execute("DELETE FROM Output_VFlow_Out WHERE scenario="+"'"+str(options.scenario)+"'") - cur.execute("DELETE FROM Output_V_Capacity WHERE scenario="+"'"+str(options.scenario)+"'") - cur.execute("DELETE FROM Output_Curtailment WHERE scenario="+"'"+str(options.scenario)+"'") - cur.execute("DELETE FROM Output_Duals WHERE scenario="+"'"+str(options.scenario)+"'") - cur.execute("VACUUM") - con.commit() - - cur.close() - con.close() - -class TemoaConfig( object ): - states = ( - ('mga', 'exclusive'), - ) - - tokens = ( - 'dot_dat', - 'output', - 'scenario', - 'how_to_cite', - 'version', - 'solver', - 'neos', - 'keep_pyomo_lp_file', - 'saveEXCEL', - 'myopic' - 'myopic_periods' - 'keep_myopic_databases' - 'saveDUALS' - 'saveTEXTFILE', - 'mgaslack', - 'mgaiter', - 'path_to_data', - 'path_to_logs', - 'mgaweight' - ) - - t_ANY_ignore = '[ \t]' - - def __init__(self, **kwargs): - # Make compatible with Python 2.7 and 3 - try: - import queue - except: - import Queue as queue - - self.__error = list() - self.__mga_todo = queue.Queue() - self.__mga_done = queue.Queue() - - self.file_location = None - self.dot_dat = list() # Use Kevin's name. - self.output = None # May update to a list if multiple output is required. - self.scenario = None - self.saveEXCEL = False - self.myopic = False - self.myopic_periods = 0 - self.KeepMyopicDBs = False - self.saveDUALS = False - self.saveTEXTFILE = False - self.how_to_cite = None - self.version = False - self.neos = False - self.generateSolverLP = False - self.keepPyomoLP = False - self.mga = None # mga slack value - self.mga_iter = None - self.mga_weight = None - - # To keep consistent with Kevin's argumetn parser, will be removed in the future. - self.graph_format = None - self.show_capacity = False - self.graph_type = 'separate_vintages' - self.use_splines = False - - #Introduced during UI Development - self.path_to_data = re.sub('temoa_model$', 'data_files', dirname(abspath(__file__)))# Path to where automated excel and text log folder will be save as output. - self.path_to_logs = self.path_to_data+sep+"debug_logs" #Path to where debug logs will be generated for each run. 
By default in debug_logs folder in db_io. - self.path_to_lp_files = None - self.abort_temoa = False - - if 'd_solver' in kwargs.keys(): - self.solver = kwargs['d_solver'] - else: - self.solver = None - - def __repr__(self): - width = 25 - spacer = '\n' + '-'*width + '\n' - msg = spacer - msg += '{:>{}s}: {}\n'.format('Config file', width, self.file_location) - for i in self.dot_dat: - if self.dot_dat.index(i) == 0: - msg += '{:>{}s}: {}\n'.format('Input file', width, i) - else: - msg += '{:>25s} {}\n'.format(' ', i) - msg += '{:>{}s}: {}\n'.format('Output file', width, self.output) - msg += '{:>{}s}: {}\n'.format('Scenario', width, self.scenario) - msg += '{:>{}s}: {}\n'.format('Spreadsheet output', width, self.saveEXCEL) - msg += '{:>{}s}: {}\n'.format('Myopic scheme', width, self.myopic) - msg += '{:>{}s}: {}\n'.format('Myopic years', width, self.myopic_periods) - msg += '{:>{}s}: {}\n'.format('Retain myopic databases', width, self.KeepMyopicDBs) - msg += spacer - msg += '{:>{}s}: {}\n'.format('Citation output status', width, self.how_to_cite) - msg += '{:>{}s}: {}\n'.format('NEOS status', width, self.neos) - msg += '{:>{}s}: {}\n'.format('Version output status', width, self.version) - msg += spacer - msg += '{:>{}s}: {}\n'.format('Selected solver status', width, self.solver) - msg += '{:>{}s}: {}\n'.format('Solver LP write status', width, self.generateSolverLP) - msg += '{:>{}s}: {}\n'.format('Pyomo LP write status', width, self.keepPyomoLP) - msg += spacer - msg += '{:>{}s}: {}\n'.format('MGA slack value', width, self.mga) - msg += '{:>{}s}: {}\n'.format('MGA # of iterations', width, self.mga_iter) - msg += '{:>{}s}: {}\n'.format('MGA weighting method', width, self.mga_weight) - msg += '**NOTE: If you are performing MGA runs, navigate to the DAT file and make any modifications to the MGA sets before proceeding.' 
- return msg - - def t_ANY_COMMENT(self, t): - r'\#.*' - pass - - def t_dot_dat(self, t): - r'--input[\s\=]+[-\\\/\:\.\~\w]+(\.dat|\.db|\.sqlite)\b' - self.dot_dat.append(abspath(t.value.replace('=', ' ').split()[1])) - - def t_output(self, t): - r'--output[\s\=]+[-\\\/\:\.\~\w]+(\.db|\.sqlite)\b' - self.output = abspath(t.value.replace('=', ' ').split()[1]) - - def t_scenario(self, t): - r'--scenario[\s\=]+\w+\b' - self.scenario = t.value.replace('=', ' ').split()[1] - - def t_saveEXCEL(self, t): - r'--saveEXCEL\b' - self.saveEXCEL = True - - def t_saveDUALS(self, t): - r'--saveDUALS\b' - self.saveDUALS = True - - def t_myopic(self, t): - r'--myopic\b' - self.myopic = True - - def t_myopic_periods(self, t): - r'--myopic_periods[\s\=]+[\d]+' - self.myopic_periods = int(t.value.replace('=', ' ').split()[1]) - - def t_keep_myopic_databases(self, t): - r'--keep_myopic_databases\b' - self.KeepMyopicDBs = True - - def t_saveTEXTFILE(self, t): - r'--saveTEXTFILE\b' - self.saveTEXTFILE = True - - def t_path_to_data(self, t): - r'--path_to_data[\s\=]+[-\\\/\:\.\~\w\ ]+\b' - self.path_to_data = abspath(t.value.replace('=', ',').split(",")[1]) - - def t_path_to_logs(self, t): - r'--path_to_logs[\s\=]+[-\\\/\:\.\~\w\ ]+\b' - self.path_to_logs = abspath(t.value.replace('=', ',').split(",")[1]) - - def t_how_to_cite(self, t): - r'--how_to_cite\b' - self.how_to_cite = True - - def t_version(self, t): - r'--version\b' - self.version = True - - def t_neos(self, t): - r'--neos\b' - self.neos = True - - def t_solver(self, t): - r'--solver[\s\=]+\w+\b' - self.solver = t.value.replace('=', ' ').split()[1] - - def t_keep_pyomo_lp_file(self, t): - r'--keep_pyomo_lp_file\b' - self.keepPyomoLP = True - - def t_begin_mga(self, t): - r'--mga[\s\=]+\{' - t.lexer.push_state('mga') - t.lexer.level = 1 - - def t_mga_mgaslack(self, t): - r'slack[\s\=]+[\.\d]+' - self.mga = float(t.value.replace('=', ' ').split()[1]) - - def t_mga_mgaiter(self, t): - r'iteration[\s\=]+[\d]+' - self.mga_iter = int(t.value.replace('=', ' ').split()[1]) - - def t_mga_mgaweight(self, t): - r'weight[\s\=]+(integer|normalized|distance)\b' - self.mga_weight = t.value.replace('=', ' ').split()[1] - - def t_mga_end(self, t): - r'\}' - t.lexer.pop_state() - t.lexer.level -= 1 - - def t_ANY_newline(self,t): - r'\n+|(\r\n)+|\r+' # '\n' (In linux) = '\r\n' (In Windows) = '\r' (In Mac OS) - t.lexer.lineno += len(t.value) - - def t_ANY_error(self, t): - if not self.__error: - self.__error.append({'line': [t.lineno, t.lineno], 'index': [t.lexpos, t.lexpos], 'value': t.value[0]}) - elif t.lexpos - self.__error[-1]['index'][-1] == 1: - self.__error[-1]['line' ][-1] = t.lineno - self.__error[-1]['index'][-1] = t.lexpos - self.__error[-1]['value'] += t.value[0] - else: - self.__error.append({'line': [t.lineno, t.lineno], 'index': [t.lexpos, t.lexpos], 'value': t.value[0]}) - t.lexer.skip(1) - - def next_mga(self): - if not self.__mga_todo.empty(): - self.__mga_done.put(self.scenario) - self.scenario = self.__mga_todo.get() - return True - else: - return False - - def build(self,**kwargs): - import ply.lex as lex, os, sys - - db_or_dat = True # True means input file is a db file. False means input is a dat file. 
- - if 'config' in kwargs: - if isfile(kwargs['config']): - self.file_location= abspath(kwargs.pop('config')) - else: - msg = 'No such file exists: {}'.format(kwargs.pop('config')) - raise Exception( msg ) - - self.lexer = lex.lex(module=self, **kwargs) - if self.file_location: - try: - with open(self.file_location, encoding="utf8") as f: - self.lexer.input(f.read()) - except: - with open(self.file_location, 'r') as f: - self.lexer.input(f.read()) - while True: - tok = self.lexer.token() - if not tok: break - - if self.__error: - width = 25 - msg = '\nIllegal character(s) in config file:\n' - msg += '-'*width + '\n' - for e in self.__error: - msg += "Line {} to {}: '{}'\n".format(e['line'][0], e['line'][1], e['value']) - msg += '-'*width + '\n' - sys.stderr.write(msg) - - try: - txt_file = open(self.path_to_logs+os.sep+"Complete_OutputLog.log", "w") - except BaseException as io_exc: - sys.stderr.write("Log file cannot be opened. Please check path. Trying to find:\n"+self.path_to_logs+" folder\n") - txt_file = open("OutputLog.log", "w") - - txt_file.write( msg ) - txt_file.close() - self.abort_temoa = True - - - if not self.dot_dat: - raise Exception('Input file not specified.') - - for i in self.dot_dat: - if not isfile(i): - raise Exception('Cannot locate input file: {}'.format(i)) - i_name, i_ext = splitext(i) - if (i_ext == '.dat') or (i_ext == '.txt'): - db_or_dat = False - elif (i_ext == '.db') or (i_ext == '.sqlite') or (i_ext == '.sqlite3') or (i_ext == 'sqlitedb'): - db_or_dat = True - - if not self.output and db_or_dat: - raise Exception('Output file not specified.') - - if db_or_dat and not isfile(self.output): - raise Exception('Cannot locate output file: {}.'.format(self.output)) - - if not self.scenario and db_or_dat: - raise Exception('Scenario name not specified.') - - if self.mga_iter: - for i in range(self.mga_iter): - self.__mga_todo.put(self.scenario + '_mga_' + str(i)) - - f = open(os.devnull, 'w'); - sys.stdout = f # Suppress the original DB_to_DAT.py output - - counter = 0 - - for ifile in self.dot_dat: - i_name, i_ext = splitext(ifile) - if i_ext != '.dat': - ofile = i_name + '.dat' - db_2_dat(ifile, ofile, self) - self.dot_dat[self.dot_dat.index(ifile)] = ofile - counter += 1 - f.close() - sys.stdout = sys.__stdout__ - if counter > 0: - sys.stderr.write("\n{} .db DD file(s) converted\n".format(counter)) \ No newline at end of file + # Adapted from DB_to_DAT.py + import sqlite3 + import sys + import re + import getopt + + def write_tech_mga(f): + cur.execute("SELECT tech FROM technologies") + f.write("set tech_mga :=\n") + for row in cur: + f.write(row[0] + "\n") + f.write(";\n\n") + + def write_tech_sector(f): + sectors = set() + cur.execute("SELECT sector FROM technologies") + for row in cur: + sectors.add(row[0]) + for s in sectors: + cur.execute("SELECT tech FROM technologies WHERE sector == '" + s + "'") + f.write("set tech_" + s + " :=\n") + for row in cur: + f.write(row[0] + "\n") + f.write(";\n\n") + + def query_table(t_properties, f): + t_type = t_properties[0] # table type (set or param) + t_name = t_properties[1] # table name + t_dtname = t_properties[2] # DAT table name when DB table must be subdivided + t_flag = t_properties[3] # table flag, if any + t_index = t_properties[ + 4 + ] # table column index after which '#' should be specified + if ( + type(t_flag) is list + ): # tech production table has a list for flags; this is currently hard-wired + db_query = ( + "SELECT * FROM " + + t_name + + " WHERE flag=='p' OR flag=='pb' OR flag=='ps'" + ) + 
cur.execute(db_query)
+            if cur.fetchone() is None:
+                return
+            if t_type == "set":
+                f.write("set " + t_dtname + " := \n")
+            else:
+                f.write("param " + t_dtname + " := \n")
+        elif t_flag != "":  # check to see if flag is empty, if not use it to make table
+            db_query = "SELECT * FROM " + t_name + " WHERE flag=='" + t_flag + "'"
+            cur.execute(db_query)
+            if cur.fetchone() is None:
+                return
+            if t_type == "set":
+                f.write("set " + t_dtname + " := \n")
+            else:
+                f.write("param " + t_dtname + " := \n")
+        else:  # Only other possible case is empty flag, then 1-to-1 correspondence between DB and DAT table names
+            db_query = "SELECT * FROM " + t_name
+            cur.execute(db_query)
+            if cur.fetchone() is None:
+                return
+            if t_type == "set":
+                f.write("set " + t_name + " := \n")
+            else:
+                f.write("param " + t_name + " := \n")
+        cur.execute(db_query)
+        if t_index == 0:
+            # make sure that units and descriptions are commented out in DAT file
+            for line in cur:
+                str_row = str(line[0]) + "\n"
+                f.write(str_row)
+                print(str_row)
+        else:
+            for line in cur:
+                before_comments = line[: t_index + 1]
+                before_comments = re.sub("[(]", "", str(before_comments))
+                before_comments = re.sub("[',)]", " ", str(before_comments))
+                after_comments = line[t_index + 2 :]
+                after_comments = re.sub("[(]", "", str(after_comments))
+                after_comments = re.sub("[',)]", " ", str(after_comments))
+                search_afcom = re.search(
+                    r"^\W+$", str(after_comments)
+                )  # Search if after_comments is empty.
+                if not search_afcom:
+                    str_row = before_comments + "# " + after_comments + "\n"
+                else:
+                    str_row = before_comments + "\n"
+                f.write(str_row)
+                print(str_row)
+        f.write(";\n\n")
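An editorial illustration of the before_comments / after_comments logic above: it turns a raw sqlite row tuple into a DAT line, demoting trailing descriptive fields to a `#` comment. This is a minimal sketch, assuming a hypothetical 4-column param row (tech, value, units, notes) and t_index = 1; the row contents are made up for the example:

    import re

    row = ("coal_plant", 0.38, "PJ/PJ", "illustrative only")
    t_index = 1
    # Strip tuple punctuation exactly as query_table does above.
    before = re.sub("[',)]", " ", re.sub("[(]", "", str(row[: t_index + 1])))
    after = re.sub("[',)]", " ", re.sub("[(]", "", str(row[t_index + 2 :])))
    if not re.search(r"^\W+$", after):  # after_comments has content, so append it
        print(before + "# " + after)  # roughly: coal_plant   0.38  # illustrative only
    else:
        print(before)

Note that the slice row[t_index + 2 :] skips one column (the units column here), which mirrors the slicing hard-wired into query_table and the indices listed in table_list below.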
"MaxResource", "", "", 2], + ["param", "GrowthRateMax", "", "", 2], + ["param", "GrowthRateSeed", "", "", 2], + ["param", "LifetimeTech", "", "", 2], + ["param", "LifetimeProcess", "", "", 3], + ["param", "LifetimeLoanTech", "", "", 2], + ["param", "CapacityFactorTech", "", "", 4], + ["param", "CapacityFactorProcess", "", "", 5], + ["param", "Efficiency", "", "", 5], + ["param", "ExistingCapacity", "", "", 3], + ["param", "CostInvest", "", "", 3], + ["param", "CostFixed", "", "", 4], + ["param", "CostVariable", "", "", 4], + ["param", "CapacityCredit", "", "", 4], + ["param", "RampUp", "", "", 2], + ["param", "RampDown", "", "", 2], + ["param", "StorageInitFrac", "", "", 3], + ["param", "StorageDuration", "", "", 2], + ] + + with open(ofile, "w") as f: + f.write("data ;\n\n") + # connect to the database + con = sqlite3.connect(ifile, isolation_level=None) + cur = ( + con.cursor() + ) # a database cursor is a control structure that enables traversal over the records in a database + con.text_factory = ( + str # this ensures data is explored with the correct UTF-8 encoding + ) + + # Return the full list of existing tables. + table_exist = cur.execute( + "SELECT name FROM sqlite_master WHERE type='table'" + ).fetchall() + table_exist = [i[0] for i in table_exist] + + for table in table_list: + if table[1] in table_exist: + query_table(table, f) + if options.mga_weight == "integer": + write_tech_mga(f) + if options.mga_weight == "normalized": + write_tech_sector(f) + + # Making sure the database is empty from the begining for a myopic solve + if options.myopic: + cur.execute( + "DELETE FROM Output_CapacityByPeriodAndTech WHERE scenario=" + + "'" + + str(options.scenario) + + "'" + ) + cur.execute( + "DELETE FROM Output_Emissions WHERE scenario=" + + "'" + + str(options.scenario) + + "'" + ) + cur.execute( + "DELETE FROM Output_Costs WHERE scenario=" + + "'" + + str(options.scenario) + + "'" + ) + cur.execute( + "DELETE FROM Output_Objective WHERE scenario=" + + "'" + + str(options.scenario) + + "'" + ) + cur.execute( + "DELETE FROM Output_VFlow_In WHERE scenario=" + + "'" + + str(options.scenario) + + "'" + ) + cur.execute( + "DELETE FROM Output_VFlow_Out WHERE scenario=" + + "'" + + str(options.scenario) + + "'" + ) + cur.execute( + "DELETE FROM Output_V_Capacity WHERE scenario=" + + "'" + + str(options.scenario) + + "'" + ) + cur.execute( + "DELETE FROM Output_Curtailment WHERE scenario=" + + "'" + + str(options.scenario) + + "'" + ) + cur.execute( + "DELETE FROM Output_Duals WHERE scenario=" + + "'" + + str(options.scenario) + + "'" + ) + cur.execute("VACUUM") + con.commit() + + cur.close() + con.close() + + +class TemoaConfig(object): + states = (("mga", "exclusive"),) + + tokens = ( + "dot_dat", + "output", + "scenario", + "how_to_cite", + "version", + "solver", + "neos", + "keep_pyomo_lp_file", + "saveEXCEL", + "myopic" "myopic_periods" "keep_myopic_databases" "saveDUALS" "saveTEXTFILE", + "mgaslack", + "mgaiter", + "path_to_data", + "path_to_logs", + "mgaweight", + ) + + t_ANY_ignore = "[ \t]" + + def __init__(self, **kwargs): + # Make compatible with Python 2.7 and 3 + try: + import queue + except: + import Queue as queue + + self.__error = list() + self.__mga_todo = queue.Queue() + self.__mga_done = queue.Queue() + + self.file_location = None + self.dot_dat = list() # Use Kevin's name. + self.output = None # May update to a list if multiple output is required. 
+
+
+class TemoaConfig(object):
+    states = (("mga", "exclusive"),)
+
+    tokens = (
+        "dot_dat",
+        "output",
+        "scenario",
+        "how_to_cite",
+        "version",
+        "solver",
+        "neos",
+        "keep_pyomo_lp_file",
+        "saveEXCEL",
+        "myopic",
+        "myopic_periods",
+        "keep_myopic_databases",
+        "saveDUALS",
+        "saveTEXTFILE",
+        "mgaslack",
+        "mgaiter",
+        "path_to_data",
+        "path_to_logs",
+        "mgaweight",
+    )
+
+    t_ANY_ignore = "[ \t]"
+
+    def __init__(self, **kwargs):
+        # Make compatible with Python 2.7 and 3
+        try:
+            import queue
+        except:
+            import Queue as queue
+
+        self.__error = list()
+        self.__mga_todo = queue.Queue()
+        self.__mga_done = queue.Queue()
+
+        self.file_location = None
+        self.dot_dat = list()  # Use Kevin's name.
+        self.output = None  # May update to a list if multiple output is required.
+        self.scenario = None
+        self.saveEXCEL = False
+        self.myopic = False
+        self.myopic_periods = 0
+        self.KeepMyopicDBs = False
+        self.saveDUALS = False
+        self.saveTEXTFILE = False
+        self.how_to_cite = None
+        self.version = False
+        self.neos = False
+        self.generateSolverLP = False
+        self.keepPyomoLP = False
+        self.mga = None  # mga slack value
+        self.mga_iter = None
+        self.mga_weight = None
+
+        # To keep consistent with Kevin's argument parser, will be removed in the future.
+        self.graph_format = None
+        self.show_capacity = False
+        self.graph_type = "separate_vintages"
+        self.use_splines = False
+
+        # Introduced during UI Development
+        self.path_to_data = re.sub(
+            "temoa_model$", "data_files", dirname(abspath(__file__))
+        )  # Path to where automated excel and text log folder will be saved as output.
+        self.path_to_logs = (
+            self.path_to_data + sep + "debug_logs"
+        )  # Path to where debug logs will be generated for each run. By default in debug_logs folder in db_io.
+        self.path_to_lp_files = None
+        self.abort_temoa = False
+
+        if "d_solver" in kwargs.keys():
+            self.solver = kwargs["d_solver"]
+        else:
+            self.solver = None
+
+    def __repr__(self):
+        width = 25
+        spacer = "\n" + "-" * width + "\n"
+        msg = spacer
+        msg += "{:>{}s}: {}\n".format("Config file", width, self.file_location)
+        for i in self.dot_dat:
+            if self.dot_dat.index(i) == 0:
+                msg += "{:>{}s}: {}\n".format("Input file", width, i)
+            else:
+                msg += "{:>25s} {}\n".format(" ", i)
+        msg += "{:>{}s}: {}\n".format("Output file", width, self.output)
+        msg += "{:>{}s}: {}\n".format("Scenario", width, self.scenario)
+        msg += "{:>{}s}: {}\n".format("Spreadsheet output", width, self.saveEXCEL)
+        msg += "{:>{}s}: {}\n".format("Myopic scheme", width, self.myopic)
+        msg += "{:>{}s}: {}\n".format("Myopic years", width, self.myopic_periods)
+        msg += "{:>{}s}: {}\n".format(
+            "Retain myopic databases", width, self.KeepMyopicDBs
+        )
+        msg += spacer
+        msg += "{:>{}s}: {}\n".format("Citation output status", width, self.how_to_cite)
+        msg += "{:>{}s}: {}\n".format("NEOS status", width, self.neos)
+        msg += "{:>{}s}: {}\n".format("Version output status", width, self.version)
+        msg += spacer
+        msg += "{:>{}s}: {}\n".format("Selected solver status", width, self.solver)
+        msg += "{:>{}s}: {}\n".format(
+            "Solver LP write status", width, self.generateSolverLP
+        )
+        msg += "{:>{}s}: {}\n".format("Pyomo LP write status", width, self.keepPyomoLP)
+        msg += spacer
+        msg += "{:>{}s}: {}\n".format("MGA slack value", width, self.mga)
+        msg += "{:>{}s}: {}\n".format("MGA # of iterations", width, self.mga_iter)
+        msg += "{:>{}s}: {}\n".format("MGA weighting method", width, self.mga_weight)
+        msg += "**NOTE: If you are performing MGA runs, navigate to the DAT file and make any modifications to the MGA sets before proceeding."
+ return msg + + def t_ANY_COMMENT(self, t): + r"\#.*" + pass + + def t_dot_dat(self, t): + r"--input[\s\=]+[-\\\/\:\.\~\w]+(\.dat|\.db|\.sqlite)\b" + self.dot_dat.append(abspath(t.value.replace("=", " ").split()[1])) + + def t_output(self, t): + r"--output[\s\=]+[-\\\/\:\.\~\w]+(\.db|\.sqlite)\b" + self.output = abspath(t.value.replace("=", " ").split()[1]) + + def t_scenario(self, t): + r"--scenario[\s\=]+\w+\b" + self.scenario = t.value.replace("=", " ").split()[1] + + def t_saveEXCEL(self, t): + r"--saveEXCEL\b" + self.saveEXCEL = True + + def t_saveDUALS(self, t): + r"--saveDUALS\b" + self.saveDUALS = True + + def t_myopic(self, t): + r"--myopic\b" + self.myopic = True + + def t_myopic_periods(self, t): + r"--myopic_periods[\s\=]+[\d]+" + self.myopic_periods = int(t.value.replace("=", " ").split()[1]) + + def t_keep_myopic_databases(self, t): + r"--keep_myopic_databases\b" + self.KeepMyopicDBs = True + + def t_saveTEXTFILE(self, t): + r"--saveTEXTFILE\b" + self.saveTEXTFILE = True + + def t_path_to_data(self, t): + r"--path_to_data[\s\=]+[-\\\/\:\.\~\w\ ]+\b" + self.path_to_data = abspath(t.value.replace("=", ",").split(",")[1]) + + def t_path_to_logs(self, t): + r"--path_to_logs[\s\=]+[-\\\/\:\.\~\w\ ]+\b" + self.path_to_logs = abspath(t.value.replace("=", ",").split(",")[1]) + + def t_how_to_cite(self, t): + r"--how_to_cite\b" + self.how_to_cite = True + + def t_version(self, t): + r"--version\b" + self.version = True + + def t_neos(self, t): + r"--neos\b" + self.neos = True + + def t_solver(self, t): + r"--solver[\s\=]+\w+\b" + self.solver = t.value.replace("=", " ").split()[1] + + def t_keep_pyomo_lp_file(self, t): + r"--keep_pyomo_lp_file\b" + self.keepPyomoLP = True + + def t_begin_mga(self, t): + r"--mga[\s\=]+\{" + t.lexer.push_state("mga") + t.lexer.level = 1 + + def t_mga_mgaslack(self, t): + r"slack[\s\=]+[\.\d]+" + self.mga = float(t.value.replace("=", " ").split()[1]) + + def t_mga_mgaiter(self, t): + r"iteration[\s\=]+[\d]+" + self.mga_iter = int(t.value.replace("=", " ").split()[1]) + + def t_mga_mgaweight(self, t): + r"weight[\s\=]+(integer|normalized|distance)\b" + self.mga_weight = t.value.replace("=", " ").split()[1] + + def t_mga_end(self, t): + r"\}" + t.lexer.pop_state() + t.lexer.level -= 1 + + def t_ANY_newline(self, t): + r"\n+|(\r\n)+|\r+" # '\n' (In linux) = '\r\n' (In Windows) = '\r' (In Mac OS) + t.lexer.lineno += len(t.value) + + def t_ANY_error(self, t): + if not self.__error: + self.__error.append( + { + "line": [t.lineno, t.lineno], + "index": [t.lexpos, t.lexpos], + "value": t.value[0], + } + ) + elif t.lexpos - self.__error[-1]["index"][-1] == 1: + self.__error[-1]["line"][-1] = t.lineno + self.__error[-1]["index"][-1] = t.lexpos + self.__error[-1]["value"] += t.value[0] + else: + self.__error.append( + { + "line": [t.lineno, t.lineno], + "index": [t.lexpos, t.lexpos], + "value": t.value[0], + } + ) + t.lexer.skip(1) + + def next_mga(self): + if not self.__mga_todo.empty(): + self.__mga_done.put(self.scenario) + self.scenario = self.__mga_todo.get() + return True + else: + return False + + def build(self, **kwargs): + import ply.lex as lex, os, sys + + db_or_dat = True + # True means input file is a db file. False means input is a dat file. 
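For readers unfamiliar with PLY: every t_* method above doubles as a lexer rule, with the function's raw-string docstring serving as that token's regular expression, which is why the methods carry regex patterns instead of documentation. A minimal self-contained sketch of the same convention; the two token names and the sample input string are illustrative only:

    import ply.lex as lex

    tokens = ("scenario", "saveEXCEL")

    def t_scenario(t):
        r"--scenario[\s\=]+\w+\b"
        # PLY reads the docstring above as the regex; this body runs on a match.
        t.value = t.value.replace("=", " ").split()[1]
        return t

    def t_saveEXCEL(t):
        r"--saveEXCEL\b"
        return t

    t_ignore = " \t\n"

    def t_error(t):
        t.lexer.skip(1)  # skip unrecognized characters one at a time

    lexer = lex.lex()
    lexer.input("--scenario test_run --saveEXCEL")
    for tok in iter(lexer.token, None):
        print(tok.type, tok.value)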
+
+        if "config" in kwargs:
+            if isfile(kwargs["config"]):
+                self.file_location = abspath(kwargs.pop("config"))
+            else:
+                msg = "No such file exists: {}".format(kwargs.pop("config"))
+                raise Exception(msg)
+
+        self.lexer = lex.lex(module=self, **kwargs)
+        if self.file_location:
+            try:
+                with open(self.file_location, encoding="utf8") as f:
+                    self.lexer.input(f.read())
+            except:
+                with open(self.file_location, "r") as f:
+                    self.lexer.input(f.read())
+            while True:
+                tok = self.lexer.token()
+                if not tok:
+                    break
+
+        if self.__error:
+            width = 25
+            msg = "\nIllegal character(s) in config file:\n"
+            msg += "-" * width + "\n"
+            for e in self.__error:
+                msg += "Line {} to {}: '{}'\n".format(
+                    e["line"][0], e["line"][1], e["value"]
+                )
+            msg += "-" * width + "\n"
+            sys.stderr.write(msg)
+
+            try:
+                txt_file = open(
+                    self.path_to_logs + os.sep + "Complete_OutputLog.log", "w"
+                )
+            except BaseException as io_exc:
+                sys.stderr.write(
+                    "Log file cannot be opened. Please check path. Trying to find:\n"
+                    + self.path_to_logs
+                    + " folder\n"
+                )
+                txt_file = open("OutputLog.log", "w")
+
+            txt_file.write(msg)
+            txt_file.close()
+            self.abort_temoa = True
+
+        if not self.dot_dat:
+            raise Exception("Input file not specified.")
+
+        for i in self.dot_dat:
+            if not isfile(i):
+                raise Exception("Cannot locate input file: {}".format(i))
+            i_name, i_ext = splitext(i)
+            if (i_ext == ".dat") or (i_ext == ".txt"):
+                db_or_dat = False
+            elif (
+                (i_ext == ".db")
+                or (i_ext == ".sqlite")
+                or (i_ext == ".sqlite3")
+                or (i_ext == ".sqlitedb")
+            ):
+                db_or_dat = True
+
+        if not self.output and db_or_dat:
+            raise Exception("Output file not specified.")
+
+        if db_or_dat and not isfile(self.output):
+            raise Exception("Cannot locate output file: {}.".format(self.output))
+
+        if not self.scenario and db_or_dat:
+            raise Exception("Scenario name not specified.")
+
+        if self.mga_iter:
+            for i in range(self.mga_iter):
+                self.__mga_todo.put(self.scenario + "_mga_" + str(i))
+
+        f = open(os.devnull, "w")
+        sys.stdout = f  # Suppress the original DB_to_DAT.py output
+
+        counter = 0
+
+        for ifile in self.dot_dat:
+            i_name, i_ext = splitext(ifile)
+            if i_ext != ".dat":
+                ofile = i_name + ".dat"
+                db_2_dat(ifile, ofile, self)
+                self.dot_dat[self.dot_dat.index(ifile)] = ofile
+                counter += 1
+        f.close()
+        sys.stdout = sys.__stdout__
+        if counter > 0:
+            sys.stderr.write("\n{} .db DD file(s) converted\n".format(counter))
diff --git a/temoa_model/temoa_initialize.py b/temoa_model/temoa_initialize.py
index 3eb6bbea..c77b076b 100644
--- a/temoa_model/temoa_initialize.py
+++ b/temoa_model/temoa_initialize.py
@@ -32,54 +32,67 @@
 from io import StringIO
 
 try:
-	from pyomo.core import (
-		AbstractModel, BuildAction, Constraint, NonNegativeReals, Reals, Objective, Param,
-		Set, Var, minimize, value
-	)
+    from pyomo.core import (
+        AbstractModel,
+        BuildAction,
+        Constraint,
+        NonNegativeReals,
+        Reals,
+        Objective,
+        Param,
+        Set,
+        Var,
+        minimize,
+        value,
+    )
 except:
-	msg = """
+    msg = """
 Unable to find 'pyomo.core.' Check to make sure pyomo is installed, and that you are running a version compatible with Temoa.
""" - raise ImportError( msg ) - - -class TemoaModel( AbstractModel ): - def __init__( self, *args, **kwds ): - AbstractModel.__init__( self, *args, **kwds ) - self.processInputs = dict() - self.processOutputs = dict() - self.processLoans = dict() - self.activeFlow_rpsditvo = None - self.activeFlow_rpitvo = None - self.activeFlex_rpsditvo = None - self.activeFlex_rpitvo = None - self.activeFlowInStorage_rpsditvo = None - self.activeCurtailment_rpsditvo = None - self.activeActivity_rptv = None - self.activeCapacity_rtv = None - self.activeCapacityAvailable_rpt = None - self.activeCapacityAvailable_rptv = None - self.commodityDStreamProcess = dict() # The downstream process of a commodity during a period - self.commodityUStreamProcess = dict() # The upstream process of a commodity during a period - self.ProcessInputsByOutput = dict() - self.ProcessOutputsByInput = dict() - self.processTechs = dict() - self.processReservePeriods = dict() - self.processVintages = dict() - self.baseloadVintages = dict() - self.curtailmentVintages = dict() - self.storageVintages = dict() - self.rampVintages = dict() - self.inputsplitVintages = dict() - self.inputsplitaverageVintages = dict() - self.outputsplitVintages = dict() - self.ProcessByPeriodAndOutput = dict() - self.exportRegions = dict() - self.importRegions = dict() - self.flex_commodities = set() + raise ImportError(msg) + + +class TemoaModel(AbstractModel): + def __init__(self, *args, **kwds): + AbstractModel.__init__(self, *args, **kwds) + self.processInputs = dict() + self.processOutputs = dict() + self.processLoans = dict() + self.activeFlow_rpsditvo = None + self.activeFlow_rpitvo = None + self.activeFlex_rpsditvo = None + self.activeFlex_rpitvo = None + self.activeFlowInStorage_rpsditvo = None + self.activeCurtailment_rpsditvo = None + self.activeActivity_rptv = None + self.activeCapacity_rtv = None + self.activeCapacityAvailable_rpt = None + self.activeCapacityAvailable_rptv = None + self.commodityDStreamProcess = ( + dict() + ) # The downstream process of a commodity during a period + self.commodityUStreamProcess = ( + dict() + ) # The upstream process of a commodity during a period + self.ProcessInputsByOutput = dict() + self.ProcessOutputsByInput = dict() + self.processTechs = dict() + self.processReservePeriods = dict() + self.processVintages = dict() + self.baseloadVintages = dict() + self.curtailmentVintages = dict() + self.storageVintages = dict() + self.rampVintages = dict() + self.inputsplitVintages = dict() + self.inputsplitaverageVintages = dict() + self.outputsplitVintages = dict() + self.ProcessByPeriodAndOutput = dict() + self.exportRegions = dict() + self.importRegions = dict() + self.flex_commodities = set() # --------------------------------------------------------------- @@ -89,429 +102,462 @@ def __init__( self, *args, **kwds ): # parameter values. # --------------------------------------------------------------- -def isValidProcess ( self, r, p, i, t, v, o ): - """\ + +def isValidProcess(self, r, p, i, t, v, o): + """\ Returns a boolean (True or False) indicating whether, in any given period, a technology can take a specified input carrier and convert it to and specified output carrier. Not currently used. 
""" - index = (r, p, t, v) - if index in self.processInputs and index in self.processOutputs: - if i in self.processInputs[ index ]: - if o in self.processOutputs[ index ]: - return True - - return False - -def get_str_padding ( obj ): - return len(str( obj )) - -def CommodityBalanceConstraintErrorCheck ( vflow_out, vflow_in, r, p, s, d, c ): - if int is type(vflow_out): - flow_in_expr = StringIO() - vflow_in.pprint( ostream=flow_in_expr ) - msg = ("Unable to meet an interprocess '{}' transfer in ({}, {}, {}).\n" - 'No flow out. Constraint flow in:\n {}\n' - 'Possible reasons:\n' - " - Is there a missing period in set 'time_future'?\n" - " - Is there a missing tech in set 'tech_resource'?\n" - " - Is there a missing tech in set 'tech_production'?\n" - " - Is there a missing commodity in set 'commodity_physical'?\n" - ' - Are there missing entries in the Efficiency parameter?\n' - ' - Does a process need a longer LifetimeProcess parameter setting?') - raise Exception( msg.format( - r, c, s, d, p, flow_in_expr.getvalue() - )) - -def CommodityBalanceConstraintErrorCheckAnnual ( vflow_out, vflow_in, r, p, c ): - if int is type(vflow_out): - flow_in_expr = StringIO() - vflow_in.pprint( ostream=flow_in_expr ) - msg = ("Unable to meet an interprocess '{}' transfer in ({}, {}, {}).\n" - 'No flow out. Constraint flow in:\n {}\n' - 'Possible reasons:\n' - " - Is there a missing period in set 'time_future'?\n" - " - Is there a missing tech in set 'tech_resource'?\n" - " - Is there a missing tech in set 'tech_production'?\n" - " - Is there a missing commodity in set 'commodity_physical'?\n" - ' - Are there missing entries in the Efficiency parameter?\n' - ' - Does a process need a longer LifetimeProcess parameter setting?') - raise Exception( msg.format( - r, c, p, flow_in_expr.getvalue() - )) - -def DemandConstraintErrorCheck ( supply, r, p, s, d, dem ): - if int is type( supply ): - msg = ("Error: Demand '{}' for ({}, {}, {}) unable to be met by any " - 'technology.\n\tPossible reasons:\n' - ' - Is the Efficiency parameter missing an entry for this demand?\n' - ' - Does a tech that satisfies this demand need a longer ' - 'LifetimeProcess?\n') - raise Exception( msg.format(r, dem, p, s, d) ) - -def validate_time ( M ): - """ - We check for integer status here, rather then asking Pyomo to do this via - a 'within=Integers' clause in the definition so that we can have a very - specific error message. If we instead use Pyomo's mechanism, the - python invocation of Temoa throws an error (including a traceback) - that has proven to be scary and/or impenetrable for the typical modeler. - """ - for year in M.time_exist: - if isinstance(year, int): continue - - msg = ('Set "time_exist" requires integer-only elements.\n\n Invalid ' - 'element: "{}"') - raise Exception( msg.format( year )) - - for year in M.time_future: - if isinstance(year, int): continue - - msg = ('Set "time_future" requires integer-only elements.\n\n Invalid ' - 'element: "{}"') - raise Exception( msg.format( year )) - - if len( M.time_future ) < 2: - msg = ('Set "time_future" needs at least 2 specified years. Temoa ' - 'treats the integer numbers specified in this set as boundary years ' - 'between periods, and uses them to automatically ascertain the length ' - '(in years) of each period. Note that this means that there will be ' - 'one less optimization period than the number of elements in this set.' 
- ) - raise Exception( msg ) - - # Ensure that the time_exist < time_future - max_exist = max( M.time_exist ) - min_horizon = min( M.time_future ) - - if not ( max_exist < min_horizon ): - msg = ('All items in time_future must be larger than in time_exist.\n' - 'time_exist max: {}\ntime_future min: {}') - raise Exception( msg.format(max_exist, min_horizon) ) - - -def validate_SegFrac ( M ): - - total = sum( i for i in M.SegFrac.itervalues() ) - - if abs(float(total) - 1.0) > 0.001: - # We can't explicitly test for "!= 1.0" because of incremental rounding - # errors associated with the specification of SegFrac by time slice, - # but we check to make sure it is within the specified tolerance. - - key_padding = max(map( get_str_padding, M.SegFrac.sparse_iterkeys() )) - - format = "%%-%ds = %%s" % key_padding - # Works out to something like "%-25s = %s" - - items = sorted( M.SegFrac.items() ) - items = '\n '.join( format % (str(k), v) for k, v in items ) - - msg = ('The values of the SegFrac parameter do not sum to 1. Each item ' - 'in SegFrac represents a fraction of a year, so they must total to ' - '1. Current values:\n {}\n\tsum = {}') - - raise Exception( msg.format(items, total )) - - -def CheckEfficiencyIndices ( M ): - """ - Ensure that there are no unused items in any of the Efficiency index sets. - """ - c_physical = set( i for r, i, t, v, o in M.Efficiency.sparse_iterkeys() ) - techs = set( t for r, i, t, v, o in M.Efficiency.sparse_iterkeys() ) - c_outputs = set( o for r, i, t, v, o in M.Efficiency.sparse_iterkeys() ) - - symdiff = c_physical.symmetric_difference( M.commodity_physical ) - if symdiff: - msg = ('Unused or unspecified physical carriers. Either add or remove ' - 'the following elements to the Set commodity_physical.' - '\n\n Element(s): {}') - symdiff = (str(i) for i in symdiff) - raise Exception( msg.format( ', '.join(symdiff) )) - - symdiff = techs.symmetric_difference( M.tech_all ) - if symdiff: - msg = ('Unused or unspecified technologies. Either add or remove ' - 'the following technology(ies) to the tech_resource or ' - 'tech_production Sets.\n\n Technology(ies): {}') - symdiff = (str(i) for i in symdiff) - raise Exception( msg.format( ', '.join(symdiff) )) - - diff = M.commodity_demand - c_outputs - if diff: - msg = ('Unused or unspecified outputs. Either add or remove the ' - 'following elements to the commodity_demand Set.' - '\n\n Element(s): {}') - diff = (str(i) for i in diff) - raise Exception( msg.format( ', '.join(diff) )) - - -def CreateCapacityFactors ( M ): - """ - Steps to creating capacity factors: - 1. Collect all possible processes - 2. Find the ones _not_ specified in CapacityFactorProcess - 3. Set them, based on CapacityFactorTech. - """ - # Shorter names, for us lazy programmer types - CFP = M.CapacityFactorProcess - - # Step 1 - processes = set( (r, t, v) for r, i, t, v, o in M.Efficiency.sparse_iterkeys() ) - - all_cfs = set( - (r, s, d, t, v) - - for (r, t, v), s, d in cross_product( - processes, - M.time_season, - M.time_of_day - ) - ) - - # Step 2 - unspecified_cfs = all_cfs.difference( CFP.sparse_iterkeys() ) - - # Step 3 - - # Some hackery: We futz with _constructed because Pyomo thinks that this - # Param is already constructed. However, in our view, it is not yet, - # because we're specifically targeting values that have not yet been - # constructed, that we know are valid, and that we will need. 
- - if unspecified_cfs: - # CFP._constructed = False - for r, s, d, t, v in unspecified_cfs: - CFP[r, s, d, t, v] = M.CapacityFactorTech[r, s, d, t] - # CFP._constructed = True - - -def CreateLifetimes ( M ): - """ - Steps to creating lifetimes: - 1. Collect all possible processes - 2. Find the ones _not_ specified in LifetimeProcess and LifetimeLoanProcess - 3. Set them, based on Lifetime*Tech. - """ - - # Shorter names, for us lazy programmer types - LLN = M.LifetimeLoanProcess - LPR = M.LifetimeProcess - - # Step 1 - lprocesses = set( M.LifetimeLoanProcess_rtv ) - processes = set( M.LifetimeProcess_rtv ) - - - # Step 2 - unspecified_loan_lives = lprocesses.difference( LLN.sparse_iterkeys() ) - unspecified_tech_lives = processes.difference( LPR.sparse_iterkeys() ) - - # Step 3 - - # Some hackery: We futz with _constructed because Pyomo thinks that this - # Param is already constructed. However, in our view, it is not yet, - # because we're specifically targeting values that have not yet been - # constructed, that we know are valid, and that we will need. - - if unspecified_loan_lives: - # LLN._constructed = False - for r, t, v in unspecified_loan_lives: - LLN[r, t, v] = M.LifetimeLoanTech[ (r, t) ] - # LLN._constructed = True - - if unspecified_tech_lives: - # LPR._constructed = False - for r, t, v in unspecified_tech_lives: - LPR[r, t, v] = M.LifetimeTech[ (r, t) ] - # LPR._constructed = True - - -def CreateDemands ( M ): - """ - Steps to create the demand distributions - 1. Use Demand keys to ensure that all demands in commodity_demand are used - 2. Find any slices not set in DemandDefaultDistribution, and set them based - on the associated SegFrac slice. - 3. Validate that the DemandDefaultDistribution sums to 1. - 4. Find any per-demand DemandSpecificDistribution values not set, and set - set them from DemandDefaultDistribution. Note that this only sets a - distribution for an end-use demand if the user has *not* specified _any_ - anything for that end-use demand. Thus, it is up to the user to fully - specify the distribution, or not. No in-between. - 5. Validate that the per-demand distributions sum to 1. - """ - - # Step 0: some setup for a couple of reusable items - - # iget(3): 3 = magic number to specify the fourth column. Currently the - # demand in the tuple (r, s, d, dem) - DSD_dem_getter = iget(3) - - # iget(0): 0 = magic number to specify the first column. Currently the - # demand in the tuple (r, s, d, dem) - DSD_region_getter = iget(0) - - # Step 1 - used_dems = set(dem for r, p, dem in M.Demand.sparse_iterkeys()) - unused_dems = sorted(M.commodity_demand.difference( used_dems )) - if unused_dems: - for dem in unused_dems: - msg = ("Warning: Demand '{}' is unused\n") - SE.write( msg.format( dem ) ) - - # Step 2 - DDD = M.DemandDefaultDistribution # Shorter, for us lazy programmer types - unset_defaults = set(M.SegFrac.sparse_iterkeys()) - unset_defaults.difference_update( - DDD.sparse_iterkeys() ) - if unset_defaults: - # Some hackery because Pyomo thinks that this Param is constructed. - # However, in our view, it is not yet, because we're specifically - # targeting values that have not yet been constructed, that we know are - # valid, and that we will need. 
- # DDD._constructed = False - for tslice in unset_defaults: - DDD[ tslice ] = M.SegFrac[ tslice ] - # DDD._constructed = True - - # Step 3 - total = sum( i for i in DDD.itervalues() ) - if abs(value(total) - 1.0) > 0.001: - # We can't explicitly test for "!= 1.0" because of incremental rounding - # errors associated with the specification of demand shares by time slice, - # but we check to make sure it is within the specified tolerance. - - key_padding = max(map( get_str_padding, DDD.sparse_iterkeys() )) - - format = "%%-%ds = %%s" % key_padding - # Works out to something like "%-25s = %s" - - items = sorted( DDD.items() ) - items = '\n '.join( format % (str(k), v) for k, v in items ) - - msg = ('The values of the DemandDefaultDistribution parameter do not ' - 'sum to 1. The DemandDefaultDistribution specifies how end-use ' - 'demands are distributed among the time slices (i.e., time_season, ' - 'time_of_day), so together, the data must total to 1. Current ' - 'values:\n {}\n\tsum = {}') - - raise Exception( msg.format(items, total) ) - - # Step 4 - DSD = M.DemandSpecificDistribution - - demands_specified = set(map( DSD_dem_getter, - (i for i in DSD.sparse_iterkeys()) )) - unset_demand_distributions = used_dems.difference( demands_specified ) - unset_distributions = set( - cross_product(M.regions, M.time_season, M.time_of_day, unset_demand_distributions)) - - if unset_distributions: - # Some hackery because Pyomo thinks that this Param is constructed. - # However, in our view, it is not yet, because we're specifically - # targeting values that have not yet been constructed, that we know are - # valid, and that we will need. - # DSD._constructed = False - for r, s, d, dem in unset_distributions: - DSD[r, s, d, dem] = DDD[s, d] - # DSD._constructed = True - - # Step 5 - used_reg_dems = set((r, dem) for r, p, dem in M.Demand.sparse_iterkeys()) - for (r, dem) in used_reg_dems: - keys = (k for k in DSD.sparse_iterkeys() if DSD_dem_getter(k) == dem and DSD_region_getter(k) == r) - total = sum( DSD[ i ] for i in keys ) - if abs(value(total) - 1.0) > 0.001: - # We can't explicitly test for "!= 1.0" because of incremental rounding - # errors associated with the specification of demand shares by time slice, - # but we check to make sure it is within the specified tolerance. - - keys = [k for k in DSD.sparse_iterkeys() if DSD_dem_getter(k) == dem and DSD_region_getter(k) == r] - key_padding = max(map( get_str_padding, keys )) - - format = "%%-%ds = %%s" % key_padding - # Works out to something like "%-25s = %s" - - items = sorted( (k, DSD[k]) for k in keys ) - items = '\n '.join( format % (str(k), v) for k, v in items ) - - msg = ('The values of the DemandSpecificDistribution parameter do not ' - 'sum to 1. The DemandSpecificDistribution specifies how end-use ' - 'demands are distributed per time-slice (i.e., time_season, ' - 'time_of_day). Within each end-use Demand, then, the distribution ' - 'must total to 1.\n\n Demand-specific distribution in error: ' - ' {}\n\n {}\n\tsum = {}') - - raise Exception( msg.format(dem, items, total) ) - - -def CreateCosts ( M ): - """ - Steps to creating fixed and variable costs: - 1. Collect all possible cost indices (CostFixed, CostVariable) - 2. Find the ones _not_ specified in CostFixed and CostVariable - 3. 
Set them, based on Cost*VintageDefault - """ - - # Shorter names, for us lazy programmer types - CF = M.CostFixed - CV = M.CostVariable - - # Step 1 - fixed_indices = set( M.CostFixed_rptv ) - var_indices = set( M.CostVariable_rptv ) - - # Step 2 - unspecified_fixed_prices = fixed_indices.difference( CF.sparse_iterkeys() ) - unspecified_var_prices = var_indices.difference( CV.sparse_iterkeys() ) - - # Step 3 - - # Some hackery: We futz with _constructed because Pyomo thinks that this - # Param is already constructed. However, in our view, it is not yet, - # because we're specifically targeting values that have not yet been - # constructed, that we know are valid, and that we will need. - - if unspecified_fixed_prices: - # CF._constructed = False - for r, p, t, v in unspecified_fixed_prices: - if (r, t, v) in M.CostFixedVintageDefault: - CF[r, p, t, v] = M.CostFixedVintageDefault[r, t, v] - # CF._constructed = True - - if unspecified_var_prices: - # CV._constructed = False - for r, p, t, v in unspecified_var_prices: - if (r, t, v) in M.CostVariableVintageDefault: - CV[r, p, t, v] = M.CostVariableVintageDefault[r, t, v] - # CV._constructed = True - - -def init_set_time_optimize ( M ): - return sorted( M.time_future )[:-1] - - -def init_set_vintage_exist ( M ): - return sorted( M.time_exist ) - - -def init_set_vintage_optimize ( M ): - return sorted( M.time_optimize ) - - -def CreateRegionalIndices ( M ): - regional_indices = set() - for r_i in M.regions: - if "-" in r_i: - raise Exception("Individual region names can not have '-' in their names: "+str(r_i)) - for r_j in M.regions: - if r_i == r_j: - regional_indices.add(r_i) - else: - regional_indices.add(r_i+"-"+r_j) - return regional_indices + index = (r, p, t, v) + if index in self.processInputs and index in self.processOutputs: + if i in self.processInputs[index]: + if o in self.processOutputs[index]: + return True + + return False + + +def get_str_padding(obj): + return len(str(obj)) + + +def CommodityBalanceConstraintErrorCheck(vflow_out, vflow_in, r, p, s, d, c): + if int is type(vflow_out): + flow_in_expr = StringIO() + vflow_in.pprint(ostream=flow_in_expr) + msg = ( + "Unable to meet an interprocess '{}' transfer in ({}, {}, {}).\n" + "No flow out. Constraint flow in:\n {}\n" + "Possible reasons:\n" + " - Is there a missing period in set 'time_future'?\n" + " - Is there a missing tech in set 'tech_resource'?\n" + " - Is there a missing tech in set 'tech_production'?\n" + " - Is there a missing commodity in set 'commodity_physical'?\n" + " - Are there missing entries in the Efficiency parameter?\n" + " - Does a process need a longer LifetimeProcess parameter setting?" + ) + raise Exception(msg.format(r, c, s, d, p, flow_in_expr.getvalue())) + + +def CommodityBalanceConstraintErrorCheckAnnual(vflow_out, vflow_in, r, p, c): + if int is type(vflow_out): + flow_in_expr = StringIO() + vflow_in.pprint(ostream=flow_in_expr) + msg = ( + "Unable to meet an interprocess '{}' transfer in ({}, {}, {}).\n" + "No flow out. Constraint flow in:\n {}\n" + "Possible reasons:\n" + " - Is there a missing period in set 'time_future'?\n" + " - Is there a missing tech in set 'tech_resource'?\n" + " - Is there a missing tech in set 'tech_production'?\n" + " - Is there a missing commodity in set 'commodity_physical'?\n" + " - Are there missing entries in the Efficiency parameter?\n" + " - Does a process need a longer LifetimeProcess parameter setting?" 
+        )
+        raise Exception(msg.format(r, c, p, flow_in_expr.getvalue()))
+
+
+def DemandConstraintErrorCheck(supply, r, p, s, d, dem):
+    if int is type(supply):
+        msg = (
+            "Error: Demand '{}' for ({}, {}, {}) unable to be met by any "
+            "technology.\n\tPossible reasons:\n"
+            " - Is the Efficiency parameter missing an entry for this demand?\n"
+            " - Does a tech that satisfies this demand need a longer "
+            "LifetimeProcess?\n"
+        )
+        raise Exception(msg.format(r, dem, p, s, d))
+
+
+def validate_time(M):
+    """
+    We check for integer status here, rather than asking Pyomo to do this via
+    a 'within=Integers' clause in the definition so that we can have a very
+    specific error message. If we instead use Pyomo's mechanism, the
+    python invocation of Temoa throws an error (including a traceback)
+    that has proven to be scary and/or impenetrable for the typical modeler.
+    """
+    for year in M.time_exist:
+        if isinstance(year, int):
+            continue
+
+        msg = (
+            'Set "time_exist" requires integer-only elements.\n\n Invalid '
+            'element: "{}"'
+        )
+        raise Exception(msg.format(year))
+
+    for year in M.time_future:
+        if isinstance(year, int):
+            continue
+
+        msg = (
+            'Set "time_future" requires integer-only elements.\n\n Invalid '
+            'element: "{}"'
+        )
+        raise Exception(msg.format(year))
+
+    if len(M.time_future) < 2:
+        msg = (
+            'Set "time_future" needs at least 2 specified years. Temoa '
+            "treats the integer numbers specified in this set as boundary years "
+            "between periods, and uses them to automatically ascertain the length "
+            "(in years) of each period. Note that this means that there will be "
+            "one less optimization period than the number of elements in this set."
+        )
+        raise Exception(msg)
+
+    # Ensure that the time_exist < time_future
+    max_exist = max(M.time_exist)
+    min_horizon = min(M.time_future)
+
+    if not (max_exist < min_horizon):
+        msg = (
+            "All items in time_future must be larger than in time_exist.\n"
+            "time_exist max: {}\ntime_future min: {}"
+        )
+        raise Exception(msg.format(max_exist, min_horizon))
+
+
+def validate_SegFrac(M):
+    total = sum(i for i in M.SegFrac.itervalues())
+
+    if abs(float(total) - 1.0) > 0.001:
+        # We can't explicitly test for "!= 1.0" because of incremental rounding
+        # errors associated with the specification of SegFrac by time slice,
+        # but we check to make sure it is within the specified tolerance.
+
+        key_padding = max(map(get_str_padding, M.SegFrac.sparse_iterkeys()))
+
+        format = "%%-%ds = %%s" % key_padding
+        # Works out to something like "%-25s = %s"
+
+        items = sorted(M.SegFrac.items())
+        items = "\n ".join(format % (str(k), v) for k, v in items)
+
+        msg = (
+            "The values of the SegFrac parameter do not sum to 1. Each item "
+            "in SegFrac represents a fraction of a year, so they must total to "
+            "1. Current values:\n {}\n\tsum = {}"
+        )
+
+        raise Exception(msg.format(items, total))
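The 0.001 tolerance in validate_SegFrac exists because fractional time-slice shares rarely sum to exactly 1.0 in floating point. A toy check with hypothetical SegFrac values, not Temoa data:

    segfrac = {
        ("winter", "day"): 0.25, ("winter", "night"): 0.25,
        ("summer", "day"): 0.25, ("summer", "night"): 0.24,  # illustrative values
    }
    total = sum(segfrac.values())
    # True here: the total is off by 0.01, well past the 0.001 tolerance,
    # so validate_SegFrac would raise its exception for this data.
    print(abs(total - 1.0) > 0.001)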
+
+
+def CheckEfficiencyIndices(M):
+    """
+    Ensure that there are no unused items in any of the Efficiency index sets.
+    """
+    c_physical = set(i for r, i, t, v, o in M.Efficiency.sparse_iterkeys())
+    techs = set(t for r, i, t, v, o in M.Efficiency.sparse_iterkeys())
+    c_outputs = set(o for r, i, t, v, o in M.Efficiency.sparse_iterkeys())
+
+    symdiff = c_physical.symmetric_difference(M.commodity_physical)
+    if symdiff:
+        msg = (
+            "Unused or unspecified physical carriers. Either add or remove "
+            "the following elements to the Set commodity_physical."
+            "\n\n Element(s): {}"
+        )
+        symdiff = (str(i) for i in symdiff)
+        raise Exception(msg.format(", ".join(symdiff)))
+
+    symdiff = techs.symmetric_difference(M.tech_all)
+    if symdiff:
+        msg = (
+            "Unused or unspecified technologies. Either add or remove "
+            "the following technology(ies) to the tech_resource or "
+            "tech_production Sets.\n\n Technology(ies): {}"
+        )
+        symdiff = (str(i) for i in symdiff)
+        raise Exception(msg.format(", ".join(symdiff)))
+
+    diff = M.commodity_demand - c_outputs
+    if diff:
+        msg = (
+            "Unused or unspecified outputs. Either add or remove the "
+            "following elements to the commodity_demand Set."
+            "\n\n Element(s): {}"
+        )
+        diff = (str(i) for i in diff)
+        raise Exception(msg.format(", ".join(diff)))
+
+
+def CreateCapacityFactors(M):
+    """
+    Steps to creating capacity factors:
+    1. Collect all possible processes
+    2. Find the ones _not_ specified in CapacityFactorProcess
+    3. Set them, based on CapacityFactorTech.
+    """
+    # Shorter names, for us lazy programmer types
+    CFP = M.CapacityFactorProcess
+
+    # Step 1
+    processes = set((r, t, v) for r, i, t, v, o in M.Efficiency.sparse_iterkeys())
+
+    all_cfs = set(
+        (r, s, d, t, v)
+        for (r, t, v), s, d in cross_product(processes, M.time_season, M.time_of_day)
+    )
+
+    # Step 2
+    unspecified_cfs = all_cfs.difference(CFP.sparse_iterkeys())
+
+    # Step 3
+
+    # Some hackery: We futz with _constructed because Pyomo thinks that this
+    # Param is already constructed. However, in our view, it is not yet,
+    # because we're specifically targeting values that have not yet been
+    # constructed, that we know are valid, and that we will need.
+
+    if unspecified_cfs:
+        # CFP._constructed = False
+        for r, s, d, t, v in unspecified_cfs:
+            CFP[r, s, d, t, v] = M.CapacityFactorTech[r, s, d, t]
+        # CFP._constructed = True
+
+
+def CreateLifetimes(M):
+    """
+    Steps to creating lifetimes:
+    1. Collect all possible processes
+    2. Find the ones _not_ specified in LifetimeProcess and LifetimeLoanProcess
+    3. Set them, based on Lifetime*Tech.
+    """
+
+    # Shorter names, for us lazy programmer types
+    LLN = M.LifetimeLoanProcess
+    LPR = M.LifetimeProcess
+
+    # Step 1
+    lprocesses = set(M.LifetimeLoanProcess_rtv)
+    processes = set(M.LifetimeProcess_rtv)
+
+    # Step 2
+    unspecified_loan_lives = lprocesses.difference(LLN.sparse_iterkeys())
+    unspecified_tech_lives = processes.difference(LPR.sparse_iterkeys())
+
+    # Step 3
+
+    # Some hackery: We futz with _constructed because Pyomo thinks that this
+    # Param is already constructed. However, in our view, it is not yet,
+    # because we're specifically targeting values that have not yet been
+    # constructed, that we know are valid, and that we will need.
+
+    if unspecified_loan_lives:
+        # LLN._constructed = False
+        for r, t, v in unspecified_loan_lives:
+            LLN[r, t, v] = M.LifetimeLoanTech[(r, t)]
+        # LLN._constructed = True
+
+    if unspecified_tech_lives:
+        # LPR._constructed = False
+        for r, t, v in unspecified_tech_lives:
+            LPR[r, t, v] = M.LifetimeTech[(r, t)]
+        # LPR._constructed = True
+
+
+def CreateDemands(M):
+    """
+    Steps to create the demand distributions
+    1. Use Demand keys to ensure that all demands in commodity_demand are used
+    2. Find any slices not set in DemandDefaultDistribution, and set them based
+       on the associated SegFrac slice.
+    3. Validate that the DemandDefaultDistribution sums to 1.
+    4. Find any per-demand DemandSpecificDistribution values not set, and set
+       them from DemandDefaultDistribution. Note that this only sets a
+       distribution for an end-use demand if the user has *not* specified
+       anything for that end-use demand. Thus, it is up to the user to fully
+       specify the distribution, or not. No in-between.
+    5. Validate that the per-demand distributions sum to 1.
+    """
+
+    # Step 0: some setup for a couple of reusable items
+
+    # iget(3): 3 = magic number to specify the fourth column. Currently the
+    # demand in the tuple (r, s, d, dem)
+    DSD_dem_getter = iget(3)
+
+    # iget(0): 0 = magic number to specify the first column. Currently the
+    # demand in the tuple (r, s, d, dem)
+    DSD_region_getter = iget(0)
+
+    # Step 1
+    used_dems = set(dem for r, p, dem in M.Demand.sparse_iterkeys())
+    unused_dems = sorted(M.commodity_demand.difference(used_dems))
+    if unused_dems:
+        for dem in unused_dems:
+            msg = "Warning: Demand '{}' is unused\n"
+            SE.write(msg.format(dem))
+
+    # Step 2
+    DDD = M.DemandDefaultDistribution  # Shorter, for us lazy programmer types
+    unset_defaults = set(M.SegFrac.sparse_iterkeys())
+    unset_defaults.difference_update(DDD.sparse_iterkeys())
+    if unset_defaults:
+        # Some hackery because Pyomo thinks that this Param is constructed.
+        # However, in our view, it is not yet, because we're specifically
+        # targeting values that have not yet been constructed, that we know are
+        # valid, and that we will need.
+        # DDD._constructed = False
+        for tslice in unset_defaults:
+            DDD[tslice] = M.SegFrac[tslice]
+        # DDD._constructed = True
+
+    # Step 3
+    total = sum(i for i in DDD.itervalues())
+    if abs(value(total) - 1.0) > 0.001:
+        # We can't explicitly test for "!= 1.0" because of incremental rounding
+        # errors associated with the specification of demand shares by time slice,
+        # but we check to make sure it is within the specified tolerance.
+
+        key_padding = max(map(get_str_padding, DDD.sparse_iterkeys()))
+
+        format = "%%-%ds = %%s" % key_padding
+        # Works out to something like "%-25s = %s"
+
+        items = sorted(DDD.items())
+        items = "\n ".join(format % (str(k), v) for k, v in items)
+
+        msg = (
+            "The values of the DemandDefaultDistribution parameter do not "
+            "sum to 1. The DemandDefaultDistribution specifies how end-use "
+            "demands are distributed among the time slices (i.e., time_season, "
+            "time_of_day), so together, the data must total to 1. Current "
+            "values:\n {}\n\tsum = {}"
+        )
+
+        raise Exception(msg.format(items, total))
+
+    # Step 4
+    DSD = M.DemandSpecificDistribution
+
+    demands_specified = set(map(DSD_dem_getter, (i for i in DSD.sparse_iterkeys())))
+    unset_demand_distributions = used_dems.difference(demands_specified)
+    unset_distributions = set(
+        cross_product(
+            M.regions, M.time_season, M.time_of_day, unset_demand_distributions
+        )
+    )
+
+    if unset_distributions:
+        # Some hackery because Pyomo thinks that this Param is constructed.
+        # However, in our view, it is not yet, because we're specifically
+        # targeting values that have not yet been constructed, that we know are
+        # valid, and that we will need.
+ # DSD._constructed = False + for r, s, d, dem in unset_distributions: + DSD[r, s, d, dem] = DDD[s, d] + # DSD._constructed = True + + # Step 5 + used_reg_dems = set((r, dem) for r, p, dem in M.Demand.sparse_iterkeys()) + for r, dem in used_reg_dems: + keys = ( + k + for k in DSD.sparse_iterkeys() + if DSD_dem_getter(k) == dem and DSD_region_getter(k) == r + ) + total = sum(DSD[i] for i in keys) + if abs(value(total) - 1.0) > 0.001: + # We can't explicitly test for "!= 1.0" because of incremental rounding + # errors associated with the specification of demand shares by time slice, + # but we check to make sure it is within the specified tolerance. + + keys = [ + k + for k in DSD.sparse_iterkeys() + if DSD_dem_getter(k) == dem and DSD_region_getter(k) == r + ] + key_padding = max(map(get_str_padding, keys)) + + format = "%%-%ds = %%s" % key_padding + # Works out to something like "%-25s = %s" + + items = sorted((k, DSD[k]) for k in keys) + items = "\n ".join(format % (str(k), v) for k, v in items) + + msg = ( + "The values of the DemandSpecificDistribution parameter do not " + "sum to 1. The DemandSpecificDistribution specifies how end-use " + "demands are distributed per time-slice (i.e., time_season, " + "time_of_day). Within each end-use Demand, then, the distribution " + "must total to 1.\n\n Demand-specific distribution in error: " + " {}\n\n {}\n\tsum = {}" + ) + + raise Exception(msg.format(dem, items, total)) + + +def CreateCosts(M): + """ + Steps to creating fixed and variable costs: + 1. Collect all possible cost indices (CostFixed, CostVariable) + 2. Find the ones _not_ specified in CostFixed and CostVariable + 3. Set them, based on Cost*VintageDefault + """ + + # Shorter names, for us lazy programmer types + CF = M.CostFixed + CV = M.CostVariable + + # Step 1 + fixed_indices = set(M.CostFixed_rptv) + var_indices = set(M.CostVariable_rptv) + + # Step 2 + unspecified_fixed_prices = fixed_indices.difference(CF.sparse_iterkeys()) + unspecified_var_prices = var_indices.difference(CV.sparse_iterkeys()) + + # Step 3 + + # Some hackery: We futz with _constructed because Pyomo thinks that this + # Param is already constructed. However, in our view, it is not yet, + # because we're specifically targeting values that have not yet been + # constructed, that we know are valid, and that we will need. + + if unspecified_fixed_prices: + # CF._constructed = False + for r, p, t, v in unspecified_fixed_prices: + if (r, t, v) in M.CostFixedVintageDefault: + CF[r, p, t, v] = M.CostFixedVintageDefault[r, t, v] + # CF._constructed = True + + if unspecified_var_prices: + # CV._constructed = False + for r, p, t, v in unspecified_var_prices: + if (r, t, v) in M.CostVariableVintageDefault: + CV[r, p, t, v] = M.CostVariableVintageDefault[r, t, v] + # CV._constructed = True + + +def init_set_time_optimize(M): + return sorted(M.time_future)[:-1] + + +def init_set_vintage_exist(M): + return sorted(M.time_exist) + + +def init_set_vintage_optimize(M): + return sorted(M.time_optimize) + + +def CreateRegionalIndices(M): + regional_indices = set() + for r_i in M.regions: + if "-" in r_i: + raise Exception( + "Individual region names can not have '-' in their names: " + str(r_i) + ) + for r_j in M.regions: + if r_i == r_j: + regional_indices.add(r_i) + else: + regional_indices.add(r_i + "-" + r_j) + return regional_indices # --------------------------------------------------------------- @@ -524,404 +570,435 @@ def CreateRegionalIndices ( M ): # dictionaries that serve as the basis of the sparse indices. 
# --------------------------------------------------------------- -def CreateSparseDicts ( M ): - """ - This function creates customized dictionaries with only the key / value pairs - defined in the associated datafile. The dictionaries defined here are used to - do the sparse matrix indexing for all parameters, variables, and constraints - in the model. The function works by looping over the sparse indices in the - Efficiency table. For each iteration of the loop, the appropriate key / value - pairs are defined as appropriate for each dictionary. - """ - l_first_period = min( M.time_future ) - l_exist_indices = M.ExistingCapacity.sparse_keys() - l_used_techs = set() - - # The basis for the dictionaries are the sparse keys defined in the - # Efficiency table. - for r, i, t, v, o in M.Efficiency.sparse_iterkeys(): - if "-" in r and t not in M.tech_exchange: - raise Exception("Technology "+str(t)+" seems to be an exchange \ - technology but it is not specified in tech_exchange set") - l_process = (r, t, v) - l_lifetime = value(M.LifetimeProcess[ l_process ]) - # Do some error checking for the user. - if v in M.vintage_exist: - if l_process not in l_exist_indices: - msg = ('Warning: %s has a specified Efficiency, but does not ' - 'have any existing install base (ExistingCapacity).\n') - SE.write( msg % str(l_process) ) - continue - if 0 == M.ExistingCapacity[ l_process ]: - msg = ('Notice: Unnecessary specification of ExistingCapacity ' - '%s. If specifying a capacity of zero, you may simply ' - 'omit the declaration.\n') - SE.write( msg % str(l_process) ) - continue - if v + l_lifetime <= l_first_period: - msg = ('\nWarning: %s specified as ExistingCapacity, but its ' - 'LifetimeProcess parameter does not extend past the beginning ' - 'of time_future. (i.e. useless parameter)' - '\n\tLifetime: %s' - '\n\tFirst period: %s\n') - SE.write( msg % (l_process, l_lifetime, l_first_period) ) - continue - - eindex = (r, i, t, v, o) - if 0 == M.Efficiency[ eindex ]: - msg = ('\nNotice: Unnecessary specification of Efficiency %s. If ' - 'specifying an efficiency of zero, you may simply omit the ' - 'declaration.\n') - SE.write( msg % str(eindex) ) - continue - - l_used_techs.add( t ) - - if t in M.tech_flex: - M.flex_commodities.add(o) - - # Add in the period (p) index, since it's not included in the efficiency - # table. - for p in M.time_optimize: - # Can't build a vintage before it's been invented - if p < v: continue - - pindex = (r, p, t, v) - - if v in M.time_optimize: - l_loan_life = value(M.LifetimeLoanProcess[ l_process ]) - if v + l_loan_life >= p: - M.processLoans[ pindex ] = True - - # if tech is no longer active, don't include it - if v + l_lifetime <= p: continue - - # Here we utilize the indices in a given iteration of the loop to - # create the dictionary keys, and initialize the associated values - # to an empty set. 
- if pindex not in M.processInputs: - M.processInputs[ pindex ] = set() - M.processOutputs[ pindex ] = set() - if (r, p, i) not in M.commodityDStreamProcess: - M.commodityDStreamProcess[r, p, i] = set() - if (r, p, o) not in M.commodityUStreamProcess: - M.commodityUStreamProcess[r, p, o] = set() - if (r, p, t, v, i) not in M.ProcessOutputsByInput: - M.ProcessOutputsByInput[r, p, t, v, i] = set() - if (r, p, t, v, o) not in M.ProcessInputsByOutput: - M.ProcessInputsByOutput[r, p, t, v, o] = set() - if (r, t) not in M.processTechs: - M.processTechs[r, t] = set() - # While the dictionary just above indentifies the vintage (v) - # associated with each (r,p,t) we need to do the same below for various - # technology subsets. - if (r, p, t) not in M.processVintages: - M.processVintages[r, p, t] = set() - if t in M.tech_curtailment and (r, p, t) not in M.curtailmentVintages: - M.curtailmentVintages[r, p, t] = set() - if t in M.tech_baseload and (r, p, t) not in M.baseloadVintages: - M.baseloadVintages[r, p, t] = set() - if t in M.tech_storage and (r, p, t) not in M.storageVintages: - M.storageVintages[r, p, t] = set() - if t in M.tech_ramping and (r, p, t) not in M.rampVintages: - M.rampVintages[r, p,t] = set() - if (r, p, i, t) in M.TechInputSplit.sparse_iterkeys() and (r, p, i, t) not in M.inputsplitVintages: - M.inputsplitVintages[r,p,i,t] = set() - if (r, p, i, t) in M.TechInputSplitAverage.sparse_iterkeys() and (r, p, i, t) not in M.inputsplitaverageVintages: - M.inputsplitaverageVintages[r,p,i,t] = set() - if (r, p, t, o) in M.TechOutputSplit.sparse_iterkeys() and (r, p, t, o) not in M.outputsplitVintages: - M.outputsplitVintages[r,p,t,o] = set() - if t in M.tech_resource and (r,p,o) not in M.ProcessByPeriodAndOutput: - M.ProcessByPeriodAndOutput[r,p,o] = set() - if t in M.tech_reserve and (r, p) not in M.processReservePeriods: - M.processReservePeriods[r, p] = set() - if t in M.tech_exchange and (r[:r.find("-")], p, i) not in M.exportRegions: - M.exportRegions[r[:r.find("-")], p, i] = set() #since t is in M.tech_exchange, r here has *-* format (e.g. 'US-Mexico'). - #r[:r.find("-")] extracts the region index before the "-". - if t in M.tech_exchange and (r[r.find("-")+1:], p, o) not in M.importRegions: - M.importRegions[r[r.find("-")+1:], p, o] = set() - - # Now that all of the keys have been defined, and values initialized - # to empty sets, we fill in the appropriate values for each - # dictionary. 
- M.processInputs[ pindex ].add( i ) - M.processOutputs[pindex ].add( o ) - M.commodityDStreamProcess[r, p, i].add( (t, v) ) - M.commodityUStreamProcess[r, p, o].add( (t, v) ) - M.ProcessOutputsByInput[r, p, t, v, i].add( o ) - M.ProcessInputsByOutput[r, p, t, v, o].add( i ) - M.processTechs[r, t].add( (p, v) ) - M.processVintages[r, p, t].add( v ) - if t in M.tech_curtailment: - M.curtailmentVintages[r, p, t].add( v ) - if t in M.tech_baseload: - M.baseloadVintages[r, p, t].add( v ) - if t in M.tech_storage: - M.storageVintages[r, p, t].add( v ) - if t in M.tech_ramping: - M.rampVintages[r, p, t].add( v ) - if (r, p, i, t) in M.TechInputSplit.sparse_iterkeys(): - M.inputsplitVintages[r,p,i,t].add( v ) - if (r, p, i, t) in M.TechInputSplitAverage.sparse_iterkeys(): - M.inputsplitaverageVintages[r,p,i,t].add( v ) - if (r, p, t, o) in M.TechOutputSplit.sparse_iterkeys(): - M.outputsplitVintages[r,p,t,o].add( v ) - if t in M.tech_resource: - M.ProcessByPeriodAndOutput[r,p,o].add(( i,t,v )) - if t in M.tech_reserve: - M.processReservePeriods[r, p].add( (t,v) ) - if t in M.tech_exchange: - M.exportRegions[r[:r.find("-")], p, i].add((r[r.find("-")+1:], t, v, o)) - if t in M.tech_exchange: - M.importRegions[r[r.find("-")+1:], p, o].add((r[:r.find("-")], t, v, i)) - - for (r, i, t, v, o) in M.Efficiency.sparse_iterkeys(): - if t in M.tech_exchange: - reg = r.split('-')[0] - for (r1, i1, t1, v1, o1) in M.Efficiency.sparse_iterkeys(): - if (r1==reg) & (o1==i): - for p in M.time_optimize: - if (r1, p, o1) not in M.commodityDStreamProcess: - msg = ('The {} process in region {} has no downstream process other ' - 'than a transport ({}) process. This will cause the commodity balance ' - 'constraint to fail. Add a dummy technology downstream of the {} ' - 'process to the Efficiency table to avoid this issue. ' - 'The dummy technology should have the same region and vintage as the {} process, ' - 'an efficiency of 100%, with the {} commodity as the input and output. 
' - 'The dummy technology may also need a corresponding row in the ExistingCapacity ' - 'table with capacity values that equal the {} technology.') - raise Exception( msg.format(t1, r1, t, t1, t1, o1, t1) ) - - l_unused_techs = M.tech_all - l_used_techs - if l_unused_techs: - msg = ("Notice: '{}' specified as technology, but it is not utilized in " - 'the Efficiency parameter.\n') - for i in sorted( l_unused_techs ): - SE.write( msg.format( i )) - - M.activeFlow_rpsditvo = set( - (r, p, s, d, i, t, v, o) - - for r,p,t in M.processVintages.keys() if t not in M.tech_annual - for v in M.processVintages[ r, p, t ] - for i in M.processInputs[ r, p, t, v ] - for o in M.ProcessOutputsByInput[ r, p, t, v, i ] - for s in M.time_season - for d in M.time_of_day - ) - - M.activeFlow_rpitvo = set( - (r, p, i, t, v, o) - - for r,p,t in M.processVintages.keys() if t in M.tech_annual - for v in M.processVintages[ r, p, t ] - for i in M.processInputs[ r, p, t, v ] - for o in M.ProcessOutputsByInput[ r, p, t, v, i ] - ) - - M.activeFlex_rpsditvo = set( - (r, p, s, d, i, t, v, o) - - for r,p,t in M.processVintages.keys() if (t not in M.tech_annual) and (t in M.tech_flex) - for v in M.processVintages[ r, p, t ] - for i in M.processInputs[ r, p, t, v ] - for o in M.ProcessOutputsByInput[ r, p, t, v, i ] - for s in M.time_season - for d in M.time_of_day - ) - - M.activeFlex_rpitvo = set( - (r, p, i, t, v, o) - - for r,p,t in M.processVintages.keys() if (t in M.tech_annual) and (t in M.tech_flex) - for v in M.processVintages[ r, p, t ] - for i in M.processInputs[ r, p, t, v ] - for o in M.ProcessOutputsByInput[ r, p, t, v, i ] - ) - - M.activeFlowInStorage_rpsditvo = set( - (r, p, s, d, i, t, v, o) - - for r,p,t in M.processVintages.keys() if t in M.tech_storage - for v in M.processVintages[ r, p, t ] - for i in M.processInputs[ r, p, t, v ] - for o in M.ProcessOutputsByInput[ r, p, t, v, i ] - for s in M.time_season - for d in M.time_of_day - ) - - - - M.activeCurtailment_rpsditvo = set( - (r, p, s, d, i, t, v, o) - - for r,p,t in M.curtailmentVintages.keys() - for v in M.curtailmentVintages[ r, p, t ] - for i in M.processInputs[ r, p, t, v ] - for o in M.ProcessOutputsByInput[ r, p, t, v, i ] - for s in M.time_season - for d in M.time_of_day - ) - - M.activeActivity_rptv = set( - (r, p, t, v) - - for r,p,t in M.processVintages.keys() - for v in M.processVintages[ r, p, t ] - ) - - M.activeCapacity_rtv = set( - (r, t, v) - - for r,p,t in M.processVintages.keys() - for v in M.processVintages[ r, p, t ] - ) - - M.activeCapacityAvailable_rpt = set( - (r, p, t) - - for r,p,t in M.processVintages.keys() - if M.processVintages[ r, p, t ] - ) - - M.activeCapacityAvailable_rptv = set( - (r, p, t, v) - - for r,p,t in M.processVintages.keys() - for v in M.processVintages[ r, p, t ] - ) + +def CreateSparseDicts(M): + """ + This function creates customized dictionaries with only the key / value pairs + defined in the associated datafile. The dictionaries defined here are used to + do the sparse matrix indexing for all parameters, variables, and constraints + in the model. The function works by looping over the sparse indices in the + Efficiency table. For each iteration of the loop, the appropriate key / value + pairs are defined as appropriate for each dictionary. + """ + l_first_period = min(M.time_future) + l_exist_indices = M.ExistingCapacity.sparse_keys() + l_used_techs = set() + + # The basis for the dictionaries are the sparse keys defined in the + # Efficiency table. 
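Before the loop below, it may help to see what these sparse keys look like. Each key in the Efficiency table is a (region, input_comm, tech, vintage, output_comm) 5-tuple, and only the rows actually present in the datafile are visited. Here is a tiny stand-in with hypothetical commodity and technology names, using a plain dict in place of the Pyomo Param:

# Hypothetical sparse Efficiency data; absent combinations simply do not exist.
efficiency = {
    # (region, input_comm, tech, vintage, output_comm): efficiency
    ("US", "natural_gas", "E_NGCC", 2020, "electricity"): 0.55,
    ("US", "solar", "E_SOLPV", 2025, "electricity"): 1.00,
}

for (r, i, t, v, o), eff in efficiency.items():
    # Mirrors M.Efficiency.sparse_iterkeys(): only defined rows are seen.
    print(r, i, t, v, o, eff)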
+    for r, i, t, v, o in M.Efficiency.sparse_iterkeys():
+        if "-" in r and t not in M.tech_exchange:
+            raise Exception(
+                "Technology "
+                + str(t)
+                + " seems to be an exchange technology, but it is not "
+                "specified in the tech_exchange set"
+            )
+        l_process = (r, t, v)
+        l_lifetime = value(M.LifetimeProcess[l_process])
+        # Do some error checking for the user.
+        if v in M.vintage_exist:
+            if l_process not in l_exist_indices:
+                msg = (
+                    "Warning: %s has a specified Efficiency, but does not "
+                    "have any existing install base (ExistingCapacity).\n"
+                )
+                SE.write(msg % str(l_process))
+                continue
+            if 0 == M.ExistingCapacity[l_process]:
+                msg = (
+                    "Notice: Unnecessary specification of ExistingCapacity "
+                    "%s. If specifying a capacity of zero, you may simply "
+                    "omit the declaration.\n"
+                )
+                SE.write(msg % str(l_process))
+                continue
+            if v + l_lifetime <= l_first_period:
+                msg = (
+                    "\nWarning: %s specified as ExistingCapacity, but its "
+                    "LifetimeProcess parameter does not extend past the beginning "
+                    "of time_future. (i.e., a useless parameter)"
+                    "\n\tLifetime: %s"
+                    "\n\tFirst period: %s\n"
+                )
+                SE.write(msg % (l_process, l_lifetime, l_first_period))
+                continue
+
+        eindex = (r, i, t, v, o)
+        if 0 == M.Efficiency[eindex]:
+            msg = (
+                "\nNotice: Unnecessary specification of Efficiency %s. If "
+                "specifying an efficiency of zero, you may simply omit the "
+                "declaration.\n"
+            )
+            SE.write(msg % str(eindex))
+            continue
+
+        l_used_techs.add(t)
+
+        if t in M.tech_flex:
+            M.flex_commodities.add(o)
+
+        # Add in the period (p) index, since it's not included in the efficiency
+        # table.
+        for p in M.time_optimize:
+            # Can't build a vintage before it's been invented
+            if p < v:
+                continue
+
+            pindex = (r, p, t, v)
+
+            if v in M.time_optimize:
+                l_loan_life = value(M.LifetimeLoanProcess[l_process])
+                if v + l_loan_life >= p:
+                    M.processLoans[pindex] = True
+
+            # if tech is no longer active, don't include it
+            if v + l_lifetime <= p:
+                continue
+
+            # Here we utilize the indices in a given iteration of the loop to
+            # create the dictionary keys, and initialize the associated values
+            # to an empty set.
+            if pindex not in M.processInputs:
+                M.processInputs[pindex] = set()
+                M.processOutputs[pindex] = set()
+            if (r, p, i) not in M.commodityDStreamProcess:
+                M.commodityDStreamProcess[r, p, i] = set()
+            if (r, p, o) not in M.commodityUStreamProcess:
+                M.commodityUStreamProcess[r, p, o] = set()
+            if (r, p, t, v, i) not in M.ProcessOutputsByInput:
+                M.ProcessOutputsByInput[r, p, t, v, i] = set()
+            if (r, p, t, v, o) not in M.ProcessInputsByOutput:
+                M.ProcessInputsByOutput[r, p, t, v, o] = set()
+            if (r, t) not in M.processTechs:
+                M.processTechs[r, t] = set()
+            # While the dictionary just above identifies the vintage (v)
+            # associated with each (r,p,t), we need to do the same below for
+            # various technology subsets.
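The guard-then-initialize blocks above and below are the classic dict-of-sets pattern for sparse indexing. A short sketch of the same pattern under hypothetical (region, tech, vintage) rows, alongside the collections.defaultdict equivalent that some readers may find more familiar:

from collections import defaultdict

rows = [("US", "E_NGCC", 2020), ("US", "E_NGCC", 2025), ("MEX", "E_SOLPV", 2020)]

# Pattern used here: test for the key, create the empty set exactly once.
process_vintages = {}
for r, t, v in rows:
    if (r, t) not in process_vintages:
        process_vintages[r, t] = set()
    process_vintages[r, t].add(v)

# defaultdict equivalent: missing keys get an empty set automatically.
by_default = defaultdict(set)
for r, t, v in rows:
    by_default[r, t].add(v)

assert process_vintages == dict(by_default)

The explicit guards keep the populated structures plain dicts, which avoids accidentally creating keys on a later lookup.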
+ if (r, p, t) not in M.processVintages: + M.processVintages[r, p, t] = set() + if t in M.tech_curtailment and (r, p, t) not in M.curtailmentVintages: + M.curtailmentVintages[r, p, t] = set() + if t in M.tech_baseload and (r, p, t) not in M.baseloadVintages: + M.baseloadVintages[r, p, t] = set() + if t in M.tech_storage and (r, p, t) not in M.storageVintages: + M.storageVintages[r, p, t] = set() + if t in M.tech_ramping and (r, p, t) not in M.rampVintages: + M.rampVintages[r, p, t] = set() + if (r, p, i, t) in M.TechInputSplit.sparse_iterkeys() and ( + r, + p, + i, + t, + ) not in M.inputsplitVintages: + M.inputsplitVintages[r, p, i, t] = set() + if (r, p, i, t) in M.TechInputSplitAverage.sparse_iterkeys() and ( + r, + p, + i, + t, + ) not in M.inputsplitaverageVintages: + M.inputsplitaverageVintages[r, p, i, t] = set() + if (r, p, t, o) in M.TechOutputSplit.sparse_iterkeys() and ( + r, + p, + t, + o, + ) not in M.outputsplitVintages: + M.outputsplitVintages[r, p, t, o] = set() + if t in M.tech_resource and (r, p, o) not in M.ProcessByPeriodAndOutput: + M.ProcessByPeriodAndOutput[r, p, o] = set() + if t in M.tech_reserve and (r, p) not in M.processReservePeriods: + M.processReservePeriods[r, p] = set() + if t in M.tech_exchange and (r[: r.find("-")], p, i) not in M.exportRegions: + M.exportRegions[ + r[: r.find("-")], p, i + ] = ( + set() + ) # since t is in M.tech_exchange, r here has *-* format (e.g. 'US-Mexico'). + # r[:r.find("-")] extracts the region index before the "-". + if ( + t in M.tech_exchange + and (r[r.find("-") + 1 :], p, o) not in M.importRegions + ): + M.importRegions[r[r.find("-") + 1 :], p, o] = set() + + # Now that all of the keys have been defined, and values initialized + # to empty sets, we fill in the appropriate values for each + # dictionary. + M.processInputs[pindex].add(i) + M.processOutputs[pindex].add(o) + M.commodityDStreamProcess[r, p, i].add((t, v)) + M.commodityUStreamProcess[r, p, o].add((t, v)) + M.ProcessOutputsByInput[r, p, t, v, i].add(o) + M.ProcessInputsByOutput[r, p, t, v, o].add(i) + M.processTechs[r, t].add((p, v)) + M.processVintages[r, p, t].add(v) + if t in M.tech_curtailment: + M.curtailmentVintages[r, p, t].add(v) + if t in M.tech_baseload: + M.baseloadVintages[r, p, t].add(v) + if t in M.tech_storage: + M.storageVintages[r, p, t].add(v) + if t in M.tech_ramping: + M.rampVintages[r, p, t].add(v) + if (r, p, i, t) in M.TechInputSplit.sparse_iterkeys(): + M.inputsplitVintages[r, p, i, t].add(v) + if (r, p, i, t) in M.TechInputSplitAverage.sparse_iterkeys(): + M.inputsplitaverageVintages[r, p, i, t].add(v) + if (r, p, t, o) in M.TechOutputSplit.sparse_iterkeys(): + M.outputsplitVintages[r, p, t, o].add(v) + if t in M.tech_resource: + M.ProcessByPeriodAndOutput[r, p, o].add((i, t, v)) + if t in M.tech_reserve: + M.processReservePeriods[r, p].add((t, v)) + if t in M.tech_exchange: + M.exportRegions[r[: r.find("-")], p, i].add( + (r[r.find("-") + 1 :], t, v, o) + ) + if t in M.tech_exchange: + M.importRegions[r[r.find("-") + 1 :], p, o].add( + (r[: r.find("-")], t, v, i) + ) + + for r, i, t, v, o in M.Efficiency.sparse_iterkeys(): + if t in M.tech_exchange: + reg = r.split("-")[0] + for r1, i1, t1, v1, o1 in M.Efficiency.sparse_iterkeys(): + if (r1 == reg) & (o1 == i): + for p in M.time_optimize: + if (r1, p, o1) not in M.commodityDStreamProcess: + msg = ( + "The {} process in region {} has no downstream process other " + "than a transport ({}) process. This will cause the commodity balance " + "constraint to fail. 
Add a dummy technology downstream of the {} " + "process to the Efficiency table to avoid this issue. " + "The dummy technology should have the same region and vintage as the {} process, " + "an efficiency of 100%, with the {} commodity as the input and output. " + "The dummy technology may also need a corresponding row in the ExistingCapacity " + "table with capacity values that equal the {} technology." + ) + raise Exception(msg.format(t1, r1, t, t1, t1, o1, t1)) + + l_unused_techs = M.tech_all - l_used_techs + if l_unused_techs: + msg = ( + "Notice: '{}' specified as technology, but it is not utilized in " + "the Efficiency parameter.\n" + ) + for i in sorted(l_unused_techs): + SE.write(msg.format(i)) + + M.activeFlow_rpsditvo = set( + (r, p, s, d, i, t, v, o) + for r, p, t in M.processVintages.keys() + if t not in M.tech_annual + for v in M.processVintages[r, p, t] + for i in M.processInputs[r, p, t, v] + for o in M.ProcessOutputsByInput[r, p, t, v, i] + for s in M.time_season + for d in M.time_of_day + ) + + M.activeFlow_rpitvo = set( + (r, p, i, t, v, o) + for r, p, t in M.processVintages.keys() + if t in M.tech_annual + for v in M.processVintages[r, p, t] + for i in M.processInputs[r, p, t, v] + for o in M.ProcessOutputsByInput[r, p, t, v, i] + ) + + M.activeFlex_rpsditvo = set( + (r, p, s, d, i, t, v, o) + for r, p, t in M.processVintages.keys() + if (t not in M.tech_annual) and (t in M.tech_flex) + for v in M.processVintages[r, p, t] + for i in M.processInputs[r, p, t, v] + for o in M.ProcessOutputsByInput[r, p, t, v, i] + for s in M.time_season + for d in M.time_of_day + ) + + M.activeFlex_rpitvo = set( + (r, p, i, t, v, o) + for r, p, t in M.processVintages.keys() + if (t in M.tech_annual) and (t in M.tech_flex) + for v in M.processVintages[r, p, t] + for i in M.processInputs[r, p, t, v] + for o in M.ProcessOutputsByInput[r, p, t, v, i] + ) + + M.activeFlowInStorage_rpsditvo = set( + (r, p, s, d, i, t, v, o) + for r, p, t in M.processVintages.keys() + if t in M.tech_storage + for v in M.processVintages[r, p, t] + for i in M.processInputs[r, p, t, v] + for o in M.ProcessOutputsByInput[r, p, t, v, i] + for s in M.time_season + for d in M.time_of_day + ) + + M.activeCurtailment_rpsditvo = set( + (r, p, s, d, i, t, v, o) + for r, p, t in M.curtailmentVintages.keys() + for v in M.curtailmentVintages[r, p, t] + for i in M.processInputs[r, p, t, v] + for o in M.ProcessOutputsByInput[r, p, t, v, i] + for s in M.time_season + for d in M.time_of_day + ) + + M.activeActivity_rptv = set( + (r, p, t, v) + for r, p, t in M.processVintages.keys() + for v in M.processVintages[r, p, t] + ) + + M.activeCapacity_rtv = set( + (r, t, v) + for r, p, t in M.processVintages.keys() + for v in M.processVintages[r, p, t] + ) + + M.activeCapacityAvailable_rpt = set( + (r, p, t) for r, p, t in M.processVintages.keys() if M.processVintages[r, p, t] + ) + + M.activeCapacityAvailable_rptv = set( + (r, p, t, v) + for r, p, t in M.processVintages.keys() + for v in M.processVintages[r, p, t] + ) + + # --------------------------------------------------------------- # Create sparse parameter indices. -# These functions are called from temoa_model.py and use the sparse keys +# These functions are called from temoa_model.py and use the sparse keys # associated with specific parameters. 
# --------------------------------------------------------------- -def CapacityFactorProcessIndices ( M ): - indices = set( - (r, s, d, t, v) - for r, i, t, v, o in M.Efficiency.sparse_iterkeys() - for s in M.time_season - for d in M.time_of_day - ) +def CapacityFactorProcessIndices(M): + indices = set( + (r, s, d, t, v) + for r, i, t, v, o in M.Efficiency.sparse_iterkeys() + for s in M.time_season + for d in M.time_of_day + ) + + return indices + + +def CapacityFactorTechIndices(M): + indices = set((r, s, d, t) for r, s, d, t, v in M.CapacityFactor_rsdtv) + + return indices - return indices -def CapacityFactorTechIndices ( M ): - indices = set( - (r, s, d, t) +def CostFixedIndices(M): + return M.activeActivity_rptv - for r, s, d, t, v in M.CapacityFactor_rsdtv - ) - return indices +def CostVariableIndices(M): + return M.activeActivity_rptv -def CostFixedIndices ( M ): - return M.activeActivity_rptv -def CostVariableIndices ( M ): - return M.activeActivity_rptv +def CostInvestIndices(M): + indices = set((r, t, v) for r, p, t, v in M.processLoans) -def CostInvestIndices ( M ): - indices = set( - (r, t, v) + return indices - for r, p, t, v in M.processLoans - ) - return indices +def RegionalGlobalInitializedIndices(M): + from itertools import permutations -def RegionalGlobalInitializedIndices ( M ): - from itertools import permutations - indices = set() - for n in range(1,len(M.regions)+1): - regional_perms = permutations(M.regions,n) - for i in regional_perms: - indices.add("+".join(i)) - indices.add('global') - indices = indices.union(M.RegionalIndices) + indices = set() + for n in range(1, len(M.regions) + 1): + regional_perms = permutations(M.regions, n) + for i in regional_perms: + indices.add("+".join(i)) + indices.add("global") + indices = indices.union(M.RegionalIndices) - return indices + return indices -def EmissionActivityIndices ( M ): - indices = set( - (r, e, i, t, v, o) +def EmissionActivityIndices(M): + indices = set( + (r, e, i, t, v, o) + for r, i, t, v, o in M.Efficiency.sparse_iterkeys() + for e in M.commodity_emissions + ) - for r, i, t, v, o in M.Efficiency.sparse_iterkeys() - for e in M.commodity_emissions - ) + return indices - return indices -def EnergyConsumptionByPeriodInputAndTechVariableIndices ( M ): - indices = set( - (p, i, t) +def EnergyConsumptionByPeriodInputAndTechVariableIndices(M): + indices = set( + (p, i, t) + for i, t, v, o in M.Efficiency.sparse_iterkeys() + for p in M.time_optimize + ) - for i, t, v, o in M.Efficiency.sparse_iterkeys() - for p in M.time_optimize - ) + return indices - return indices - -def ActivityByPeriodTechAndOutputVariableIndices ( M ): - indices = set( - (p, t, o) - for i, t, v, o in M.Efficiency.sparse_iterkeys() - for p in M.time_optimize - ) +def ActivityByPeriodTechAndOutputVariableIndices(M): + indices = set( + (p, t, o) + for i, t, v, o in M.Efficiency.sparse_iterkeys() + for p in M.time_optimize + ) - return indices + return indices -def EmissionActivityByPeriodAndTechVariableIndices ( M ): - indices = set( - (e, p, t) - for e, i, t, v, o in M.EmissionActivity.sparse_iterkeys() - for p in M.time_optimize - ) +def EmissionActivityByPeriodAndTechVariableIndices(M): + indices = set( + (e, p, t) + for e, i, t, v, o in M.EmissionActivity.sparse_iterkeys() + for p in M.time_optimize + ) - return indices - + return indices -def ModelProcessLifeIndices ( M ): - """\ + +def ModelProcessLifeIndices(M): + """\ Returns the set of sensical (region, period, tech, vintage) tuples. 
The tuple indicates the periods in which a process is active, distinct from TechLifeFracIndices that returns indices only for processes that EOL mid-period. """ - return M.activeActivity_rptv + return M.activeActivity_rptv + -def LifetimeProcessIndices ( M ): - """\ +def LifetimeProcessIndices(M): + """\ Based on the Efficiency parameter's indices, this function returns the set of process indices that may be specified in the LifetimeProcess parameter. """ - indices = set( - (r, t, v) + indices = set((r, t, v) for r, i, t, v, o in M.Efficiency.sparse_iterkeys()) - for r, i, t, v, o in M.Efficiency.sparse_iterkeys() - ) + return indices - return indices -def LifetimeLoanProcessIndices ( M ): - """\ +def LifetimeLoanProcessIndices(M): + """\ Based on the Efficiency parameter's indices and time_future parameter, this function returns the set of process indices that may be specified in the CostInvest parameter. """ - min_period = min( M.vintage_optimize ) + min_period = min(M.vintage_optimize) - indices = set( - (r, t, v) + indices = set( + (r, t, v) for r, i, t, v, o in M.Efficiency.sparse_iterkeys() if v >= min_period + ) - for r, i, t, v, o in M.Efficiency.sparse_iterkeys() - if v >= min_period - ) + return indices - return indices # --------------------------------------------------------------- # Create sparse indices for decision variables. @@ -929,70 +1006,77 @@ def LifetimeLoanProcessIndices ( M ): # created above in CreateSparseDicts() # --------------------------------------------------------------- -def CapacityVariableIndices ( M ): - return M.activeCapacity_rtv -def CapacityAvailableVariableIndices ( M ): - return M.activeCapacityAvailable_rpt +def CapacityVariableIndices(M): + return M.activeCapacity_rtv + + +def CapacityAvailableVariableIndices(M): + return M.activeCapacityAvailable_rpt + + +def CapacityAvailableVariableIndicesVintage(M): + return M.activeCapacityAvailable_rptv -def CapacityAvailableVariableIndicesVintage ( M ): - return M.activeCapacityAvailable_rptv -def FlowVariableIndices ( M ): - return M.activeFlow_rpsditvo +def FlowVariableIndices(M): + return M.activeFlow_rpsditvo -def FlowVariableAnnualIndices ( M ): - return M.activeFlow_rpitvo +def FlowVariableAnnualIndices(M): + return M.activeFlow_rpitvo -def FlexVariablelIndices ( M ): - return M.activeFlex_rpsditvo -def FlexVariableAnnualIndices ( M ): - return M.activeFlex_rpitvo +def FlexVariablelIndices(M): + return M.activeFlex_rpsditvo -def FlowInStorageVariableIndices ( M ): - return M.activeFlowInStorage_rpsditvo +def FlexVariableAnnualIndices(M): + return M.activeFlex_rpitvo -def CurtailmentVariableIndices ( M ): - return M.activeCurtailment_rpsditvo +def FlowInStorageVariableIndices(M): + return M.activeFlowInStorage_rpsditvo -def CapacityConstraintIndices ( M ): - capacity_indices = set( - (r, p, s, d, t, v) - for r, p, t, v in M.activeActivity_rptv if t not in M.tech_annual - for s in M.time_season - for d in M.time_of_day - ) +def CurtailmentVariableIndices(M): + return M.activeCurtailment_rpsditvo - return capacity_indices -def LinkedTechConstraintIndices ( M ): - linkedtech_indices = set( - (r, p, s, d, t, v, e) +def CapacityConstraintIndices(M): + capacity_indices = set( + (r, p, s, d, t, v) + for r, p, t, v in M.activeActivity_rptv + if t not in M.tech_annual + for s in M.time_season + for d in M.time_of_day + ) - for r, t, e in M.LinkedTechs.sparse_iterkeys() - for p in M.time_optimize if (r, p, t) in M.processVintages.keys() - for v in M.processVintages[ r, p, t ] if (r, p, t, v) in 
M.activeActivity_rptv - for s in M.time_season - for d in M.time_of_day + return capacity_indices - ) - return linkedtech_indices +def LinkedTechConstraintIndices(M): + linkedtech_indices = set( + (r, p, s, d, t, v, e) + for r, t, e in M.LinkedTechs.sparse_iterkeys() + for p in M.time_optimize + if (r, p, t) in M.processVintages.keys() + for v in M.processVintages[r, p, t] + if (r, p, t, v) in M.activeActivity_rptv + for s in M.time_season + for d in M.time_of_day + ) -def CapacityAnnualConstraintIndices ( M ): - capacity_indices = set( - (r, p, t, v) + return linkedtech_indices - for r, p, t, v in M.activeActivity_rptv if t in M.tech_annual - ) +def CapacityAnnualConstraintIndices(M): + capacity_indices = set( + (r, p, t, v) for r, p, t, v in M.activeActivity_rptv if t in M.tech_annual + ) + + return capacity_indices - return capacity_indices # --------------------------------------------------------------- # Create sparse indices for constraints. @@ -1000,230 +1084,230 @@ def CapacityAnnualConstraintIndices ( M ): # created above in CreateSparseDicts() # --------------------------------------------------------------- -def DemandActivityConstraintIndices ( M ): - """\ + +def DemandActivityConstraintIndices(M): + """\ This function returns a set of sparse indices that are used in the DemandActivity constraint. It returns a tuple of the form: (p,s,d,t,v,dem,first_s,first_d) where "dem" is a demand commodity, and "first_s" and "first_d" are the reference season and time-of-day, respectively used to ensure demand activity remains consistent across time slices. """ - first_s = M.time_season.first() - first_d = M.time_of_day.first() - for r,p,t,v,dem in M.ProcessInputsByOutput.keys(): - if dem in M.commodity_demand and t not in M.tech_annual: - for s in M.time_season: - for d in M.time_of_day: - if s != first_s or d != first_d: - yield (r,p,s,d,t,v,dem,first_s,first_d) - -def DemandConstraintIndices ( M ): - used_dems = set((r,dem) for r, p, dem in M.Demand.sparse_iterkeys()) - DSD_keys = M.DemandSpecificDistribution.sparse_keys() - dem_slices = { (r,dem) : set( - (s, d) - for s in M.time_season - for d in M.time_of_day - if (r, s, d, dem) in DSD_keys ) - for (r,dem) in used_dems - } - - indices = set( - (r, p, s, d, dem) - - for r, p, dem in M.Demand.sparse_iterkeys() - for s, d in dem_slices[ (r,dem) ] - ) - - return indices - -def BaseloadDiurnalConstraintIndices ( M ): - indices = set( - (r, p, s, d, t, v) - - for r,p,t in M.baseloadVintages.keys() - for v in M.baseloadVintages[ r, p, t ] - for s in M.time_season - for d in M.time_of_day - ) - - return indices - -def RegionalExchangeCapacityConstraintIndices ( M ): - indices = set( - (r_e, r_i, t, v) - - for r_e, p, i in M.exportRegions.keys() - for r_i, t, v, o in M.exportRegions[r_e, p, i] - ) - - return indices - -def CommodityBalanceConstraintIndices ( M ): - # Generate indices only for those commodities that are produced by - # technologies with varying output at the time slice level. - period_commodity_with_up = set( M.commodityUStreamProcess.keys() ) - period_commodity_with_dn = set( M.commodityDStreamProcess.keys() ) - period_commodity = period_commodity_with_up.intersection( period_commodity_with_dn ) - indices = set( - (r, p, s, d, o) - - for r, p, o in period_commodity #r in this line includes interregional transfer combinations (not needed). - if r in M.regions # this line ensures only the regions are included. 
- for t, v in M.commodityUStreamProcess[ r, p, o ] - if (r, t) not in M.tech_storage and t not in M.tech_annual - for s in M.time_season - for d in M.time_of_day - ) - - return indices - - -def CommodityBalanceAnnualConstraintIndices ( M ): - # Generate indices only for those commodities that are produced by - # technologies with constant annual output. - period_commodity_with_up = set( M.commodityUStreamProcess.keys() ) - period_commodity_with_dn = set( M.commodityDStreamProcess.keys() ) - period_commodity = period_commodity_with_up.intersection( period_commodity_with_dn ) - indices = set( - (r, p, o) - - for r, p, o in period_commodity #r in this line includes interregional transfer combinations (not needed). - if r in M.regions # this line ensures only the regions are included. - for t, v in M.commodityUStreamProcess[ r, p, o ] - if (r, t) not in M.tech_storage and t in M.tech_annual - ) - - return indices - - -def StorageVariableIndices ( M ): - indices = set( - (r, p, s, d, t, v) - - for r, p, t in M.storageVintages.keys() - for s in M.time_season - for d in M.time_of_day - for v in M.storageVintages[ r, p, t ] - - ) - - return indices - - -def StorageInitIndices ( M ): - indices = set( - (r, t, v) - - for r,p,t in M.storageVintages.keys() - for v in M.storageVintages[ r, p, t ] - ) - - return indices - - -def StorageInitConstraintIndices ( M ): - indices = set( - (r,t,v) - - for r,t,v in M.StorageInitFrac.sparse_iterkeys() - ) - - return indices - - -def RampConstraintDayIndices ( M ): - indices = set( - (r, p, s, d, t, v) - - for r,p,t in M.rampVintages.keys() - for s in M.time_season - for d in M.time_of_day - for v in M.rampVintages[ r, p, t ] - ) - - return indices - -def RampConstraintSeasonIndices ( M ): - indices = set( - (r, p, s, t, v) - - for r, p,t in M.rampVintages.keys() - for s in M.time_season - for v in M.rampVintages[ r, p, t ] - ) - - return indices - -def RampConstraintPeriodIndices ( M ): - indices = set( - (r, p, t, v) - - for r, p,t in M.rampVintages.keys() - for v in M.rampVintages[ r, p, t ] ) - - return indices - -def ReserveMarginIndices ( M ): - indices = set( - (r, p , s , d ) - - for r in M.regions - for p in M.time_optimize - for s in M.time_season - for d in M.time_of_day - ) - return indices - -def TechInputSplitConstraintIndices ( M ): - indices = set( - (r, p, s, d, i, t, v) + first_s = M.time_season.first() + first_d = M.time_of_day.first() + for r, p, t, v, dem in M.ProcessInputsByOutput.keys(): + if dem in M.commodity_demand and t not in M.tech_annual: + for s in M.time_season: + for d in M.time_of_day: + if s != first_s or d != first_d: + yield (r, p, s, d, t, v, dem, first_s, first_d) + + +def DemandConstraintIndices(M): + used_dems = set((r, dem) for r, p, dem in M.Demand.sparse_iterkeys()) + DSD_keys = M.DemandSpecificDistribution.sparse_keys() + dem_slices = { + (r, dem): set( + (s, d) + for s in M.time_season + for d in M.time_of_day + if (r, s, d, dem) in DSD_keys + ) + for (r, dem) in used_dems + } + + indices = set( + (r, p, s, d, dem) + for r, p, dem in M.Demand.sparse_iterkeys() + for s, d in dem_slices[(r, dem)] + ) + + return indices + + +def BaseloadDiurnalConstraintIndices(M): + indices = set( + (r, p, s, d, t, v) + for r, p, t in M.baseloadVintages.keys() + for v in M.baseloadVintages[r, p, t] + for s in M.time_season + for d in M.time_of_day + ) + + return indices + + +def RegionalExchangeCapacityConstraintIndices(M): + indices = set( + (r_e, r_i, t, v) + for r_e, p, i in M.exportRegions.keys() + for r_i, t, v, o in 
M.exportRegions[r_e, p, i] + ) + + return indices + + +def CommodityBalanceConstraintIndices(M): + # Generate indices only for those commodities that are produced by + # technologies with varying output at the time slice level. + period_commodity_with_up = set(M.commodityUStreamProcess.keys()) + period_commodity_with_dn = set(M.commodityDStreamProcess.keys()) + period_commodity = period_commodity_with_up.intersection(period_commodity_with_dn) + indices = set( + (r, p, s, d, o) + for r, p, o in period_commodity # r in this line includes interregional transfer combinations (not needed). + if r in M.regions # this line ensures only the regions are included. + for t, v in M.commodityUStreamProcess[r, p, o] + if (r, t) not in M.tech_storage and t not in M.tech_annual + for s in M.time_season + for d in M.time_of_day + ) + + return indices + + +def CommodityBalanceAnnualConstraintIndices(M): + # Generate indices only for those commodities that are produced by + # technologies with constant annual output. + period_commodity_with_up = set(M.commodityUStreamProcess.keys()) + period_commodity_with_dn = set(M.commodityDStreamProcess.keys()) + period_commodity = period_commodity_with_up.intersection(period_commodity_with_dn) + indices = set( + (r, p, o) + for r, p, o in period_commodity # r in this line includes interregional transfer combinations (not needed). + if r in M.regions # this line ensures only the regions are included. + for t, v in M.commodityUStreamProcess[r, p, o] + if (r, t) not in M.tech_storage and t in M.tech_annual + ) + + return indices + + +def StorageVariableIndices(M): + indices = set( + (r, p, s, d, t, v) + for r, p, t in M.storageVintages.keys() + for s in M.time_season + for d in M.time_of_day + for v in M.storageVintages[r, p, t] + ) + + return indices + + +def StorageInitIndices(M): + indices = set( + (r, t, v) + for r, p, t in M.storageVintages.keys() + for v in M.storageVintages[r, p, t] + ) + + return indices + + +def StorageInitConstraintIndices(M): + indices = set((r, t, v) for r, t, v in M.StorageInitFrac.sparse_iterkeys()) + + return indices + + +def RampConstraintDayIndices(M): + indices = set( + (r, p, s, d, t, v) + for r, p, t in M.rampVintages.keys() + for s in M.time_season + for d in M.time_of_day + for v in M.rampVintages[r, p, t] + ) + + return indices - for r, p, i, t in M.inputsplitVintages.keys() if t not in M.tech_annual and t not in M.tech_variable - for v in M.inputsplitVintages[ r, p, i, t ] - for s in M.time_season - for d in M.time_of_day - ) - return indices +def RampConstraintSeasonIndices(M): + indices = set( + (r, p, s, t, v) + for r, p, t in M.rampVintages.keys() + for s in M.time_season + for v in M.rampVintages[r, p, t] + ) + + return indices + -def TechInputSplitAnnualConstraintIndices ( M ): - indices = set( - (r, p, i, t, v) +def RampConstraintPeriodIndices(M): + indices = set( + (r, p, t, v) + for r, p, t in M.rampVintages.keys() + for v in M.rampVintages[r, p, t] + ) + + return indices + + +def ReserveMarginIndices(M): + indices = set( + (r, p, s, d) + for r in M.regions + for p in M.time_optimize + for s in M.time_season + for d in M.time_of_day + ) + return indices + + +def TechInputSplitConstraintIndices(M): + indices = set( + (r, p, s, d, i, t, v) + for r, p, i, t in M.inputsplitVintages.keys() + if t not in M.tech_annual and t not in M.tech_variable + for v in M.inputsplitVintages[r, p, i, t] + for s in M.time_season + for d in M.time_of_day + ) - for r, p, i, t in M.inputsplitVintages.keys() if t in M.tech_annual - for v in 
M.inputsplitVintages[ r, p, i, t ] - ) + return indices + + +def TechInputSplitAnnualConstraintIndices(M): + indices = set( + (r, p, i, t, v) + for r, p, i, t in M.inputsplitVintages.keys() + if t in M.tech_annual + for v in M.inputsplitVintages[r, p, i, t] + ) - return indices + return indices -def TechInputSplitAverageConstraintIndices ( M ): - indices = set( - (r, p, i, t, v) - for r, p, i, t in M.inputsplitaverageVintages.keys() if t in M.tech_variable - for v in M.inputsplitaverageVintages[ r, p, i, t ] - ) - return indices +def TechInputSplitAverageConstraintIndices(M): + indices = set( + (r, p, i, t, v) + for r, p, i, t in M.inputsplitaverageVintages.keys() + if t in M.tech_variable + for v in M.inputsplitaverageVintages[r, p, i, t] + ) + return indices -def TechOutputSplitConstraintIndices ( M ): - indices = set( - (r, p, s, d, t, v, o) - for r, p, t, o in M.outputsplitVintages.keys() if t not in M.tech_annual - for v in M.outputsplitVintages[ r, p, t, o ] - for s in M.time_season - for d in M.time_of_day - ) +def TechOutputSplitConstraintIndices(M): + indices = set( + (r, p, s, d, t, v, o) + for r, p, t, o in M.outputsplitVintages.keys() + if t not in M.tech_annual + for v in M.outputsplitVintages[r, p, t, o] + for s in M.time_season + for d in M.time_of_day + ) - return indices + return indices -def TechOutputSplitAnnualConstraintIndices ( M ): - indices = set( - (r, p, t, v, o) - for r, p, t, o in M.outputsplitVintages.keys() if t in M.tech_annual and t not in M.tech_variable - for v in M.outputsplitVintages[ r, p, t, o ] - ) +def TechOutputSplitAnnualConstraintIndices(M): + indices = set( + (r, p, t, v, o) + for r, p, t, o in M.outputsplitVintages.keys() + if t in M.tech_annual and t not in M.tech_variable + for v in M.outputsplitVintages[r, p, t, o] + ) - return indices \ No newline at end of file + return indices diff --git a/temoa_model/temoa_mga.py b/temoa_model/temoa_mga.py index a1548790..a0a30fd6 100644 --- a/temoa_model/temoa_mga.py +++ b/temoa_model/temoa_mga.py @@ -22,62 +22,68 @@ from pyomo.environ import * from temoa_rules import TotalCost_rule -def ActivityObj_rule ( M, prev_act_t ): - new_act = 0 - for t in M.V_ActivityByTech: - if t in prev_act_t: - new_act += prev_act_t[ t ] * M.V_ActivityByTech[t] - return new_act -def SlackedObjective_rule ( M, prev_cost, mga_slack ): - # It is important that this function name *not* match its constraint name - # plus '_rule', else Pyomo will attempt to be too smart. That is, at the - # first implementation, the associated constraint name is - # 'PreviousSlackedObjective', for which Pyomo searches the namespace for - # 'PreviousSlackedObjective_rule'. We decidedly do not want Pyomo - # trying to call this function because it is not aware of the second arg. - slackcost = (1 + mga_slack) * prev_cost - oldobjective = TotalCost_rule( M ) - expr = ( slackcost >= oldobjective ) - return expr +def ActivityObj_rule(M, prev_act_t): + new_act = 0 + for t in M.V_ActivityByTech: + if t in prev_act_t: + new_act += prev_act_t[t] * M.V_ActivityByTech[t] + return new_act -def PreviousAct_rule ( instance, mga_weight, prev_activity_t ): - # The version below weights each technology by its previous cumulative - # activity. However, different sectors may be tracked in different units and - # have activities of very different magnitudes. - epsilon=1e-6 +def SlackedObjective_rule(M, prev_cost, mga_slack): + # It is important that this function name *not* match its constraint name + # plus '_rule', else Pyomo will attempt to be too smart. 
That is, at the + # first implementation, the associated constraint name is + # 'PreviousSlackedObjective', for which Pyomo searches the namespace for + # 'PreviousSlackedObjective_rule'. We decidedly do not want Pyomo + # trying to call this function because it is not aware of the second arg. + slackcost = (1 + mga_slack) * prev_cost + oldobjective = TotalCost_rule(M) + expr = slackcost >= oldobjective + return expr - if mga_weight == 'integer': - for t in instance.V_ActivityByTech: - if t in instance.tech_mga: - val = value( instance.V_ActivityByTech[t] ) - if abs(val) < epsilon: continue - prev_activity_t[ t ] += 1.0 - return prev_activity_t - - # The version below calculates activity by sector and normalized technology- - # specific activity by the total activity for the sector. Currently accounts - # for electric and transport sectors, but others can be added to the block below. - elif mga_weight == 'normalized': - sectors = set(['electric', 'transport', 'industrial', 'commercial', 'residential']) - act = dict() - techs = {'electric': instance.tech_electric, - 'transport': instance.tech_transport, - 'industrial': instance.tech_industrial, - 'commercial': instance.tech_commercial, - 'residential': instance.tech_residential} - for s in sectors: - if len(techs[s]) > 0: - act[s] = sum( - value( instance.V_ActivityByTech[S_t] ) - for S_t in techs[s] - ) - - for t in instance.V_ActivityByTech: - for s in sectors: - if t in techs[s]: - val = value( instance.V_ActivityByTech[t] ) - if abs(val) < epsilon: continue - prev_activity_t[ t ] += val / act[s] - return prev_activity_t \ No newline at end of file + +def PreviousAct_rule(instance, mga_weight, prev_activity_t): + # The version below weights each technology by its previous cumulative + # activity. However, different sectors may be tracked in different units and + # have activities of very different magnitudes. + + epsilon = 1e-6 + + if mga_weight == "integer": + for t in instance.V_ActivityByTech: + if t in instance.tech_mga: + val = value(instance.V_ActivityByTech[t]) + if abs(val) < epsilon: + continue + prev_activity_t[t] += 1.0 + return prev_activity_t + + # The version below calculates activity by sector and normalized technology- + # specific activity by the total activity for the sector. Currently accounts + # for electric and transport sectors, but others can be added to the block below. + elif mga_weight == "normalized": + sectors = set( + ["electric", "transport", "industrial", "commercial", "residential"] + ) + act = dict() + techs = { + "electric": instance.tech_electric, + "transport": instance.tech_transport, + "industrial": instance.tech_industrial, + "commercial": instance.tech_commercial, + "residential": instance.tech_residential, + } + for s in sectors: + if len(techs[s]) > 0: + act[s] = sum(value(instance.V_ActivityByTech[S_t]) for S_t in techs[s]) + + for t in instance.V_ActivityByTech: + for s in sectors: + if t in techs[s]: + val = value(instance.V_ActivityByTech[t]) + if abs(val) < epsilon: + continue + prev_activity_t[t] += val / act[s] + return prev_activity_t diff --git a/temoa_model/temoa_model.py b/temoa_model/temoa_model.py index a4c14f06..b7701a37 100755 --- a/temoa_model/temoa_model.py +++ b/temoa_model/temoa_model.py @@ -37,12 +37,12 @@ def temoa_create_model(name="Temoa"): M = TemoaModel(name) # --------------------------------------------------------------- - # Define sets. + # Define sets. 
# Sets are collections of items used to index parameters and variables # --------------------------------------------------------------- # Define time periods - M.time_exist = Set(ordered=True) + M.time_exist = Set(ordered=True) M.time_future = Set(ordered=True) M.time_optimize = Set(ordered=True, initialize=init_set_time_optimize) # Define time period vintages to track capacity installation @@ -58,7 +58,7 @@ def temoa_create_model(name="Temoa"): # Define regions M.regions = Set() - # RegionalIndices is the set of all the possible combinations of interregional + # RegionalIndices is the set of all the possible combinations of interregional # exhanges plus original region indices. If tech_exchange is empty, RegionalIndices =regions. M.RegionalIndices = Set(initialize=CreateRegionalIndices) @@ -75,10 +75,12 @@ def temoa_create_model(name="Temoa"): M.tech_curtailment = Set(within=M.tech_all) M.tech_flex = Set(within=M.tech_all) M.tech_exchange = Set(within=M.tech_all) - M.groups = Set(dimen=1) # Define groups for technologies - M.tech_groups = Set(within=M.tech_all) # Define techs used in groups - M.tech_annual = Set(within=M.tech_all) # Define techs with constant output - M.tech_variable = Set(within=M.tech_all) # Define techs for use with TechInputSplitAverage constraint, where techs have variable annual output but the user wishes to constrain them annually + M.groups = Set(dimen=1) # Define groups for technologies + M.tech_groups = Set(within=M.tech_all) # Define techs used in groups + M.tech_annual = Set(within=M.tech_all) # Define techs with constant output + M.tech_variable = Set( + within=M.tech_all + ) # Define techs for use with TechInputSplitAverage constraint, where techs have variable annual output but the user wishes to constrain them annually # Define commodity-related sets M.commodity_demand = Set() @@ -126,16 +128,20 @@ def temoa_create_model(name="Temoa"): M.Demand = Param(M.regions, M.time_optimize, M.commodity_demand) M.initialize_Demands = BuildAction(rule=CreateDemands) - + M.ResourceBound = Param(M.regions, M.time_optimize, M.commodity_physical) # Define technology performance parameters M.CapacityToActivity = Param(M.RegionalIndices, M.tech_all, default=1) - + M.ExistingCapacity = Param(M.RegionalIndices, M.tech_all, M.vintage_exist) M.Efficiency = Param( - M.RegionalIndices, M.commodity_physical, M.tech_all, M.vintage_all, M.commodity_carrier + M.RegionalIndices, + M.commodity_physical, + M.tech_all, + M.vintage_all, + M.commodity_carrier, ) M.validate_UsedEfficiencyIndices = BuildAction(rule=CheckEfficiencyIndices) @@ -157,9 +163,15 @@ def temoa_create_model(name="Temoa"): M.LifetimeLoanProcess = Param(M.LifetimeLoanProcess_rtv, mutable=True) M.initialize_Lifetimes = BuildAction(rule=CreateLifetimes) - M.TechInputSplit = Param(M.regions, M.time_optimize, M.commodity_physical, M.tech_all) - M.TechInputSplitAverage = Param(M.regions, M.time_optimize, M.commodity_physical, M.tech_variable) - M.TechOutputSplit = Param(M.regions, M.time_optimize, M.tech_all, M.commodity_carrier) + M.TechInputSplit = Param( + M.regions, M.time_optimize, M.commodity_physical, M.tech_all + ) + M.TechInputSplitAverage = Param( + M.regions, M.time_optimize, M.commodity_physical, M.tech_variable + ) + M.TechOutputSplit = Param( + M.regions, M.time_optimize, M.tech_all, M.commodity_carrier + ) # The method below creates a series of helper functions that are used to # perform the sparse matrix of indexing for the parameters, variables, and @@ -182,7 +194,8 @@ def temoa_create_model(name="Temoa"): 
M.CostVariable = Param(M.CostVariable_rptv, mutable=True) M.CostVariableVintageDefault_rtv = Set( - dimen=3, initialize=lambda M: set((r, t, v) for r, p, t, v in M.CostVariable_rptv) + dimen=3, + initialize=lambda M: set((r, t, v) for r, p, t, v in M.CostVariable_rptv), ) M.CostVariableVintageDefault = Param(M.CostVariableVintageDefault_rtv) @@ -194,12 +207,11 @@ def temoa_create_model(name="Temoa"): M.Loan_rtv = Set(dimen=3, initialize=lambda M: M.CostInvest.keys()) M.LoanAnnualize = Param(M.Loan_rtv, initialize=ParamLoanAnnualize_rule) - M.ModelProcessLife_rptv = Set(dimen=4, initialize=ModelProcessLifeIndices) M.ModelProcessLife = Param( M.ModelProcessLife_rptv, initialize=ParamModelProcessLife_rule ) - + M.ProcessLifeFrac_rptv = Set(dimen=4, initialize=ModelProcessLifeIndices) M.ProcessLifeFrac = Param( M.ProcessLifeFrac_rptv, initialize=ParamProcessLifeFraction_rule @@ -216,17 +228,21 @@ def temoa_create_model(name="Temoa"): M.MinActivity = Param(M.RegionalGlobalIndices, M.time_optimize, M.tech_all) M.GrowthRateMax = Param(M.RegionalIndices, M.tech_all) M.GrowthRateSeed = Param(M.RegionalIndices, M.tech_all) - M.EmissionLimit = Param(M.RegionalGlobalIndices, M.time_optimize, M.commodity_emissions) + M.EmissionLimit = Param( + M.RegionalGlobalIndices, M.time_optimize, M.commodity_emissions + ) M.EmissionActivity_reitvo = Set(dimen=6, initialize=EmissionActivityIndices) M.EmissionActivity = Param(M.EmissionActivity_reitvo) - M.MinGenGroupWeight = Param(M.RegionalIndices, M.tech_groups, M.groups, default = 0) + M.MinGenGroupWeight = Param(M.RegionalIndices, M.tech_groups, M.groups, default=0) M.MinGenGroupTarget = Param(M.time_optimize, M.groups) M.LinkedTechs = Param(M.RegionalIndices, M.tech_all, M.commodity_emissions) # Define parameters associated with electric sector operation M.RampUp = Param(M.regions, M.tech_ramping) M.RampDown = Param(M.regions, M.tech_ramping) - M.CapacityCredit = Param(M.RegionalIndices, M.time_optimize, M.tech_all, M.vintage_all, default=1) + M.CapacityCredit = Param( + M.RegionalIndices, M.time_optimize, M.tech_all, M.vintage_all, default=1 + ) M.PlanningReserveMargin = Param(M.regions, default=0.2) # Storage duration is expressed in hours M.StorageDuration = Param(M.regions, M.tech_storage, default=4) @@ -294,11 +310,15 @@ def temoa_create_model(name="Temoa"): M.CapacityConstraint_rpsdtv = Set(dimen=6, initialize=CapacityConstraintIndices) M.CapacityConstraint = Constraint( - M.CapacityConstraint_rpsdtv, rule=Capacity_Constraint) + M.CapacityConstraint_rpsdtv, rule=Capacity_Constraint + ) - M.CapacityAnnualConstraint_rptv = Set(dimen=4, initialize=CapacityAnnualConstraintIndices) + M.CapacityAnnualConstraint_rptv = Set( + dimen=4, initialize=CapacityAnnualConstraintIndices + ) M.CapacityAnnualConstraint = Constraint( - M.CapacityAnnualConstraint_rptv, rule=CapacityAnnual_Constraint) + M.CapacityAnnualConstraint_rptv, rule=CapacityAnnual_Constraint + ) M.CapacityAvailableByPeriodAndTechConstraint = Constraint( M.CapacityAvailableVar_rpt, rule=CapacityAvailableByPeriodAndTech_Constraint @@ -336,7 +356,7 @@ def temoa_create_model(name="Temoa"): ) M.CommodityBalanceAnnualConstraint = Constraint( M.CommodityBalanceAnnualConstraint_rpc, rule=CommodityBalanceAnnual_Constraint - ) + ) M.ResourceConstraint_rpr = Set( dimen=3, initialize=lambda M: M.ResourceBound.sparse_iterkeys() @@ -356,7 +376,9 @@ def temoa_create_model(name="Temoa"): dimen=4, initialize=RegionalExchangeCapacityConstraintIndices ) M.RegionalExchangeCapacityConstraint = Constraint( - 
M.RegionalExchangeCapacityConstraint_rrtv, rule=RegionalExchangeCapacity_Constraint) + M.RegionalExchangeCapacityConstraint_rrtv, + rule=RegionalExchangeCapacity_Constraint, + ) # This set works for all the storage-related constraints M.StorageConstraints_rpsdtv = Set(dimen=6, initialize=StorageVariableIndices) @@ -380,7 +402,7 @@ def temoa_create_model(name="Temoa"): M.StorageConstraints_rpsdtv, rule=StorageThroughput_Constraint ) - M.StorageInitConstraint_rtv = Set(dimen=2,initialize=StorageInitConstraintIndices) + M.StorageInitConstraint_rtv = Set(dimen=2, initialize=StorageInitConstraintIndices) M.StorageInitConstraint = Constraint( M.StorageInitConstraint_rtv, rule=StorageInit_Constraint ) @@ -502,14 +524,14 @@ def temoa_create_model(name="Temoa"): M.TechInputSplitAnnualConstraint = Constraint( M.TechInputSplitAnnualConstraint_rpitv, rule=TechInputSplitAnnual_Constraint ) - + M.TechInputSplitAverageConstraint_rpitv = Set( dimen=5, initialize=TechInputSplitAverageConstraintIndices ) M.TechInputSplitAverageConstraint = Constraint( M.TechInputSplitAverageConstraint_rpitv, rule=TechInputSplitAverage_Constraint ) - + M.TechOutputSplitConstraint_rpsdtvo = Set( dimen=7, initialize=TechOutputSplitConstraintIndices ) @@ -523,9 +545,12 @@ def temoa_create_model(name="Temoa"): M.TechOutputSplitAnnualConstraint = Constraint( M.TechOutputSplitAnnualConstraint_rptvo, rule=TechOutputSplitAnnual_Constraint ) - M.LinkedEmissionsTechConstraint_rpsdtve = Set(dimen=7, initialize=LinkedTechConstraintIndices) + M.LinkedEmissionsTechConstraint_rpsdtve = Set( + dimen=7, initialize=LinkedTechConstraintIndices + ) M.LinkedEmissionsTechConstraint = Constraint( - M.LinkedEmissionsTechConstraint_rpsdtve, rule=LinkedEmissionsTech_Constraint) + M.LinkedEmissionsTechConstraint_rpsdtve, rule=LinkedEmissionsTech_Constraint + ) return M @@ -556,4 +581,4 @@ def runModel(): command line as follows: $ python temoa_model/temoa_model.py path/to/dat/file""" dummy = "" # If calling from command line, send empty string - model = runModel() \ No newline at end of file + model = runModel() diff --git a/temoa_model/temoa_myopic.py b/temoa_model/temoa_myopic.py index 93f6e44e..fa1ddc28 100644 --- a/temoa_model/temoa_myopic.py +++ b/temoa_model/temoa_myopic.py @@ -31,54 +31,102 @@ """ import sqlite3 -import pandas as pd +import pandas as pd from shutil import copyfile import os import sys from IPython import embed as IP import io -def myopic_db_generator_solver ( self ): + +def myopic_db_generator_solver(self): global db_path_org db_path_org = self.options.output # original database specified in the ../config_sample file con_org = sqlite3.connect(db_path_org) - cur_org = con_org.cursor() - table_list = cur_org.execute("SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'Output%'").fetchall() - time_periods = cur_org.execute("SELECT t_periods FROM time_periods WHERE flag='f'").fetchall() + cur_org = con_org.cursor() + table_list = cur_org.execute( + "SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'Output%'" + ).fetchall() + time_periods = cur_org.execute( + "SELECT t_periods FROM time_periods WHERE flag='f'" + ).fetchall() cur_org.execute("DELETE FROM MyopicBaseyear") - cur_org.execute("INSERT INTO MyopicBaseyear (year) VALUES ("+str(time_periods[0][0])+")") + cur_org.execute( + "INSERT INTO MyopicBaseyear (year) VALUES (" + str(time_periods[0][0]) + ")" + ) con_org.commit() - loc1 = max(loc for loc, val in enumerate(self.options.output) if val == '/' or val=='\\') - loc2 = max(loc for loc, val 
in enumerate(self.options.output) if val == '.') - db_name = self.options.output[loc1+1:loc2] - copyfile(db_path_org, os.path.join(self.options.path_to_data,db_name)+"_blank"+self.options.output[loc2:]) - - # group 1 consists of non output tables in which "periods" is a column name - tables_group1 = ['CostFixed','CostVariable','Demand','EmissionLimit','MaxActivity','MaxCapacity', \ - 'MinActivity','MinCapacity','TechInputSplit','TechInputSplitAverage','TechOutputSplit','CapacityCredit','MinGenGroupTarget'] + loc1 = max( + loc for loc, val in enumerate(self.options.output) if val == "/" or val == "\\" + ) + loc2 = max(loc for loc, val in enumerate(self.options.output) if val == ".") + db_name = self.options.output[loc1 + 1 : loc2] + copyfile( + db_path_org, + os.path.join(self.options.path_to_data, db_name) + + "_blank" + + self.options.output[loc2:], + ) + + # group 1 consists of non output tables in which "periods" is a column name + tables_group1 = [ + "CostFixed", + "CostVariable", + "Demand", + "EmissionLimit", + "MaxActivity", + "MaxCapacity", + "MinActivity", + "MinCapacity", + "TechInputSplit", + "TechInputSplitAverage", + "TechOutputSplit", + "CapacityCredit", + "MinGenGroupTarget", + ] # group 2 consists of non output tables in which "vintage" is a column name except for CostFixed and CostVariable (taken care of above) - tables_group2 = ['CapacityFactorProcess','CostInvest','DiscountRate', \ - 'Efficiency','EmissionActivity','ExistingCapacity','LifetimeProcess'] - + tables_group2 = [ + "CapacityFactorProcess", + "CostInvest", + "DiscountRate", + "Efficiency", + "EmissionActivity", + "ExistingCapacity", + "LifetimeProcess", + ] + version = int(sys.version[0]) N = self.options.myopic_periods - if 1 <= int(N) <= len(time_periods)-2: + if 1 <= int(N) <= len(time_periods) - 2: N = int(N) else: - print ("Error: The number of myopic years must between 1 and "+str(len(time_periods)-2)) - - for i in range(N-1,len(time_periods)-1): + print( + "Error: The number of myopic years must between 1 and " + + str(len(time_periods) - 2) + ) - print ('Preparing the database for the period(s): '+str([str(time_periods[j][0]) for j in range(i-(N-1),i+1)])) + for i in range(N - 1, len(time_periods) - 1): + print( + "Preparing the database for the period(s): " + + str([str(time_periods[j][0]) for j in range(i - (N - 1), i + 1)]) + ) new_myopic_name = "_myopic" - for j in range(i-(N-1),i+1): - new_myopic_name += "_"+str(time_periods[j][0]) - - new_db_loc = os.path.join(self.options.path_to_data, db_name)+new_myopic_name+self.options.output[loc2:] - copyfile(os.path.join(self.options.path_to_data, db_name) +"_blank"+self.options.output[loc2:], new_db_loc) + for j in range(i - (N - 1), i + 1): + new_myopic_name += "_" + str(time_periods[j][0]) + + new_db_loc = ( + os.path.join(self.options.path_to_data, db_name) + + new_myopic_name + + self.options.output[loc2:] + ) + copyfile( + os.path.join(self.options.path_to_data, db_name) + + "_blank" + + self.options.output[loc2:], + new_db_loc, + ) con = sqlite3.connect(new_db_loc) cur = con.cursor() table_list.sort() @@ -86,137 +134,310 @@ def myopic_db_generator_solver ( self ): # --------------------------------------------------------------- # Start modifying the Efficiency table # --------------------------------------------------------------- - cur.execute("DELETE FROM Efficiency WHERE vintage > "+str(time_periods[i][0])+";") - - cur.execute("UPDATE Efficiency SET tech = TRIM(tech);") #trim spaces. 
Need to trim carriage return
-
+        cur.execute(
+            "DELETE FROM Efficiency WHERE vintage > " + str(time_periods[i][0]) + ";"
+        )
+
+        cur.execute(
+            "UPDATE Efficiency SET tech = TRIM(tech);"
+        )  # trim spaces. Need to trim carriage return
+
         # Delete row from Efficiency if (t,v) retires at the beginning of current period (which is time_periods[i][0])
-        cur.execute("DELETE FROM Efficiency WHERE tech IN (SELECT tech FROM LifetimeProcess WHERE \
-        LifetimeProcess.life_process+LifetimeProcess.vintage<="+str(time_periods[i-(N-1)][0])+") \
+        cur.execute(
+            "DELETE FROM Efficiency WHERE tech IN (SELECT tech FROM LifetimeProcess WHERE \
+            LifetimeProcess.life_process+LifetimeProcess.vintage<="
+            + str(time_periods[i - (N - 1)][0])
+            + ") \
             AND vintage IN (SELECT vintage FROM LifetimeProcess WHERE LifetimeProcess.life_process+\
-        LifetimeProcess.vintage<="+str(time_periods[i-(N-1)][0])+");")
-
+            LifetimeProcess.vintage<="
+            + str(time_periods[i - (N - 1)][0])
+            + ");"
+        )
+
         # # Delete row from Efficiency if (t,v) retires at the beginning of current period (which is time_periods[i][0])
-        query = "DELETE FROM Efficiency \
+        query = (
+            "DELETE FROM Efficiency \
         WHERE (Efficiency.regions, input_comm, Efficiency.tech, vintage, output_comm) IN \
         (SELECT DISTINCT Efficiency.regions, input_comm, Efficiency.tech, vintage, output_comm \
         FROM Efficiency INNER JOIN LifetimeTech ON (LifetimeTech.regions=Efficiency.regions AND LifetimeTech.tech=Efficiency.tech) \
-        WHERE Efficiency.vintage + LifetimeTech.life <= "+str(time_periods[i-(N-1)][0])+")\
+        WHERE Efficiency.vintage + LifetimeTech.life <= "
+            + str(time_periods[i - (N - 1)][0])
+            + ")\
         AND vintage NOT IN (SELECT vintage FROM LifetimeProcess WHERE LifetimeProcess.tech=Efficiency.tech)"
+        )
         cur.execute(query)

         # If row is not deleted via the last two DELETE commands, it might still be invalid for period
-        # time_periods[i][0] since they can have model default lifetime of 40 years. 
-        cur.execute("DELETE FROM Efficiency WHERE tech IN (SELECT tech FROM Efficiency WHERE \
-        40+Efficiency.vintage<="+str(time_periods[i-(N-1)][0])+") AND \
+        # time_periods[i][0] since they can have model default lifetime of 40 years.
+        cur.execute(
+            "DELETE FROM Efficiency WHERE tech IN (SELECT tech FROM Efficiency WHERE \
+            40+Efficiency.vintage<="
+            + str(time_periods[i - (N - 1)][0])
+            + ") AND \
            tech NOT IN (SELECT tech FROM LifetimeTech) AND \
-        vintage NOT IN (SELECT vintage FROM LifetimeProcess WHERE LifetimeProcess.tech=Efficiency.tech);")
-
+            vintage NOT IN (SELECT vintage FROM LifetimeProcess WHERE LifetimeProcess.tech=Efficiency.tech);"
+        )
+
        # The commands above could break commodity flows defined in the Efficiency table. We need to delete rows with
        # output commodities that are not generated by any other process. 
The exception is demand commodities (flag='d') iterval = 0 - while len(cur.execute("SELECT * FROM Efficiency WHERE output_comm NOT IN (SELECT input_comm FROM Efficiency)\ - AND output_comm NOT IN (SELECT comm_name FROM commodities WHERE flag='d');").fetchall()) > 0: - - cur.execute("DELETE FROM Efficiency WHERE output_comm NOT IN (SELECT input_comm FROM Efficiency) \ - AND output_comm NOT IN (SELECT comm_name FROM commodities WHERE flag='d');") - iterval+=1 - if iterval>10: + while ( + len( + cur.execute( + "SELECT * FROM Efficiency WHERE output_comm NOT IN (SELECT input_comm FROM Efficiency)\ + AND output_comm NOT IN (SELECT comm_name FROM commodities WHERE flag='d');" + ).fetchall() + ) + > 0 + ): + cur.execute( + "DELETE FROM Efficiency WHERE output_comm NOT IN (SELECT input_comm FROM Efficiency) \ + AND output_comm NOT IN (SELECT comm_name FROM commodities WHERE flag='d');" + ) + iterval += 1 + if iterval > 10: break - + # --------------------------------------------------------------- - # Sufficient changes were made on the Efficiency table. + # Sufficient changes were made on the Efficiency table. # Start modifying other tables. # --------------------------------------------------------------- for table in tables_group1: if table in [x[0] for x in table_list]: - cur.execute("DELETE FROM "+table +" WHERE periods > "+str(time_periods[i][0])+" OR periods < "+str(time_periods[i-(N-1)][0])+";") - + cur.execute( + "DELETE FROM " + + table + + " WHERE periods > " + + str(time_periods[i][0]) + + " OR periods < " + + str(time_periods[i - (N - 1)][0]) + + ";" + ) for table in tables_group2: if table in [x[0] for x in table_list]: - if table == 'CostInvest' or table == 'DiscountRate': - cur.execute("UPDATE "+table+" SET tech = TRIM(tech);") - cur.execute("DELETE FROM "+table +" WHERE vintage > "+str(time_periods[i][0])+";") + if table == "CostInvest" or table == "DiscountRate": + cur.execute("UPDATE " + table + " SET tech = TRIM(tech);") + cur.execute( + "DELETE FROM " + + table + + " WHERE vintage > " + + str(time_periods[i][0]) + + ";" + ) else: - cur.execute("DELETE FROM "+table +" WHERE vintage > "+str(time_periods[i][0])+";") + cur.execute( + "DELETE FROM " + + table + + " WHERE vintage > " + + str(time_periods[i][0]) + + ";" + ) # time_periods is the only non output table with "t_periods" as a column - cur.execute("DELETE FROM time_periods WHERE t_periods > "+str(time_periods[i][0])+";") + cur.execute( + "DELETE FROM time_periods WHERE t_periods > " + + str(time_periods[i][0]) + + ";" + ) - # --------------------------------------------------------------- # Ensure that linked technologies appear in the Output data tables - # only if the primary technologies appear as well. + # only if the primary technologies appear as well. 
# Check Output_CapacityByPeriodAndTech and Output_V_Capacity in con_org # --------------------------------------------------------------- - dict_table_column = {} - dict_table_column['Output_CapacityByPeriodAndTech'] = 't_periods' - dict_table_column['Output_V_Capacity'] = 'vintage' - - for table in ['Output_CapacityByPeriodAndTech', 'Output_V_Capacity']: - if i!=(N-1): - #delete primary_techs where capacity is a small negative value - query = "DELETE FROM " + table + " \ - WHERE scenario="+"'"+str(self.options.scenario)+"' AND \ + dict_table_column["Output_CapacityByPeriodAndTech"] = "t_periods" + dict_table_column["Output_V_Capacity"] = "vintage" + + for table in ["Output_CapacityByPeriodAndTech", "Output_V_Capacity"]: + if i != (N - 1): + # delete primary_techs where capacity is a small negative value + query = ( + "DELETE FROM " + + table + + " \ + WHERE scenario=" + + "'" + + str(self.options.scenario) + + "' AND \ tech in (SELECT primary_tech FROM LinkedTechs) AND \ - capacity < 0 AND " + dict_table_column[table] + " = "+str(time_periods[j-1][0])+";" + capacity < 0 AND " + + dict_table_column[table] + + " = " + + str(time_periods[j - 1][0]) + + ";" + ) cur_org.execute(query) - #if linked tech exists but primary tech does not - df_linkedtechs = pd.read_sql_query("SELECT * FROM " + table + " \ - WHERE scenario="+"'"+str(self.options.scenario)+"' AND \ - tech in (SELECT linked_tech FROM LinkedTechs) AND " + \ - dict_table_column[table] + " = "+str(time_periods[j-1][0])+";", con_org) + # if linked tech exists but primary tech does not + df_linkedtechs = pd.read_sql_query( + "SELECT * FROM " + + table + + " \ + WHERE scenario=" + + "'" + + str(self.options.scenario) + + "' AND \ + tech in (SELECT linked_tech FROM LinkedTechs) AND " + + dict_table_column[table] + + " = " + + str(time_periods[j - 1][0]) + + ";", + con_org, + ) for ind, row in df_linkedtechs.iterrows(): - primary_tech = pd.read_sql_query("SELECT primary_tech FROM LinkedTechs WHERE primary_region = '" + row['regions'] + \ - "' AND linked_tech = '" + row['tech'] + "'", con_org) - primary_tech = primary_tech['primary_tech'].values[0] - df_primary_tech = pd.read_sql_query("SELECT * FROM " + table + " \ - WHERE scenario="+"'"+str(self.options.scenario)+"' AND \ - tech = '" + primary_tech + "' AND \ - regions = '" + row['regions'] + "' AND " + \ - dict_table_column[table] + " = "+str(time_periods[j-1][0])+";", con_org) - if len(df_primary_tech)==0: - query = "DELETE FROM " + table + " WHERE scenario="+"'"+str(self.options.scenario)+"' AND \ - tech = '" + row['tech'] + "' AND regions = '" + row['regions'] + "' AND " + \ - dict_table_column[table] + " = "+str(time_periods[j-1][0]) + primary_tech = pd.read_sql_query( + "SELECT primary_tech FROM LinkedTechs WHERE primary_region = '" + + row["regions"] + + "' AND linked_tech = '" + + row["tech"] + + "'", + con_org, + ) + primary_tech = primary_tech["primary_tech"].values[0] + df_primary_tech = pd.read_sql_query( + "SELECT * FROM " + + table + + " \ + WHERE scenario=" + + "'" + + str(self.options.scenario) + + "' AND \ + tech = '" + + primary_tech + + "' AND \ + regions = '" + + row["regions"] + + "' AND " + + dict_table_column[table] + + " = " + + str(time_periods[j - 1][0]) + + ";", + con_org, + ) + if len(df_primary_tech) == 0: + query = ( + "DELETE FROM " + + table + + " WHERE scenario=" + + "'" + + str(self.options.scenario) + + "' AND \ + tech = '" + + row["tech"] + + "' AND regions = '" + + row["regions"] + + "' AND " + + dict_table_column[table] + + " = " + + str(time_periods[j 
- 1][0]) + ) cur_org.execute(query) - if table=='Output_V_Capacity': - for aux_table in ['Output_VFlow_Out', 'Output_VFlow_In']: - query = "DELETE FROM " + aux_table + " WHERE scenario="+"'"+str(self.options.scenario)+"' AND \ - tech = '" + row['tech'] + "' AND regions = '" + row['regions'] + "' AND " + \ - dict_table_column[table] + " = "+str(time_periods[j-1][0]) + if table == "Output_V_Capacity": + for aux_table in ["Output_VFlow_Out", "Output_VFlow_In"]: + query = ( + "DELETE FROM " + + aux_table + + " WHERE scenario=" + + "'" + + str(self.options.scenario) + + "' AND \ + tech = '" + + row["tech"] + + "' AND regions = '" + + row["regions"] + + "' AND " + + dict_table_column[table] + + " = " + + str(time_periods[j - 1][0]) + ) cur_org.execute(query) - - #if primary tech exists but linked tech does not - df_primarytechs = pd.read_sql_query("SELECT * FROM " + table + " \ - WHERE scenario="+"'"+str(self.options.scenario)+"' AND \ - tech in (SELECT primary_tech FROM LinkedTechs) AND " + \ - dict_table_column[table] + " = "+str(time_periods[j-1][0])+";", con_org) + # if primary tech exists but linked tech does not + df_primarytechs = pd.read_sql_query( + "SELECT * FROM " + + table + + " \ + WHERE scenario=" + + "'" + + str(self.options.scenario) + + "' AND \ + tech in (SELECT primary_tech FROM LinkedTechs) AND " + + dict_table_column[table] + + " = " + + str(time_periods[j - 1][0]) + + ";", + con_org, + ) for ind, row in df_primarytechs.iterrows(): - linked_tech = pd.read_sql_query("SELECT linked_tech FROM LinkedTechs WHERE primary_region = '" + row['regions'] + \ - "' AND primary_tech = '" + row['tech'] + "'", con_org) - linked_tech = linked_tech['linked_tech'].values[0] - df_linked_tech = pd.read_sql_query("SELECT * FROM " + table + " \ - WHERE scenario="+"'"+str(self.options.scenario)+"' AND \ - tech = '" + linked_tech + "' AND \ - regions = '" + row['regions'] + "' AND " + \ - dict_table_column[table] + " = "+str(time_periods[j-1][0])+";", con_org) - if len(df_linked_tech)==0: - query = "DELETE FROM " + table + " WHERE scenario="+"'"+str(self.options.scenario)+"' AND \ - tech = '" + row['tech'] + "' AND regions = '" + row['regions'] + "' AND " + \ - dict_table_column[table] + " = "+str(time_periods[j-1][0]) + linked_tech = pd.read_sql_query( + "SELECT linked_tech FROM LinkedTechs WHERE primary_region = '" + + row["regions"] + + "' AND primary_tech = '" + + row["tech"] + + "'", + con_org, + ) + linked_tech = linked_tech["linked_tech"].values[0] + df_linked_tech = pd.read_sql_query( + "SELECT * FROM " + + table + + " \ + WHERE scenario=" + + "'" + + str(self.options.scenario) + + "' AND \ + tech = '" + + linked_tech + + "' AND \ + regions = '" + + row["regions"] + + "' AND " + + dict_table_column[table] + + " = " + + str(time_periods[j - 1][0]) + + ";", + con_org, + ) + if len(df_linked_tech) == 0: + query = ( + "DELETE FROM " + + table + + " WHERE scenario=" + + "'" + + str(self.options.scenario) + + "' AND \ + tech = '" + + row["tech"] + + "' AND regions = '" + + row["regions"] + + "' AND " + + dict_table_column[table] + + " = " + + str(time_periods[j - 1][0]) + ) cur_org.execute(query) - if table=='Output_V_Capacity': - for aux_table in ['Output_VFlow_Out', 'Output_VFlow_In']: - query = "DELETE FROM " + aux_table + " WHERE scenario="+"'"+str(self.options.scenario)+"' AND \ - tech = '" + row['tech'] + "' AND regions = '" + row['regions'] + "' AND " + \ - dict_table_column[table] + " = "+str(time_periods[j-1][0]) + if table == "Output_V_Capacity": + for aux_table in ["Output_VFlow_Out", 
"Output_VFlow_In"]: + query = ( + "DELETE FROM " + + aux_table + + " WHERE scenario=" + + "'" + + str(self.options.scenario) + + "' AND \ + tech = '" + + row["tech"] + + "' AND regions = '" + + row["regions"] + + "' AND " + + dict_table_column[table] + + " = " + + str(time_periods[j - 1][0]) + ) cur_org.execute(query) # --------------------------------------------------------------- @@ -224,142 +445,305 @@ def myopic_db_generator_solver ( self ): # table. The data is stored in the Output_V_Capacity of the con_org # --------------------------------------------------------------- - if i!=(N-1): - df_new_ExistingCapacity = pd.read_sql_query("SELECT regions, tech, vintage, capacity FROM Output_V_Capacity \ - WHERE scenario="+"'"+str(self.options.scenario)+"' AND \ - vintage < "+str(time_periods[i-(N-1)][0])+";", con_org) - df_new_ExistingCapacity.columns = ['regions','tech','vintage','exist_cap'] - df_new_ExistingCapacity.to_sql('ExistingCapacity',con, if_exists='append', index=False) - - #Create a copy of the first time period vintages for the two current vintage - #to prevent infeasibility (if it is not an 'existing' vintage in the - #original database and if it doesn't already have a current vintage). One example: + if i != (N - 1): + df_new_ExistingCapacity = pd.read_sql_query( + "SELECT regions, tech, vintage, capacity FROM Output_V_Capacity \ + WHERE scenario=" + + "'" + + str(self.options.scenario) + + "' AND \ + vintage < " + + str(time_periods[i - (N - 1)][0]) + + ";", + con_org, + ) + df_new_ExistingCapacity.columns = [ + "regions", + "tech", + "vintage", + "exist_cap", + ] + df_new_ExistingCapacity.to_sql( + "ExistingCapacity", con, if_exists="append", index=False + ) + + # Create a copy of the first time period vintages for the two current vintage + # to prevent infeasibility (if it is not an 'existing' vintage in the + # original database and if it doesn't already have a current vintage). One example: # dummy technologies that have only the first time period vintage (p0) - for j in range(N-1,-1,-1): #backward loop - cur.execute("INSERT INTO Efficiency (regions,input_comm,tech,vintage,output_comm,efficiency) \ - SELECT DISTINCT regions,input_comm,tech,"+str(time_periods[i-j][0])+ \ - ",output_comm,efficiency FROM Efficiency WHERE tech NOT IN (SELECT tech \ - FROM Efficiency WHERE vintage < "+str(time_periods[0][0])+") AND tech NOT IN (SELECT \ - tech FROM Efficiency WHERE vintage >= "+str(time_periods[i-j][0])+");") - + for j in range(N - 1, -1, -1): # backward loop + cur.execute( + "INSERT INTO Efficiency (regions,input_comm,tech,vintage,output_comm,efficiency) \ + SELECT DISTINCT regions,input_comm,tech," + + str(time_periods[i - j][0]) + + ",output_comm,efficiency FROM Efficiency WHERE tech NOT IN (SELECT tech \ + FROM Efficiency WHERE vintage < " + + str(time_periods[0][0]) + + ") AND tech NOT IN (SELECT \ + tech FROM Efficiency WHERE vintage >= " + + str(time_periods[i - j][0]) + + ");" + ) + # delete (t,v) from efficiecny table if it doesn't appear in the ExistingCapacity (v is an existing vintage). 
# (note that the model throws a warning if (t,v) is an existing vintage but it doesn't appear in ExistingCapacity)
-            cur.execute("DELETE FROM Efficiency \
-            WHERE vintage <= "+str(time_periods[i-N][0])+" AND vintage NOT IN (SELECT \
-            vintage FROM ExistingCapacity WHERE Efficiency.tech=ExistingCapacity.tech AND Efficiency.regions=ExistingCapacity.regions);")
+            cur.execute(
+                "DELETE FROM Efficiency \
+                WHERE vintage <= "
+                + str(time_periods[i - N][0])
+                + " AND vintage NOT IN (SELECT \
+                vintage FROM ExistingCapacity WHERE Efficiency.tech=ExistingCapacity.tech AND Efficiency.regions=ExistingCapacity.regions);"
+            )

            iterval = 0
-            while len(cur.execute("SELECT * FROM Efficiency WHERE output_comm NOT IN (SELECT input_comm FROM Efficiency)\
-            AND output_comm NOT IN (SELECT comm_name FROM commodities WHERE flag='d');").fetchall()) > 0:
-
-                cur.execute("DELETE FROM Efficiency WHERE output_comm NOT IN (SELECT input_comm FROM Efficiency) \
-                AND output_comm NOT IN (SELECT comm_name FROM commodities WHERE flag='d');")
-                iterval+=1
-                if iterval>10:
-                    break
-
+            while (
+                len(
+                    cur.execute(
+                        "SELECT * FROM Efficiency WHERE output_comm NOT IN (SELECT input_comm FROM Efficiency)\
+                        AND output_comm NOT IN (SELECT comm_name FROM commodities WHERE flag='d');"
+                    ).fetchall()
+                )
+                > 0
+            ):
+                cur.execute(
+                    "DELETE FROM Efficiency WHERE output_comm NOT IN (SELECT input_comm FROM Efficiency) \
+                    AND output_comm NOT IN (SELECT comm_name FROM commodities WHERE flag='d');"
+                )
+                iterval += 1
+                if iterval > 10:
+                    break
+
            # Discard the results associated with the time_periods[i-N][0] period in rolling horizon fashion. Otherwise, a UNIQUE CONSTRAINT error is thrown.
            # Re Output_Costs, a delete is not needed because in pformat_results.py, future period costs get added to what is already in the table
-            cur_org.execute("DELETE FROM Output_CapacityByPeriodAndTech WHERE scenario="+"'"+str(self.options.scenario)+"' AND t_periods>"+str(time_periods[i-N][0]))
-            cur_org.execute("DELETE FROM Output_Emissions WHERE scenario="+"'"+str(self.options.scenario)+"' AND t_periods>"+str(time_periods[i-N][0]))
-            cur_org.execute("DELETE FROM Output_VFlow_In WHERE scenario="+"'"+str(self.options.scenario)+"' AND t_periods>"+str(time_periods[i-N][0]))
-            cur_org.execute("DELETE FROM Output_VFlow_Out WHERE scenario="+"'"+str(self.options.scenario)+"' AND t_periods>"+str(time_periods[i-N][0]))
-            cur_org.execute("DELETE FROM Output_V_Capacity WHERE scenario="+"'"+str(self.options.scenario)+"' AND vintage>"+str(time_periods[i-N][0]))
-            cur_org.execute("DELETE FROM Output_Curtailment WHERE scenario="+"'"+str(self.options.scenario)+"' AND t_periods>"+str(time_periods[i-N][0]))
+            cur_org.execute(
+                "DELETE FROM Output_CapacityByPeriodAndTech WHERE scenario="
+                + "'"
+                + str(self.options.scenario)
+                + "' AND t_periods>"
+                + str(time_periods[i - N][0])
+            )
+            cur_org.execute(
+                "DELETE FROM Output_Emissions WHERE scenario="
+                + "'"
+                + str(self.options.scenario)
+                + "' AND t_periods>"
+                + str(time_periods[i - N][0])
+            )
+            cur_org.execute(
+                "DELETE FROM Output_VFlow_In WHERE scenario="
+                + "'"
+                + str(self.options.scenario)
+                + "' AND t_periods>"
+                + str(time_periods[i - N][0])
+            )
+            cur_org.execute(
+                "DELETE FROM Output_VFlow_Out WHERE scenario="
+                + "'"
+                + str(self.options.scenario)
+                + "' AND t_periods>"
+                + str(time_periods[i - N][0])
+            )
+            cur_org.execute(
+                "DELETE FROM Output_V_Capacity WHERE scenario="
+                + "'"
+                + str(self.options.scenario)
+                + "' AND vintage>"
+                + str(time_periods[i - N][0])
+            )
+            cur_org.execute(
+                
"DELETE FROM Output_Curtailment WHERE scenario=" + + "'" + + str(self.options.scenario) + + "' AND t_periods>" + + str(time_periods[i - N][0]) + ) con_org.commit() # --------------------------------------------------------------- # The Efficiency table is now ready. Continue modifying other tables. # --------------------------------------------------------------- for table in table_list: - if table[0] == 'Efficiency': continue + if table[0] == "Efficiency": + continue try: - if table[0]=='LinkedTechs': - cur.execute("DELETE FROM LinkedTechs WHERE primary_tech NOT IN (SELECT DISTINCT(tech) FROM Efficiency)") - cur.execute("DELETE FROM LinkedTechs WHERE linked_tech NOT IN (SELECT DISTINCT(tech) FROM Efficiency)") - cur.execute("UPDATE "+str(table[0])+" SET tech = TRIM(tech, CHAR(37,10));") - # If t doesn't exist in Efficiency table after the deletions made above, - # it is deleted from other tables. - cur.execute("DELETE FROM "+str(table[0])+" WHERE tech NOT IN (SELECT tech FROM Efficiency);") - cursor = con.execute("SELECT * FROM "+str(table[0])) + if table[0] == "LinkedTechs": + cur.execute( + "DELETE FROM LinkedTechs WHERE primary_tech NOT IN (SELECT DISTINCT(tech) FROM Efficiency)" + ) + cur.execute( + "DELETE FROM LinkedTechs WHERE linked_tech NOT IN (SELECT DISTINCT(tech) FROM Efficiency)" + ) + cur.execute( + "UPDATE " + str(table[0]) + " SET tech = TRIM(tech, CHAR(37,10));" + ) + # If t doesn't exist in Efficiency table after the deletions made above, + # it is deleted from other tables. + cur.execute( + "DELETE FROM " + + str(table[0]) + + " WHERE tech NOT IN (SELECT tech FROM Efficiency);" + ) + cursor = con.execute("SELECT * FROM " + str(table[0])) names = list(map(lambda x: x[0], cursor.description)) - if 'regions' in names: - query = "DELETE FROM "+str(table[0])+" WHERE (regions, tech) NOT IN (SELECT DISTINCT regions, tech FROM Efficiency) \ + if "regions" in names: + query = ( + "DELETE FROM " + + str(table[0]) + + " WHERE (regions, tech) NOT IN (SELECT DISTINCT regions, tech FROM Efficiency) \ AND regions!='global'" + ) cur.execute(query) - query = "DELETE FROM "+str(table[0])+" WHERE tech NOT IN (SELECT tech FROM Efficiency) \ + query = ( + "DELETE FROM " + + str(table[0]) + + " WHERE tech NOT IN (SELECT tech FROM Efficiency) \ AND regions='global'" + ) cur.execute(query) - - if 'vintage' in names: - if table[0]!='ExistingCapacity': - for j in range(N-1,-1,-1): + + if "vintage" in names: + if table[0] != "ExistingCapacity": + for j in range(N - 1, -1, -1): names = list(map(lambda x: x[0], cursor.description)) - names = [str(time_periods[i-j][0]) if x=='vintage' else x for x in names] - query = "SELECT DISTINCT "+",".join(names)+\ - " FROM "+table[0]+" WHERE tech NOT IN (SELECT tech FROM "+table[0]+\ - " WHERE vintage<"+str(time_periods[0][0])+") AND tech NOT IN (SELECT tech FROM "+\ - table[0]+" WHERE vintage >= "+str(time_periods[i-j][0])+");" + names = [ + str(time_periods[i - j][0]) if x == "vintage" else x + for x in names + ] + query = ( + "SELECT DISTINCT " + + ",".join(names) + + " FROM " + + table[0] + + " WHERE tech NOT IN (SELECT tech FROM " + + table[0] + + " WHERE vintage<" + + str(time_periods[0][0]) + + ") AND tech NOT IN (SELECT tech FROM " + + table[0] + + " WHERE vintage >= " + + str(time_periods[i - j][0]) + + ");" + ) df_table = cur.execute(query).fetchall() - if df_table == []: continue + if df_table == []: + continue df_table = pd.read_sql_query(query, con) - if table[0] == 'EmissionActivity': - filter_list = 
names[:names.index(str(time_periods[i-j][0]))+2]
+                        if table[0] == "EmissionActivity":
+                            filter_list = names[
+                                : names.index(str(time_periods[i - j][0])) + 2
+                            ]
                         else:
-                            filter_list = names[:names.index(str(time_periods[i-j][0]))+1]
-                        df_table = df_table.drop_duplicates(subset=filter_list, keep='last')
-                        df_table.columns = ['vintage' if x==str(time_periods[i-j][0]) else x for x in df_table.columns]
-                        df_table.to_sql(str(table[0]),con, if_exists='append', index=False)
-
-                # For these two table we only want current vintages.
-                if table[0] == 'CostInvest' or table[0] == 'DiscountRate':
-                    cur.execute("DELETE FROM "+str(table[0])+" WHERE vintage > "+str(time_periods[i][0])+" OR vintage < "+str(time_periods[i-(N-1)][0])+";")
-                if table[0] == 'CostVariable' or table[0] == 'CostFixed':
-                    cur.execute("DELETE FROM "+str(table[0])+" WHERE periods < vintage;")
+                            filter_list = names[
+                                : names.index(str(time_periods[i - j][0])) + 1
+                            ]
+                        df_table = df_table.drop_duplicates(
+                            subset=filter_list, keep="last"
+                        )
+                        df_table.columns = [
+                            "vintage" if x == str(time_periods[i - j][0]) else x
+                            for x in df_table.columns
+                        ]
+                        df_table.to_sql(
+                            str(table[0]), con, if_exists="append", index=False
+                        )
+
+                # For these two tables we only want current vintages.
+                if table[0] == "CostInvest" or table[0] == "DiscountRate":
+                    cur.execute(
+                        "DELETE FROM "
+                        + str(table[0])
+                        + " WHERE vintage > "
+                        + str(time_periods[i][0])
+                        + " OR vintage < "
+                        + str(time_periods[i - (N - 1)][0])
+                        + ";"
+                    )
+                if table[0] == "CostVariable" or table[0] == "CostFixed":
+                    cur.execute(
+                        "DELETE FROM " + str(table[0]) + " WHERE periods < vintage;"
+                    )

                # If (t,v) is not found in the Efficiency table, delete it from all the other tables
                # For the EmissionActivity, (i,t,v,o) tuple must be checked. 
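The EmissionActivity branch that follows keys its DELETE on the whole (regions, input_comm, tech, vintage, output_comm) tuple by concatenating the key columns with SQLite's || operator. A minimal sketch of that pattern, assuming a toy in-memory database with made-up schema fragments and sample rows (the real Temoa tables carry more columns):

# Illustrative sketch of the multi-column membership delete used below.
# Table and column names mirror the Temoa schema; the rows are made-up sample data.
import sqlite3

con = sqlite3.connect(":memory:")
cur = con.cursor()
cur.execute("CREATE TABLE Efficiency (regions, input_comm, tech, vintage, output_comm)")
cur.execute(
    "CREATE TABLE EmissionActivity (regions, input_comm, tech, vintage, output_comm, emis_act)"
)
cur.execute("INSERT INTO Efficiency VALUES ('R1', 'coal', 'E_COAL', 2020, 'elec')")
cur.executemany(
    "INSERT INTO EmissionActivity VALUES (?, ?, ?, ?, ?, ?)",
    [
        ("R1", "coal", "E_COAL", 2020, "elec", 0.09),  # kept: tuple exists in Efficiency
        ("R1", "coal", "E_COAL", 2010, "elec", 0.09),  # removed: vintage 2010 was pruned
    ],
)
# SQLite's || operator folds each row's key columns into a single string, so
# NOT IN against the same concatenation acts as a multi-column membership test.
cur.execute(
    "DELETE FROM EmissionActivity WHERE regions || input_comm || tech || vintage || output_comm \
    NOT IN (SELECT regions || input_comm || tech || vintage || output_comm FROM Efficiency)"
)
print(cur.execute("SELECT vintage FROM EmissionActivity").fetchall())  # [(2020,)]

One caveat of this design: string concatenation can in principle produce false matches when adjacent column values collide (e.g. 'a'||'bc' equals 'ab'||'c'), which is tolerable here only because Temoa identifiers do not overlap that way.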
- if table[0] == 'EmissionActivity': - cur.execute("DELETE FROM EmissionActivity WHERE regions || input_comm || tech || vintage || output_comm \ - NOT IN (SELECT regions || input_comm || tech || vintage || output_comm FROM Efficiency)") + if table[0] == "EmissionActivity": + cur.execute( + "DELETE FROM EmissionActivity WHERE regions || input_comm || tech || vintage || output_comm \ + NOT IN (SELECT regions || input_comm || tech || vintage || output_comm FROM Efficiency)" + ) else: - cur.execute("DELETE FROM "+str(table[0])+" WHERE tech IN (SELECT tech FROM Efficiency) AND vintage \ - NOT IN (SELECT vintage FROM Efficiency WHERE Efficiency.tech="+str(table[0])+".tech \ - AND Efficiency.regions="+str(table[0])+".regions);") - #except: + cur.execute( + "DELETE FROM " + + str(table[0]) + + " WHERE tech IN (SELECT tech FROM Efficiency) AND vintage \ + NOT IN (SELECT vintage FROM Efficiency WHERE Efficiency.tech=" + + str(table[0]) + + ".tech \ + AND Efficiency.regions=" + + str(table[0]) + + ".regions);" + ) + # except: # raise Exception(table[0],j) except: - pass + pass - cur.execute("UPDATE commodities SET comm_name = TRIM(comm_name, CHAR(10,13,37))") + cur.execute( + "UPDATE commodities SET comm_name = TRIM(comm_name, CHAR(10,13,37))" + ) # delete unused commodities otherwise the model throws an error - cur.execute("DELETE FROM commodities WHERE flag!='e' AND comm_name NOT IN (SELECT input_comm from Efficiency UNION SELECT output_comm from Efficiency);") - cur.execute("INSERT INTO `time_periods` (t_periods,flag) VALUES ("+str(time_periods[i+1][0])+",'f');") - cur.execute("UPDATE `time_periods` SET flag='e' WHERE t_periods < "+str(time_periods[i-(N-1)][0])) - - + cur.execute( + "DELETE FROM commodities WHERE flag!='e' AND comm_name NOT IN (SELECT input_comm from Efficiency UNION SELECT output_comm from Efficiency);" + ) + cur.execute( + "INSERT INTO `time_periods` (t_periods,flag) VALUES (" + + str(time_periods[i + 1][0]) + + ",'f');" + ) + cur.execute( + "UPDATE `time_periods` SET flag='e' WHERE t_periods < " + + str(time_periods[i - (N - 1)][0]) + ) # -------------------------------------------------------------------------------------------------- # Update the maximum resource table to include flows that already contribute to resource consumption # -------------------------------------------------------------------------------------------------- - if (i!=(N-1)) & ('MaxResource' in [x[0] for x in table_list]): - resource_constraints_org = pd.read_sql_query("SELECT regions, tech, maxres FROM MaxResource", con_org) - for ind,row in resource_constraints_org.iterrows(): - df_existing_resources = pd.read_sql_query("SELECT sum(vflow_out) FROM Output_VFlow_Out \ - WHERE regions='" + row['regions'] + "' AND \ - tech='" + row['tech'] + "' AND \ - scenario="+"'"+str(self.options.scenario)+"' AND \ - vintage < "+str(time_periods[i-(N-1)][0]), con_org) - try: - updated_resource = row['maxres'] - df_existing_resources.iloc[0,0] - query = "UPDATE MaxResource SET maxres=" + str(updated_resource) + " WHERE regions='"\ - + row['regions'] + "' AND tech='" + row['tech'] + "'" + if (i != (N - 1)) & ("MaxResource" in [x[0] for x in table_list]): + resource_constraints_org = pd.read_sql_query( + "SELECT regions, tech, maxres FROM MaxResource", con_org + ) + for ind, row in resource_constraints_org.iterrows(): + df_existing_resources = pd.read_sql_query( + "SELECT sum(vflow_out) FROM Output_VFlow_Out \ + WHERE regions='" + + row["regions"] + + "' AND \ + tech='" + + row["tech"] + + "' AND \ + scenario=" + + "'" + 
+ str(self.options.scenario)
+                + "' AND \
+                vintage < "
+                + str(time_periods[i - (N - 1)][0]),
+                con_org,
+            )
+            try:
+                updated_resource = row["maxres"] - df_existing_resources.iloc[0, 0]
+                query = (
+                    "UPDATE MaxResource SET maxres="
+                    + str(updated_resource)
+                    + " WHERE regions='"
+                    + row["regions"]
+                    + "' AND tech='"
+                    + row["tech"]
+                    + "'"
+                )
                 cur.execute(query)
             except:
                 pass
-
     con.commit()
     con.close()
@@ -372,34 +756,54 @@ def myopic_db_generator_solver ( self ):
     con.commit()
     con.close()
     # ---------------------------------------------------------------
-    # the database is ready. It is run via a temporary config file in 
+    # the database is ready. It is run via a temporary config file in
     # a perfect foresight fashion.
     # ---------------------------------------------------------------
-    new_config = os.path.join(os.getcwd(), "temoa_model", "config_sample")+new_myopic_name
-    if version<3:
-        ifile = io.open(os.path.join(os.getcwd(), "temoa_model", "config_sample"), encoding='utf-8')
+    new_config = (
+        os.path.join(os.getcwd(), "temoa_model", "config_sample") + new_myopic_name
+    )
+    if version < 3:
+        ifile = io.open(
+            os.path.join(os.getcwd(), "temoa_model", "config_sample"),
+            encoding="utf-8",
+        )
     else:
-        ifile = open(os.path.join(os.getcwd(), "temoa_model", "config_sample"), encoding='utf-8')
+        ifile = open(
+            os.path.join(os.getcwd(), "temoa_model", "config_sample"),
+            encoding="utf-8",
+        )

-    ofile = open(new_config,'w')
+    ofile = open(new_config, "w")
     for line in ifile:
-        new_line = line.replace("--input=data_files/"+db_name, "--input=data_files/"+db_name+new_myopic_name)
-        # the temporary config file is created from the original config file. Since for individual periods we are
-        # going to have a standard run, '--rollinghorizon' needs to be commented out.
-        new_line = new_line.replace("--myopic","#--myopic")
-        if version<3:
-            ofile.write(new_line.encode('utf-8'))
+        new_line = line.replace(
+            "--input=data_files/" + db_name,
+            "--input=data_files/" + db_name + new_myopic_name,
+        )
+        # the temporary config file is created from the original config file. Since for individual periods we are
+        # going to have a standard run, '--myopic' needs to be commented out.
+        new_line = new_line.replace("--myopic", "#--myopic")
+        if version < 3:
+            ofile.write(new_line.encode("utf-8"))
         else:
             ofile.write(new_line)
     ifile.close()
     ofile.close()
-    os.system("python temoa_model/ --config=temoa_model/config_sample"+new_myopic_name)
+    os.system(
+        "python temoa_model/ --config=temoa_model/config_sample" + new_myopic_name
+    )
     # delete the temporary config file
     os.remove(new_config)
     if not self.options.KeepMyopicDBs:
         os.remove(new_db_loc)
-        os.remove(os.path.join(self.options.path_to_data, db_name) +new_myopic_name+".dat")
-
-
-    os.remove(os.path.join(self.options.path_to_data,db_name)+"_blank"+self.options.output[loc2:])
+        os.remove(
+            os.path.join(self.options.path_to_data, db_name)
+            + new_myopic_name
+            + ".dat"
+        )
+
+    os.remove(
+        os.path.join(self.options.path_to_data, db_name)
+        + "_blank"
+        + self.options.output[loc2:]
+    )
diff --git a/temoa_model/temoa_rules.py b/temoa_model/temoa_rules.py
index 64cd401e..8a04598a 100644
--- a/temoa_model/temoa_rules.py
+++ b/temoa_model/temoa_rules.py
@@ -30,6 +30,7 @@
 # and constraints below. 
# --------------------------------------------------------------- + def Capacity_Constraint(M, r, p, s, d, t, v): r""" This constraint ensures that the capacity of a given process is sufficient @@ -70,27 +71,34 @@ def Capacity_Constraint(M, r, p, s, d, t, v): # expression cloning taking place with Pyomo. useful_activity = sum( - M.V_FlowOut[r, p, s, d, S_i, t, v, S_o] - for S_i in M.processInputs[r, p, t, v] - for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] + M.V_FlowOut[r, p, s, d, S_i, t, v, S_o] + for S_i in M.processInputs[r, p, t, v] + for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] ) if t in M.tech_curtailment: # If technologies are present in the curtailment set, then enough # capacity must be available to cover both activity and curtailment. - return value(M.CapacityFactorProcess[r, s, d, t, v]) \ - * value(M.CapacityToActivity[r, t]) * value(M.SegFrac[s, d]) \ - * value(M.ProcessLifeFrac[r, p, t, v]) \ - * M.V_Capacity[r, t, v] == useful_activity + sum( \ - M.V_Curtailment[r, p, s, d, S_i, t, v, S_o] \ - for S_i in M.processInputs[r, p, t, v] \ - for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i]) + return value(M.CapacityFactorProcess[r, s, d, t, v]) * value( + M.CapacityToActivity[r, t] + ) * value(M.SegFrac[s, d]) * value( + M.ProcessLifeFrac[r, p, t, v] + ) * M.V_Capacity[ + r, t, v + ] == useful_activity + sum( + M.V_Curtailment[r, p, s, d, S_i, t, v, S_o] + for S_i in M.processInputs[r, p, t, v] + for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] + ) else: - return value(M.CapacityFactorProcess[r, s, d, t, v]) \ - * value(M.CapacityToActivity[r, t]) \ - * value(M.SegFrac[s, d]) \ - * value(M.ProcessLifeFrac[r, p, t, v]) \ - * M.V_Capacity[r, t, v] >= useful_activity + return ( + value(M.CapacityFactorProcess[r, s, d, t, v]) + * value(M.CapacityToActivity[r, t]) + * value(M.SegFrac[s, d]) + * value(M.ProcessLifeFrac[r, p, t, v]) + * M.V_Capacity[r, t, v] + >= useful_activity + ) def CapacityAnnual_Constraint(M, r, p, t, v): @@ -119,7 +127,7 @@ def CapacityAnnual_Constraint(M, r, p, t, v): """ - CF = 1 #placeholder CF + CF = 1 # placeholder CF activity_rptv = sum( M.V_FlowOutAnnual[r, p, S_i, t, v, S_o] @@ -127,48 +135,46 @@ def CapacityAnnual_Constraint(M, r, p, t, v): for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] ) - return CF \ - * value(M.CapacityToActivity[r, t]) \ - * value(M.ProcessLifeFrac[r, p, t, v]) \ - * M.V_Capacity[r, t, v] >= activity_rptv + return ( + CF + * value(M.CapacityToActivity[r, t]) + * value(M.ProcessLifeFrac[r, p, t, v]) + * M.V_Capacity[r, t, v] + >= activity_rptv + ) def ActivityByTech_Constraint(M, t): r""" -This constraint is utilized by the MGA objective function and defines -the total activity of a technology over the planning horizon. The first version -below applies to technologies with variable output at the timeslice level, -and the second version applies to technologies with constant annual output -in the :code:`tech_annual` set. - -.. math:: - :label: ActivityByTech - - \textbf{ACT}_{t} = \sum_{R, P, S, D, I, V, O} \textbf{FO}_{r, p, s, d,i, t, v, o} - \; - \forall t \not\in T^{a} - - \textbf{ACT}_{t} = \sum_{R, P, I, V, O} \textbf{FOA}_{r, p, i, t, v, o} - \; - \forall t \in T^{a} - -""" + This constraint is utilized by the MGA objective function and defines + the total activity of a technology over the planning horizon. 
The first version + below applies to technologies with variable output at the timeslice level, + and the second version applies to technologies with constant annual output + in the :code:`tech_annual` set. + + .. math:: + :label: ActivityByTech + + \textbf{ACT}_{t} = \sum_{R, P, S, D, I, V, O} \textbf{FO}_{r, p, s, d,i, t, v, o} + \; + \forall t \not\in T^{a} + + \textbf{ACT}_{t} = \sum_{R, P, I, V, O} \textbf{FOA}_{r, p, i, t, v, o} + \; + \forall t \in T^{a} + """ if t not in M.tech_annual: indices = [] for s_index in M.FlowVar_rpsditvo: if t in s_index: indices.append(s_index) - activity = sum( M.V_FlowOut[s_index] - for s_index in indices - ) + activity = sum(M.V_FlowOut[s_index] for s_index in indices) else: indices = [] for s_index in M.FlowVarAnnual_rpitvo: if t in s_index: indices.append(s_index) - activity = sum( M.V_FlowOutAnnual[s_index] - for s_index in indices - ) + activity = sum(M.V_FlowOutAnnual[s_index] for s_index in indices) if int is type(activity): return Constraint.Skip @@ -207,102 +213,102 @@ def CapacityAvailableByPeriodAndTech_Constraint(M, r, p, t): expr = M.V_CapacityAvailableByPeriodAndTech[r, p, t] == cap_avail return expr + def ExistingCapacity_Constraint(M, r, t, v): r""" -Temoa treats existing capacity installed prior to the beginning of the model's -optimization horizon as regular processes that require the same parameter -specification as do new vintage technologies, except for the :code:`CostInvest` -parameter. This constraint sets the capacity of processes for model periods -that exist prior to the optimization horizon to user-specified values. + Temoa treats existing capacity installed prior to the beginning of the model's + optimization horizon as regular processes that require the same parameter + specification as do new vintage technologies, except for the :code:`CostInvest` + parameter. This constraint sets the capacity of processes for model periods + that exist prior to the optimization horizon to user-specified values. -.. math:: - :label: ExistingCapacity + .. math:: + :label: ExistingCapacity - \textbf{CAP}_{r, t, v} = ECAP_{r, t, v} + \textbf{CAP}_{r, t, v} = ECAP_{r, t, v} - \forall \{r, t, v\} \in \Theta_{\text{ExistingCapacity}} -""" + \forall \{r, t, v\} \in \Theta_{\text{ExistingCapacity}}""" expr = M.V_Capacity[r, t, v] == M.ExistingCapacity[r, t, v] return expr + # --------------------------------------------------------------- # Define the Objective Function # --------------------------------------------------------------- def TotalCost_rule(M): r""" -Using the :code:`FlowOut` and :code:`Capacity` variables, the Temoa objective -function calculates the cost of energy supply, under the assumption that capital -costs are paid through loans. This implementation sums up all the costs incurred, -and is defined as :math:`C_{tot} = C_{loans} + C_{fixed} + C_{variable}`. Each -term on the right-hand side represents the cost incurred over the model -time horizon and discounted to the initial year in the horizon (:math:`{P}_0`). -The calculation of each term is given below. - -.. math:: - :label: obj_loan - - C_{loans} = \sum_{r, t, v \in \Theta_{IC}} \left ( - \left [ - CI_{r, t, v} \cdot LA_{r, t, v} - \cdot \frac{(1 + GDR)^{P_0 - v +1} \cdot (1 - (1 + GDR)^{-LLP_{r, t, v}})}{GDR} \right. \right. - \\ \left. \left. \cdot \frac{ 1-(1+GDR)^{-LPA_{r,t,v}} }{ 1-(1+GDR)^{-LTP_{r,t,v}} } - \right ] - \cdot \textbf{CAP}_{r, t, v} - \right ) - -Note that capital costs (:math:`{IC}_{r,t,v}`) are handled in several steps. 
First, each capital cost -is amortized using the loan rate (i.e., technology-specific discount rate) and loan -period. Second, the annual stream of payments is converted into a lump sum using -the global discount rate and loan period. Third, the new lump sum is amortized -at the global discount rate and technology lifetime. Fourth, loan payments beyond -the model time horizon are removed and the lump sum recalculated. The terms used -in Steps 3-4 are :math:`\frac{ GDR }{ 1-(1+GDR)^{-LTP_{r,t,v} } }\cdot -\frac{ 1-(1+GDR)^{-LPA_{t,v}} }{ GDR }`. The product simplifies to -:math:`\frac{ 1-(1+GDR)^{-LPA_{r,t,v}} }{ 1-(1+GDR)^{-LTP_{r,t,v}} }`, where -:math:`LPA_{r,t,v}` represents the active lifetime of process t in region r :math:`(r,t,v)` -before the end of the model horizon, and :math:`LTP_{r,t,v}` represents the full -lifetime of a regional process :math:`(r,t,v)`. Fifth, the lump sum is discounted back to the -beginning of the horizon (:math:`P_0`) using the global discount rate. While an -explicit salvage term is not included, this approach properly captures the capital -costs incurred within the model time horizon, accounting for technology-specific -loan rates and periods. - -.. math:: - :label: obj_fixed - - C_{fixed} = \sum_{r, p, t, v \in \Theta_{CF}} \left ( - \left [ - CF_{r, p, t, v} - \cdot \frac{(1 + GDR)^{P_0 - p +1} \cdot (1 - (1 + GDR)^{-{MPL}_{r, t, v}})}{GDR} - \right ] - \cdot \textbf{CAP}_{r, t, v} - \right ) - -.. math:: - :label: obj_variable - - &C_{variable} = \\ &\quad \sum_{r, p, t, v \in \Theta_{CV}} \left ( - CV_{r, p, t, v} - \cdot - \frac{ - (1 + GDR)^{P_0 - p + 1} \cdot (1 - (1 + GDR)^{-{MPL}_{r,p,t,v}}) - }{ - GDR - }\cdot \sum_{S,D,I, O} \textbf{FO}_{r, p, s, d,i, t, v, o} - \right ) \\ &\quad + \sum_{r, p, t \not \in T^{a}, v \in \Theta_{VC}} \left ( - CV_{r, p, t, v} - \cdot - \frac{ - (1 + GDR)^{P_0 - p + 1} \cdot (1 - (1 + GDR)^{-{MPL}_{r,p,t,v}}) - }{ - GDR - } - \cdot \sum_{I, O} \textbf{FOA}_{r, p,i, t \in T^{a}, v, o} - \right ) - -""" + Using the :code:`FlowOut` and :code:`Capacity` variables, the Temoa objective + function calculates the cost of energy supply, under the assumption that capital + costs are paid through loans. This implementation sums up all the costs incurred, + and is defined as :math:`C_{tot} = C_{loans} + C_{fixed} + C_{variable}`. Each + term on the right-hand side represents the cost incurred over the model + time horizon and discounted to the initial year in the horizon (:math:`{P}_0`). + The calculation of each term is given below. + + .. math:: + :label: obj_loan + + C_{loans} = \sum_{r, t, v \in \Theta_{IC}} \left ( + \left [ + CI_{r, t, v} \cdot LA_{r, t, v} + \cdot \frac{(1 + GDR)^{P_0 - v +1} \cdot (1 - (1 + GDR)^{-LLP_{r, t, v}})}{GDR} \right. \right. + \\ \left. \left. \cdot \frac{ 1-(1+GDR)^{-LPA_{r,t,v}} }{ 1-(1+GDR)^{-LTP_{r,t,v}} } + \right ] + \cdot \textbf{CAP}_{r, t, v} + \right ) + + Note that capital costs (:math:`{IC}_{r,t,v}`) are handled in several steps. First, each capital cost + is amortized using the loan rate (i.e., technology-specific discount rate) and loan + period. Second, the annual stream of payments is converted into a lump sum using + the global discount rate and loan period. Third, the new lump sum is amortized + at the global discount rate and technology lifetime. Fourth, loan payments beyond + the model time horizon are removed and the lump sum recalculated. The terms used + in Steps 3-4 are :math:`\frac{ GDR }{ 1-(1+GDR)^{-LTP_{r,t,v} } }\cdot + \frac{ 1-(1+GDR)^{-LPA_{t,v}} }{ GDR }`. 
The product simplifies to + :math:`\frac{ 1-(1+GDR)^{-LPA_{r,t,v}} }{ 1-(1+GDR)^{-LTP_{r,t,v}} }`, where + :math:`LPA_{r,t,v}` represents the active lifetime of process t in region r :math:`(r,t,v)` + before the end of the model horizon, and :math:`LTP_{r,t,v}` represents the full + lifetime of a regional process :math:`(r,t,v)`. Fifth, the lump sum is discounted back to the + beginning of the horizon (:math:`P_0`) using the global discount rate. While an + explicit salvage term is not included, this approach properly captures the capital + costs incurred within the model time horizon, accounting for technology-specific + loan rates and periods. + + .. math:: + :label: obj_fixed + + C_{fixed} = \sum_{r, p, t, v \in \Theta_{CF}} \left ( + \left [ + CF_{r, p, t, v} + \cdot \frac{(1 + GDR)^{P_0 - p +1} \cdot (1 - (1 + GDR)^{-{MPL}_{r, t, v}})}{GDR} + \right ] + \cdot \textbf{CAP}_{r, t, v} + \right ) + + .. math:: + :label: obj_variable + + &C_{variable} = \\ &\quad \sum_{r, p, t, v \in \Theta_{CV}} \left ( + CV_{r, p, t, v} + \cdot + \frac{ + (1 + GDR)^{P_0 - p + 1} \cdot (1 - (1 + GDR)^{-{MPL}_{r,p,t,v}}) + }{ + GDR + }\cdot \sum_{S,D,I, O} \textbf{FO}_{r, p, s, d,i, t, v, o} + \right ) \\ &\quad + \sum_{r, p, t \not \in T^{a}, v \in \Theta_{VC}} \left ( + CV_{r, p, t, v} + \cdot + \frac{ + (1 + GDR)^{P_0 - p + 1} \cdot (1 - (1 + GDR)^{-{MPL}_{r,p,t,v}}) + }{ + GDR + } + \cdot \sum_{I, O} \textbf{FOA}_{r, p,i, t \in T^{a}, v, o} + \right ) + """ return sum(PeriodCost_rule(M, p) for p in M.time_optimize) @@ -313,11 +319,8 @@ def PeriodCost_rule(M, p): MPL = M.ModelProcessLife x = 1 + GDR # convenience variable, nothing more. - - if value(M.MyopicBaseyear) != 0: - P_0 = value(M.MyopicBaseyear) - - + if value(M.MyopicBaseyear) != 0: + P_0 = value(M.MyopicBaseyear) loan_costs = sum( M.V_Capacity[r, S_t, S_v] @@ -349,7 +352,9 @@ def PeriodCost_rule(M, p): * ( value(MPL[r, p, S_t, S_v]) if not GDR - else (x ** (P_0 - p + 1) * (1 - x ** (-value(MPL[r, p, S_t, S_v]))) / GDR) + else ( + x ** (P_0 - p + 1) * (1 - x ** (-value(MPL[r, p, S_t, S_v]))) / GDR + ) ) ) for r, S_p, S_t, S_v in M.CostFixed.sparse_iterkeys() @@ -363,7 +368,9 @@ def PeriodCost_rule(M, p): * ( value(MPL[r, p, S_t, S_v]) if not GDR - else (x ** (P_0 - p + 1) * (1 - x ** (-value(MPL[r, p, S_t, S_v]))) / GDR) + else ( + x ** (P_0 - p + 1) * (1 - x ** (-value(MPL[r, p, S_t, S_v]))) / GDR + ) ) ) for r, S_p, S_t, S_v in M.CostVariable.sparse_iterkeys() @@ -381,7 +388,9 @@ def PeriodCost_rule(M, p): * ( value(MPL[r, p, S_t, S_v]) if not GDR - else (x ** (P_0 - p + 1) * (1 - x ** (-value(MPL[r, p, S_t, S_v]))) / GDR) + else ( + x ** (P_0 - p + 1) * (1 - x ** (-value(MPL[r, p, S_t, S_v]))) / GDR + ) ) ) for r, S_p, S_t, S_v in M.CostVariable.sparse_iterkeys() @@ -404,47 +413,52 @@ def PeriodCost_rule(M, p): def Demand_Constraint(M, r, p, s, d, dem): r""" -The Demand constraint drives the model. This constraint ensures that supply at -least meets the demand specified by the Demand parameter in all periods and -slices, by ensuring that the sum of all the demand output commodity (:math:`c`) -generated by both commodity flow at the time slice level (:math:`\textbf{FO}`) and -the annual level (:math:`\textbf{FOA}`) must meet the modeler-specified demand -in each time slice. - -.. 
math:: - :label: Demand - - \sum_{I, T-T^{a}, V} \textbf{FO}_{r, p, s, d, i, t \not \in T^{a}, v, dem} + - SEG_{s,d} \cdot \sum_{I, T^{a}, V} \textbf{FOA}_{r, p, i, t \in T^{a}, v, dem} - = - {DEM}_{r, p, dem} \cdot {DSD}_{r, s, d, dem} - -Note that the validity of this constraint relies on the fact that the -:math:`C^d` set is distinct from both :math:`C^e` and :math:`C^p`. In other -words, an end-use demand must only be an end-use demand. Note that if an output -could satisfy both an end-use and internal system demand, then the output from -:math:`\textbf{FO}` and :math:`\textbf{FOA}` would be double counted. -""" - if (r,s,d,dem) not in M.DemandSpecificDistribution.sparse_keys(): + The Demand constraint drives the model. This constraint ensures that supply at + least meets the demand specified by the Demand parameter in all periods and + slices, by ensuring that the sum of all the demand output commodity (:math:`c`) + generated by both commodity flow at the time slice level (:math:`\textbf{FO}`) and + the annual level (:math:`\textbf{FOA}`) must meet the modeler-specified demand + in each time slice. + + .. math:: + :label: Demand + + \sum_{I, T-T^{a}, V} \textbf{FO}_{r, p, s, d, i, t \not \in T^{a}, v, dem} + + SEG_{s,d} \cdot \sum_{I, T^{a}, V} \textbf{FOA}_{r, p, i, t \in T^{a}, v, dem} + = + {DEM}_{r, p, dem} \cdot {DSD}_{r, s, d, dem} + + Note that the validity of this constraint relies on the fact that the + :math:`C^d` set is distinct from both :math:`C^e` and :math:`C^p`. In other + words, an end-use demand must only be an end-use demand. Note that if an output + could satisfy both an end-use and internal system demand, then the output from + :math:`\textbf{FO}` and :math:`\textbf{FOA}` would be double counted.""" + if (r, s, d, dem) not in M.DemandSpecificDistribution.sparse_keys(): return Constraint.Skip supply = sum( M.V_FlowOut[r, p, s, d, S_i, S_t, S_v, dem] - for S_t, S_v in M.commodityUStreamProcess[r, p, dem] if S_t not in M.tech_annual + for S_t, S_v in M.commodityUStreamProcess[r, p, dem] + if S_t not in M.tech_annual for S_i in M.ProcessInputsByOutput[r, p, S_t, S_v, dem] ) supply_annual = sum( M.V_FlowOutAnnual[r, p, S_i, S_t, S_v, dem] - for S_t, S_v in M.commodityUStreamProcess[r, p, dem] if S_t in M.tech_annual + for S_t, S_v in M.commodityUStreamProcess[r, p, dem] + if S_t in M.tech_annual for S_i in M.ProcessInputsByOutput[r, p, S_t, S_v, dem] - ) * value( M.SegFrac[ s, d]) + ) * value(M.SegFrac[s, d]) DemandConstraintErrorCheck(supply + supply_annual, r, p, s, d, dem) - expr = supply + supply_annual == M.Demand[r, p, dem] * M.DemandSpecificDistribution[r, s, d, dem] + expr = ( + supply + supply_annual + == M.Demand[r, p, dem] * M.DemandSpecificDistribution[r, s, d, dem] + ) return expr + def DemandActivity_Constraint(M, r, p, s, d, t, v, dem, s_0, d_0): r""" @@ -474,7 +488,7 @@ def DemandActivity_Constraint(M, r, p, s, d, t, v, dem, s_0, d_0): variations, and therefore the equation above only includes :math:`\textbf{FO}` and not :math:`\textbf{FOA}` """ - if (r,s,d,dem) not in M.DemandSpecificDistribution.sparse_keys(): + if (r, s, d, dem) not in M.DemandSpecificDistribution.sparse_keys(): return Constraint.Skip DSD = M.DemandSpecificDistribution # lazy programmer @@ -576,67 +590,97 @@ def CommodityBalance_Constraint(M, r, p, s, d, c): vflow_in_ToStorage = sum( M.V_FlowIn[r, p, s, d, c, S_t, S_v, S_o] - for S_t, S_v in M.commodityDStreamProcess[r, p, c] if S_t in M.tech_storage + for S_t, S_v in M.commodityDStreamProcess[r, p, c] + if S_t in M.tech_storage for 
S_o in M.ProcessOutputsByInput[r, p, S_t, S_v, c] ) vflow_in_ToNonStorage = sum( - M.V_FlowOut[r, p, s, d, c, S_t, S_v, S_o] / value(M.Efficiency[r, c, S_t, S_v, S_o]) - for S_t, S_v in M.commodityDStreamProcess[r, p, c] if S_t not in M.tech_storage and S_t not in M.tech_annual + M.V_FlowOut[r, p, s, d, c, S_t, S_v, S_o] + / value(M.Efficiency[r, c, S_t, S_v, S_o]) + for S_t, S_v in M.commodityDStreamProcess[r, p, c] + if S_t not in M.tech_storage and S_t not in M.tech_annual for S_o in M.ProcessOutputsByInput[r, p, S_t, S_v, c] ) vflow_in_ToNonStorageAnnual = value(M.SegFrac[s, d]) * sum( - M.V_FlowOutAnnual[r, p, c, S_t, S_v, S_o] / value(M.Efficiency[r, c, S_t, S_v, S_o]) - for S_t, S_v in M.commodityDStreamProcess[r, p, c] if S_t not in M.tech_storage and S_t in M.tech_annual + M.V_FlowOutAnnual[r, p, c, S_t, S_v, S_o] + / value(M.Efficiency[r, c, S_t, S_v, S_o]) + for S_t, S_v in M.commodityDStreamProcess[r, p, c] + if S_t not in M.tech_storage and S_t in M.tech_annual for S_o in M.ProcessOutputsByInput[r, p, S_t, S_v, c] ) try: - vflow_out = sum( - M.V_FlowOut[r, p, s, d, S_i, S_t, S_v, c] - for S_t, S_v in M.commodityUStreamProcess[r, p, c] - for S_i in M.ProcessInputsByOutput[r, p, S_t, S_v, c] - ) - - #export of commodity c from region r to other regions - interregional_exports = 0 - if (r, p, c) in M.exportRegions: - interregional_exports = sum( - M.V_FlowOut[r+"-"+reg, p, s, d, c, S_t, S_v, S_o] - for reg, S_t, S_v, S_o in M.exportRegions[r, p, c] - ) - - #import of commodity c from other regions into region r - interregional_imports = 0 - if (r, p, c) in M.importRegions: - interregional_imports = sum( - M.V_FlowOut[reg+"-"+r, p, s, d, S_i, S_t, S_v, c] - for reg, S_t, S_v, S_i in M.importRegions[r, p, c] - ) - - v_out_excess = 0 - if c in M.flex_commodities: - v_out_excess = sum( - M.V_Flex[r, p, s, d, S_i, S_t, S_v, c] - for S_t, S_v in M.commodityUStreamProcess[r, p, c] if S_t not in M.tech_storage and S_t not in M.tech_annual and S_t in M.tech_flex + vflow_out = sum( + M.V_FlowOut[r, p, s, d, S_i, S_t, S_v, c] + for S_t, S_v in M.commodityUStreamProcess[r, p, c] for S_i in M.ProcessInputsByOutput[r, p, S_t, S_v, c] ) + # export of commodity c from region r to other regions + interregional_exports = 0 + if (r, p, c) in M.exportRegions: + interregional_exports = sum( + M.V_FlowOut[r + "-" + reg, p, s, d, c, S_t, S_v, S_o] + for reg, S_t, S_v, S_o in M.exportRegions[r, p, c] + ) + + # import of commodity c from other regions into region r + interregional_imports = 0 + if (r, p, c) in M.importRegions: + interregional_imports = sum( + M.V_FlowOut[reg + "-" + r, p, s, d, S_i, S_t, S_v, c] + for reg, S_t, S_v, S_i in M.importRegions[r, p, c] + ) + + v_out_excess = 0 + if c in M.flex_commodities: + v_out_excess = sum( + M.V_Flex[r, p, s, d, S_i, S_t, S_v, c] + for S_t, S_v in M.commodityUStreamProcess[r, p, c] + if S_t not in M.tech_storage + and S_t not in M.tech_annual + and S_t in M.tech_flex + for S_i in M.ProcessInputsByOutput[r, p, S_t, S_v, c] + ) + except: - raise Exception('The commodity "'+str(c)+'" can be produced \ + raise Exception( + 'The commodity "' + + str(c) + + '" can be produced \ by at least one technology in the tech_annual set and one technology \ not in the tech_annual set. 
All the producers of the commodity must \ - either be in tech_annual or not in tech_annual') - - + either be in tech_annual or not in tech_annual' + ) - CommodityBalanceConstraintErrorCheck(vflow_out + interregional_imports, vflow_in_ToStorage + vflow_in_ToNonStorage + vflow_in_ToNonStorageAnnual + interregional_exports + v_out_excess, r, p, s, d, c) + CommodityBalanceConstraintErrorCheck( + vflow_out + interregional_imports, + vflow_in_ToStorage + + vflow_in_ToNonStorage + + vflow_in_ToNonStorageAnnual + + interregional_exports + + v_out_excess, + r, + p, + s, + d, + c, + ) - expr = vflow_out + interregional_imports == vflow_in_ToStorage + vflow_in_ToNonStorage + vflow_in_ToNonStorageAnnual + interregional_exports + v_out_excess + expr = ( + vflow_out + interregional_imports + == vflow_in_ToStorage + + vflow_in_ToNonStorage + + vflow_in_ToNonStorageAnnual + + interregional_exports + + v_out_excess + ) return expr + def CommodityBalanceAnnual_Constraint(M, r, p, c): r""" Similar to the CommodityBalance_Constraint, but this version applies only @@ -673,16 +717,20 @@ def CommodityBalanceAnnual_Constraint(M, r, p, c): return Constraint.Skip vflow_in = sum( - M.V_FlowOut[r, p, s, d, c, S_t, S_v, S_o] / value(M.Efficiency[r, c, S_t, S_v, S_o]) - for S_t, S_v in M.commodityDStreamProcess[r, p, c] if S_t not in M.tech_annual + M.V_FlowOut[r, p, s, d, c, S_t, S_v, S_o] + / value(M.Efficiency[r, c, S_t, S_v, S_o]) + for S_t, S_v in M.commodityDStreamProcess[r, p, c] + if S_t not in M.tech_annual for S_o in M.ProcessOutputsByInput[r, p, S_t, S_v, c] for d in M.time_of_day for s in M.time_season ) vflow_in_annual = sum( - M.V_FlowOutAnnual[r, p, c, S_t, S_v, S_o] / value(M.Efficiency[r, c, S_t, S_v, S_o]) - for S_t, S_v in M.commodityDStreamProcess[r, p, c] if S_t in M.tech_annual + M.V_FlowOutAnnual[r, p, c, S_t, S_v, S_o] + / value(M.Efficiency[r, c, S_t, S_v, S_o]) + for S_t, S_v in M.commodityDStreamProcess[r, p, c] + if S_t in M.tech_annual for S_o in M.ProcessOutputsByInput[r, p, S_t, S_v, c] ) @@ -692,72 +740,82 @@ def CommodityBalanceAnnual_Constraint(M, r, p, c): for S_i in M.ProcessInputsByOutput[r, p, S_t, S_v, c] ) - #export of commodity c from region r to other regions + # export of commodity c from region r to other regions interregional_exports = 0 if (r, p, c) in M.exportRegions: - interregional_exports = sum( - M.V_FlowOutAnnual[str(r)+"-"+str(reg), p, c, S_t, S_v, S_o] - for reg, S_t, S_v, S_o in M.exportRegions[r, p, c] + interregional_exports = sum( + M.V_FlowOutAnnual[str(r) + "-" + str(reg), p, c, S_t, S_v, S_o] + for reg, S_t, S_v, S_o in M.exportRegions[r, p, c] ) - #import of commodity c from other regions into region r + # import of commodity c from other regions into region r interregional_imports = 0 if (r, p, c) in M.importRegions: - interregional_imports = sum( - M.V_FlowOutAnnual[str(reg)+"-"+str(r), p, S_i, S_t, S_v, c] - for reg, S_t, S_v, S_i in M.importRegions[r, p, c] + interregional_imports = sum( + M.V_FlowOutAnnual[str(reg) + "-" + str(r), p, S_i, S_t, S_v, c] + for reg, S_t, S_v, S_i in M.importRegions[r, p, c] ) v_out_excess = 0 if c in M.flex_commodities: - v_out_excess = sum( - M.V_FlexAnnual[r, p, S_i, S_t, S_v, c] - for S_t, S_v in M.commodityUStreamProcess[r, p, c] if S_t in M.tech_flex and S_t in M.tech_annual - for S_i in M.ProcessInputsByOutput[r, p, S_t, S_v, c] - ) - - CommodityBalanceConstraintErrorCheckAnnual(vflow_out + interregional_imports, vflow_in_annual + vflow_in + interregional_exports + v_out_excess, r, p, c) + v_out_excess = sum( + 
M.V_FlexAnnual[r, p, S_i, S_t, S_v, c] + for S_t, S_v in M.commodityUStreamProcess[r, p, c] + if S_t in M.tech_flex and S_t in M.tech_annual + for S_i in M.ProcessInputsByOutput[r, p, S_t, S_v, c] + ) - expr = vflow_out + interregional_imports == vflow_in_annual + vflow_in + interregional_exports + v_out_excess + CommodityBalanceConstraintErrorCheckAnnual( + vflow_out + interregional_imports, + vflow_in_annual + vflow_in + interregional_exports + v_out_excess, + r, + p, + c, + ) + expr = ( + vflow_out + interregional_imports + == vflow_in_annual + vflow_in + interregional_exports + v_out_excess + ) return expr + def ResourceExtraction_Constraint(M, reg, p, r): r""" -The ResourceExtraction constraint allows a modeler to specify an annual limit on -the amount of a particular resource Temoa may use in a period. The first version -of the constraint pertains to technologies with variable output at the time slice -level, and the second version pertains to technologies with constant annual output -belonging to the :code:`tech_annual` set. + The ResourceExtraction constraint allows a modeler to specify an annual limit on + the amount of a particular resource Temoa may use in a period. The first version + of the constraint pertains to technologies with variable output at the time slice + level, and the second version pertains to technologies with constant annual output + belonging to the :code:`tech_annual` set. -.. math:: - :label: ResourceExtraction + .. math:: + :label: ResourceExtraction - \sum_{S, D, I, t \in T^r \& t \not \in T^{a}, V} \textbf{FO}_{r, p, s, d, i, t, v, c} \le RSC_{r, p, c} + \sum_{S, D, I, t \in T^r \& t \not \in T^{a}, V} \textbf{FO}_{r, p, s, d, i, t, v, c} \le RSC_{r, p, c} - \forall \{r, p, c\} \in \Theta_{\text{ResourceExtraction}} + \forall \{r, p, c\} \in \Theta_{\text{ResourceExtraction}} - \sum_{I, t \in T^r \& t \in T^{a}, V} \textbf{FOA}_{r, p, i, t, v, c} \le RSC_{r, p, c} + \sum_{I, t \in T^r \& t \in T^{a}, V} \textbf{FOA}_{r, p, i, t, v, c} \le RSC_{r, p, c} - \forall \{r, p, c\} \in \Theta_{\text{ResourceExtraction}} -""" + \forall \{r, p, c\} \in \Theta_{\text{ResourceExtraction}}""" try: - collected = sum( - M.V_FlowOut[reg, p, S_s, S_d, S_i, S_t, S_v, r] - for S_i, S_t, S_v in M.ProcessByPeriodAndOutput.keys() - for S_s in M.time_season - for S_d in M.time_of_day - ) + collected = sum( + M.V_FlowOut[reg, p, S_s, S_d, S_i, S_t, S_v, r] + for S_i, S_t, S_v in M.ProcessByPeriodAndOutput.keys() + for S_s in M.time_season + for S_d in M.time_of_day + ) except: - collected = sum( - M.V_FlowOutAnnual[reg, p, S_i, S_t, S_v, r] - for S_i, S_t, S_v in M.ProcessByPeriodAndOutput.keys() - ) + collected = sum( + M.V_FlowOutAnnual[reg, p, S_i, S_t, S_v, r] + for S_i, S_t, S_v in M.ProcessByPeriodAndOutput.keys() + ) expr = collected <= M.ResourceBound[reg, p, r] return expr + def BaseloadDiurnal_Constraint(M, r, p, s, d, t, v): r""" @@ -810,25 +868,23 @@ def BaseloadDiurnal_Constraint(M, r, p, s, d, t, v): # So: (ActA / SegA) == (ActB / SegB) # computationally, however, multiplication is cheaper than division, so: # (ActA * SegB) == (ActB * SegA) - activity_sd = sum( \ - M.V_FlowOut[r, p, s, d, S_i, t, v, S_o] \ - for S_i in M.processInputs[r, p, t, v] \ - for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] \ + activity_sd = sum( + M.V_FlowOut[r, p, s, d, S_i, t, v, S_o] + for S_i in M.processInputs[r, p, t, v] + for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] ) - activity_sd_0 = sum( \ - M.V_FlowOut[r, p, s, d_0, S_i, t, v, S_o] \ - for S_i in M.processInputs[r, p, 
t, v] \ - for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] \ + activity_sd_0 = sum( + M.V_FlowOut[r, p, s, d_0, S_i, t, v, S_o] + for S_i in M.processInputs[r, p, t, v] + for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] ) - expr = ( - activity_sd * M.SegFrac[s, d_0] - == activity_sd_0 * M.SegFrac[s, d] - ) + expr = activity_sd * M.SegFrac[s, d_0] == activity_sd_0 * M.SegFrac[s, d] return expr + def RegionalExchangeCapacity_Constraint(M, r_e, r_i, t, v): r""" @@ -846,7 +902,7 @@ def RegionalExchangeCapacity_Constraint(M, r_e, r_i, t, v): \forall \{r_e, r_i, t, v\} \in \Theta_{\text{RegionalExchangeCapacity}} """ - expr = M.V_Capacity[r_e+"-"+r_i, t, v] == M.V_Capacity[r_i+"-"+r_e, t, v] + expr = M.V_Capacity[r_e + "-" + r_i, t, v] == M.V_Capacity[r_i + "-" + r_e, t, v] return expr @@ -854,59 +910,58 @@ def RegionalExchangeCapacity_Constraint(M, r_e, r_i, t, v): def StorageEnergy_Constraint(M, r, p, s, d, t, v): r""" -This constraint tracks the storage charge level (:math:`\textbf{SL}_{r, p, s, d, t, v}`) -assuming ordered time slices. The initial storage charge level is optimized -for the first time slice in each period, and then the charge level is updated each time -slice based on the amount of energy stored or discharged. At the end of the last time -slice associated with each period, the charge level must equal the starting charge level. -In the formulation below, note that :math:`\textbf{stored\_energy}` is an internal model -decision variable. + This constraint tracks the storage charge level (:math:`\textbf{SL}_{r, p, s, d, t, v}`) + assuming ordered time slices. The initial storage charge level is optimized + for the first time slice in each period, and then the charge level is updated each time + slice based on the amount of energy stored or discharged. At the end of the last time + slice associated with each period, the charge level must equal the starting charge level. + In the formulation below, note that :math:`\textbf{stored\_energy}` is an internal model + decision variable. -First, the amount of stored energy in a given time slice is calculated as the -difference between the amount of energy stored (first term) and the amount of energy -dispatched (second term). Note that the storage device's roundtrip efficiency is applied -on the input side: + First, the amount of stored energy in a given time slice is calculated as the + difference between the amount of energy stored (first term) and the amount of energy + dispatched (second term). Note that the storage device's roundtrip efficiency is applied + on the input side: -.. math:: - :label: StorageEnergy + .. math:: + :label: StorageEnergy - \textbf{stored\_energy} = - \sum_{I, O} \textbf{FIS}_{r, p, s, d, i, t, v, o} \cdot - EFF_{r,i,t,v,o} - - - \sum_{I, O} \textbf{FO}_{r, p, s, d, i, t, v, o} + \textbf{stored\_energy} = + \sum_{I, O} \textbf{FIS}_{r, p, s, d, i, t, v, o} \cdot + EFF_{r,i,t,v,o} + - + \sum_{I, O} \textbf{FO}_{r, p, s, d, i, t, v, o} -With :math:`\textbf{stored\_energy}` calculated, the storage -charge level (:math:`\textbf{SL}_{r,p,s,d,t,v}`) is updated, but the update procedure varies -based on the time slice within each time period. For the first season and time-of-day within -a given period: + With :math:`\textbf{stored\_energy}` calculated, the storage + charge level (:math:`\textbf{SL}_{r,p,s,d,t,v}`) is updated, but the update procedure varies + based on the time slice within each time period. For the first season and time-of-day within + a given period: -.. 
math:: - \textbf{SL}_{r, p, s, d, t, v} = \textbf{SI}_{r,t,v} + \textbf{stored\_energy} + .. math:: + \textbf{SL}_{r, p, s, d, t, v} = \textbf{SI}_{r,t,v} + \textbf{stored\_energy} -For the first time-of-day slice in any other season except the first: + For the first time-of-day slice in any other season except the first: -.. math:: - \textbf{SL}_{r, p, s, d, t, v} = - \textbf{SL}_{r, p, s_{prev}, d_{last}, t, v} + \textbf{stored\_energy} + .. math:: + \textbf{SL}_{r, p, s, d, t, v} = + \textbf{SL}_{r, p, s_{prev}, d_{last}, t, v} + \textbf{stored\_energy} -For the last season and time-of-day in the year, the ending storage charge level -should be equal to the starting charge level: + For the last season and time-of-day in the year, the ending storage charge level + should be equal to the starting charge level: -.. math:: - \textbf{SL}_{r, p, s, d, t, v} + \textbf{stored\_energy} = \textbf{SI}_{r,t,v} + .. math:: + \textbf{SL}_{r, p, s, d, t, v} + \textbf{stored\_energy} = \textbf{SI}_{r,t,v} -For all other time slices not explicitly outlined above: - -.. math:: - \textbf{SL}_{r, p, s, d, t, v} = \textbf{SL}_{r, p, s, d_{prev}, t, v} + \textbf{stored\_energy} + For all other time slices not explicitly outlined above: -All equations below are sparsely indexed such that: + .. math:: + \textbf{SL}_{r, p, s, d, t, v} = \textbf{SL}_{r, p, s, d_{prev}, t, v} + \textbf{stored\_energy} -.. math:: - \forall \{r, p, s, d, t, v\} \in \Theta_{\text{StorageEnergy}} + All equations below are sparsely indexed such that: -""" + .. math:: + \forall \{r, p, s, d, t, v\} \in \Theta_{\text{StorageEnergy}} + """ # This is the sum of all input=i sent TO storage tech t of vintage v with # output=o in p,s,d charge = sum( @@ -930,11 +985,17 @@ def StorageEnergy_Constraint(M, r, p, s, d, t, v): # the last time slice of the last season must zero out if d == M.time_of_day.last() and s == M.time_season.last(): d_prev = M.time_of_day.prev(d) - expr = M.V_StorageLevel[r, p, s, d_prev, t, v] + stored_energy == M.V_StorageInit[r, t,v] + expr = ( + M.V_StorageLevel[r, p, s, d_prev, t, v] + stored_energy + == M.V_StorageInit[r, t, v] + ) # First time slice of the first season (i.e., start of period), starts at StorageInit level elif d == M.time_of_day.first() and s == M.time_season.first(): - expr = M.V_StorageLevel[r, p, s, d, t, v] == M.V_StorageInit[r,t,v] + stored_energy + expr = ( + M.V_StorageLevel[r, p, s, d, t, v] + == M.V_StorageInit[r, t, v] + stored_energy + ) # First time slice of any season that is NOT the first season elif d == M.time_of_day.first(): @@ -956,6 +1017,7 @@ def StorageEnergy_Constraint(M, r, p, s, d, t, v): return expr + def StorageEnergyUpperBound_Constraint(M, r, p, s, d, t, v): r""" @@ -989,7 +1051,8 @@ def StorageEnergyUpperBound_Constraint(M, r, p, s, d, t, v): M.V_Capacity[r, t, v] * M.CapacityToActivity[r, t] * (M.StorageDuration[r, t] / 8760) - * sum(M.SegFrac[s,S_d] for S_d in M.time_of_day) * 365 + * sum(M.SegFrac[s, S_d] for S_d in M.time_of_day) + * 365 * value(M.ProcessLifeFrac[r, p, t, v]) ) expr = M.V_StorageLevel[r, p, s, d, t, v] <= energy_capacity @@ -1114,7 +1177,7 @@ def StorageThroughput_Constraint(M, r, p, s, d, t, v): return expr -def StorageInit_Constraint( M, r, t, v ): +def StorageInit_Constraint(M, r, t, v): r""" This constraint is used if the users wishes to force a specific initial storage charge level @@ -1145,11 +1208,12 @@ def StorageInit_Constraint( M, r, t, v ): M.V_Capacity[r, t, v] * M.CapacityToActivity[r, t] * (M.StorageDuration[r, t] / 8760) - * 
sum(M.SegFrac[s,S_d] for S_d in M.time_of_day) * 365 + * sum(M.SegFrac[s, S_d] for S_d in M.time_of_day) + * 365 * value(M.ProcessLifeFrac[r, v, t, v]) ) - expr = M.V_StorageInit[r, t, v] == energy_capacity * M.StorageInitFrac[r, t, v] + expr = M.V_StorageInit[r, t, v] == energy_capacity * M.StorageInitFrac[r, t, v] return expr @@ -1203,22 +1267,22 @@ def RampUpDay_Constraint(M, r, p, s, d, t, v): """ if d != M.time_of_day.first(): d_prev = M.time_of_day.prev(d) - activity_sd_prev = sum( \ - M.V_FlowOut[r, p, s, d_prev, S_i, t, v, S_o] \ - for S_i in M.processInputs[r, p, t, v] \ - for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] \ + activity_sd_prev = sum( + M.V_FlowOut[r, p, s, d_prev, S_i, t, v, S_o] + for S_i in M.processInputs[r, p, t, v] + for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] ) - activity_sd = sum( \ - M.V_FlowOut[r, p, s, d, S_i, t, v, S_o] \ - for S_i in M.processInputs[r, p, t, v] \ - for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] \ + activity_sd = sum( + M.V_FlowOut[r, p, s, d, S_i, t, v, S_o] + for S_i in M.processInputs[r, p, t, v] + for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] ) expr_left = ( activity_sd / value(M.SegFrac[s, d]) - activity_sd_prev / value(M.SegFrac[s, d_prev]) - ) / value(M.CapacityToActivity[r,t]) + ) / value(M.CapacityToActivity[r, t]) expr_right = M.V_Capacity[r, t, v] * value(M.RampUp[r, t]) expr = expr_left <= expr_right else: @@ -1254,22 +1318,22 @@ def RampDownDay_Constraint(M, r, p, s, d, t, v): """ if d != M.time_of_day.first(): d_prev = M.time_of_day.prev(d) - activity_sd_prev = sum( \ - M.V_FlowOut[r, p, s, d_prev, S_i, t, v, S_o] \ - for S_i in M.processInputs[r, p, t, v] \ - for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] \ + activity_sd_prev = sum( + M.V_FlowOut[r, p, s, d_prev, S_i, t, v, S_o] + for S_i in M.processInputs[r, p, t, v] + for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] ) - activity_sd = sum( \ - M.V_FlowOut[r, p, s, d, S_i, t, v, S_o] \ - for S_i in M.processInputs[r, p, t, v] \ - for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] \ + activity_sd = sum( + M.V_FlowOut[r, p, s, d, S_i, t, v, S_o] + for S_i in M.processInputs[r, p, t, v] + for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] ) expr_left = ( activity_sd / value(M.SegFrac[s, d]) - activity_sd_prev / value(M.SegFrac[s, d_prev]) - ) / value(M.CapacityToActivity[r,t]) + ) / value(M.CapacityToActivity[r, t]) expr_right = -(M.V_Capacity[r, t, v] * value(M.RampDown[r, t])) expr = expr_left >= expr_right else: @@ -1308,22 +1372,22 @@ def RampUpSeason_Constraint(M, r, p, s, t, v): d_first = M.time_of_day.first() d_last = M.time_of_day.last() - activity_sd_first = sum( \ - M.V_FlowOut[r, p, s, d_first, S_i, t, v, S_o] \ - for S_i in M.processInputs[r, p, t, v] \ - for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] \ + activity_sd_first = sum( + M.V_FlowOut[r, p, s, d_first, S_i, t, v, S_o] + for S_i in M.processInputs[r, p, t, v] + for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] ) - activity_s_prev_d_last = sum( \ - M.V_FlowOut[r, p, s_prev, d_last, S_i, t, v, S_o] \ - for S_i in M.processInputs[r, p, t, v] \ - for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] \ + activity_s_prev_d_last = sum( + M.V_FlowOut[r, p, s_prev, d_last, S_i, t, v, S_o] + for S_i in M.processInputs[r, p, t, v] + for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] ) expr_left = ( activity_sd_first / M.SegFrac[s, d_first] - activity_s_prev_d_last / M.SegFrac[s_prev, d_last] - ) / value(M.CapacityToActivity[r,t]) + ) / value(M.CapacityToActivity[r, t]) expr_right 
= M.V_Capacity[r, t, v] * value(M.RampUp[r, t]) expr = expr_left <= expr_right else: @@ -1363,22 +1427,22 @@ def RampDownSeason_Constraint(M, r, p, s, t, v): d_first = M.time_of_day.first() d_last = M.time_of_day.last() - activity_sd_first = sum( \ - M.V_FlowOut[r, p, s, d_first, S_i, t, v, S_o] \ - for S_i in M.processInputs[r, p, t, v] \ - for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] \ + activity_sd_first = sum( + M.V_FlowOut[r, p, s, d_first, S_i, t, v, S_o] + for S_i in M.processInputs[r, p, t, v] + for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] ) - activity_s_prev_d_last = sum( \ - M.V_FlowOut[r, p, s_prev, d_last, S_i, t, v, S_o] \ - for S_i in M.processInputs[r, p, t, v] \ - for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] \ + activity_s_prev_d_last = sum( + M.V_FlowOut[r, p, s_prev, d_last, S_i, t, v, S_o] + for S_i in M.processInputs[r, p, t, v] + for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] ) expr_left = ( activity_sd_first / value(M.SegFrac[s, d_first]) - activity_s_prev_d_last / value(M.SegFrac[s_prev, d_last]) - ) / value(M.CapacityToActivity[r,t]) + ) / value(M.CapacityToActivity[r, t]) expr_right = -(M.V_Capacity[r, t, v] * value(M.RampDown[r, t])) expr = expr_left >= expr_right else: @@ -1388,7 +1452,6 @@ def RampDownSeason_Constraint(M, r, p, s, t, v): def RampUpPeriod_Constraint(M, r, p, t, v): - # if p != M.time_future.first(): # p_prev = M.time_future.prev(p) # s_first = M.time_season.first() @@ -1415,7 +1478,6 @@ def RampUpPeriod_Constraint(M, r, p, t, v): def RampDownPeriod_Constraint(M, r, p, t, v): - # if p != M.time_future.first(): # p_prev = M.time_future.prev(p) # s_first = M.time_season.first() @@ -1469,7 +1531,9 @@ def ReserveMargin_Constraint(M, r, p, s, d): \forall \{r, p, s, d\} \in \Theta_{\text{ReserveMargin}} """ - if (not M.tech_reserve) or ((r,p) not in M.processReservePeriods.keys()): # If reserve set empty or if r,p not in M.processReservePeriod.keys(), skip the constraint + if (not M.tech_reserve) or ( + (r, p) not in M.processReservePeriods.keys() + ): # If reserve set empty or if r,p not in M.processReservePeriod.keys(), skip the constraint return Constraint.Skip cap_avail = sum( @@ -1482,16 +1546,14 @@ def ReserveMargin_Constraint(M, r, p, s, d): if (r, p, t) in M.processVintages.keys() for v in M.processVintages[r, p, t] # Make sure (r,p,t,v) combinations are defined - if (r,p,t,v) in M.activeCapacityAvailable_rptv - - + if (r, p, t, v) in M.activeCapacityAvailable_rptv ) # In most Temoa input databases, demand is endogenous, so we use electricity # generation instead. total_generation = sum( M.V_FlowOut[r, p, s, d, S_i, t, S_v, S_o] - for (t,S_v) in M.processReservePeriods[r, p] + for (t, S_v) in M.processReservePeriods[r, p] for S_i in M.processInputs[r, p, t, S_v] for S_o in M.ProcessOutputsByInput[r, p, t, S_v, S_i] ) @@ -1531,19 +1593,17 @@ def EmissionLimit_Constraint(M, r, p, e): """ emission_limit = M.EmissionLimit[r, p, e] - # r can be an individual region (r='US'), or a combination of regions separated by a + (r='Mexico+US+Canada'), or 'global'. - # Note that regions!=M.regions. We iterate over regions to find actual_emissions and actual_emissions_annual. - + # r can be an individual region (r='US'), or a combination of regions separated by a + (r='Mexico+US+Canada'), or 'global'. + # Note that regions!=M.regions. We iterate over regions to find actual_emissions and actual_emissions_annual. 
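+    # Editor's note -- an illustrative, hedged sketch (not part of the original
+    # patch) of how the three accepted region-string forms resolve in the branch
+    # below; a '+'-joined string is simply split, e.g.:
+    #   >>> "Mexico+US+Canada".split("+")
+    #   ['Mexico', 'US', 'Canada']
+    # while r == 'global' falls back to the full M.regions set.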
# if r == 'global', the constraint is system-wide - if r == 'global': - regions = M.regions - elif '+' in r: - regions = r.split('+') + if r == "global": + regions = M.regions + elif "+" in r: + regions = r.split("+") else: - regions = [r] - + regions = [r] actual_emissions = sum( M.V_FlowOut[reg, p, S_s, S_d, S_i, S_t, S_v, S_o] @@ -1562,7 +1622,11 @@ def EmissionLimit_Constraint(M, r, p, e): * M.EmissionActivity[reg, e, S_i, S_t, S_v, S_o] for reg in regions for tmp_r, tmp_e, S_i, S_t, S_v, S_o in M.EmissionActivity.sparse_iterkeys() - if tmp_e == e and tmp_r == reg and S_t not in M.tech_annual and S_t in M.tech_flex and S_o in M.flex_commodities + if tmp_e == e + and tmp_r == reg + and S_t not in M.tech_annual + and S_t in M.tech_flex + and S_o in M.flex_commodities # EmissionsActivity not indexed by p, so make sure (r,p,t,v) combos valid if (reg, p, S_t, S_v) in M.processInputs.keys() for S_s in M.time_season @@ -1574,7 +1638,10 @@ def EmissionLimit_Constraint(M, r, p, e): * M.EmissionActivity[reg, e, S_i, S_t, S_v, S_o] for reg in regions for tmp_r, tmp_e, S_i, S_t, S_v, S_o in M.EmissionActivity.sparse_iterkeys() - if tmp_e == e and tmp_r == reg and S_t not in M.tech_annual and S_t in M.tech_curtailment + if tmp_e == e + and tmp_r == reg + and S_t not in M.tech_annual + and S_t in M.tech_curtailment # EmissionsActivity not indexed by p, so make sure (r,p,t,v) combos valid if (reg, p, S_t, S_v) in M.processInputs.keys() for S_s in M.time_season @@ -1596,12 +1663,22 @@ def EmissionLimit_Constraint(M, r, p, e): * M.EmissionActivity[reg, e, S_i, S_t, S_v, S_o] for reg in regions for tmp_r, tmp_e, S_i, S_t, S_v, S_o in M.EmissionActivity.sparse_iterkeys() - if tmp_e == e and tmp_r == reg and S_t in M.tech_annual and S_t in M.tech_flex and S_o in M.flex_commodities + if tmp_e == e + and tmp_r == reg + and S_t in M.tech_annual + and S_t in M.tech_flex + and S_o in M.flex_commodities # EmissionsActivity not indexed by p, so make sure (r,p,t,v) combos valid if (reg, p, S_t, S_v) in M.processInputs.keys() ) - if int is type(actual_emissions + actual_emissions_annual + actual_emissions_flex + actual_emissions_curtail + actual_emissions_flex_annual): + if int is type( + actual_emissions + + actual_emissions_annual + + actual_emissions_flex + + actual_emissions_curtail + + actual_emissions_flex_annual + ): msg = ( "Warning: No technology produces emission '%s', though limit was " "specified as %s.\n" @@ -1609,7 +1686,14 @@ def EmissionLimit_Constraint(M, r, p, e): SE.write(msg % (e, emission_limit)) return Constraint.Skip - expr = actual_emissions + actual_emissions_annual + actual_emissions_flex + actual_emissions_curtail + actual_emissions_flex_annual <= emission_limit + expr = ( + actual_emissions + + actual_emissions_annual + + actual_emissions_flex + + actual_emissions_curtail + + actual_emissions_flex_annual + <= emission_limit + ) return expr @@ -1658,52 +1742,51 @@ def GrowthRateConstraint_rule(M, p, r, t): def MaxActivity_Constraint(M, r, p, t): r""" -The MaxActivity sets an upper bound on the activity from a specific technology. -Note that the indices for these constraints are region, period and tech, not tech -and vintage. The first version of the constraint pertains to technologies with -variable output at the time slice level, and the second version pertains to -technologies with constant annual output belonging to the :code:`tech_annual` -set. - -.. math:: - :label: MaxActivity + The MaxActivity sets an upper bound on the activity from a specific technology. 
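+    (Editor's illustration with assumed names and numbers: setting
+    MaxActivity['US', 2030, 'E_COAL'] = 500 would cap the total 2030 output of a
+    hypothetical 'E_COAL' technology, summed over all vintages, seasons, and
+    times of day, at 500 units of activity.)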
+ Note that the indices for these constraints are region, period and tech, not tech + and vintage. The first version of the constraint pertains to technologies with + variable output at the time slice level, and the second version pertains to + technologies with constant annual output belonging to the :code:`tech_annual` + set. - \sum_{S,D,I,V,O} \textbf{FO}_{r, p, s, d, i, t, v, o} \le MAA_{r, p, t} + .. math:: + :label: MaxActivity - \forall \{r, p, t\} \in \Theta_{\text{MaxActivity}} + \sum_{S,D,I,V,O} \textbf{FO}_{r, p, s, d, i, t, v, o} \le MAA_{r, p, t} - \sum_{I,V,O} \textbf{FOA}_{r, p, i, t \in T^{a}, v, o} \le MAA_{r, p, t} + \forall \{r, p, t\} \in \Theta_{\text{MaxActivity}} - \forall \{r, p, t \in T^{a}\} \in \Theta_{\text{MaxActivity}} + \sum_{I,V,O} \textbf{FOA}_{r, p, i, t \in T^{a}, v, o} \le MAA_{r, p, t} -""" - # r can be an individual region (r='US'), or a combination of regions separated by a + (r='Mexico+US+Canada'), or 'global'. + \forall \{r, p, t \in T^{a}\} \in \Theta_{\text{MaxActivity}} + """ + # r can be an individual region (r='US'), or a combination of regions separated by a + (r='Mexico+US+Canada'), or 'global'. # if r == 'global', the constraint is system-wide - if r == 'global': - reg = M.regions - elif '+' in r: - reg = r.split('+') + if r == "global": + reg = M.regions + elif "+" in r: + reg = r.split("+") else: - reg = [r] + reg = [r] try: - activity_rpt = sum( - M.V_FlowOut[r, p, s, d, S_i, t, S_v, S_o] - for r in reg - for S_v in M.processVintages[r, p, t] - for S_i in M.processInputs[r, p, t, S_v] - for S_o in M.ProcessOutputsByInput[r, p, t, S_v, S_i] - for s in M.time_season - for d in M.time_of_day - ) + activity_rpt = sum( + M.V_FlowOut[r, p, s, d, S_i, t, S_v, S_o] + for r in reg + for S_v in M.processVintages[r, p, t] + for S_i in M.processInputs[r, p, t, S_v] + for S_o in M.ProcessOutputsByInput[r, p, t, S_v, S_i] + for s in M.time_season + for d in M.time_of_day + ) except: - activity_rpt = sum( - M.V_FlowOutAnnual[r, p, S_i, t, S_v, S_o] - for r in reg - for S_v in M.processVintages[r, p, t] - for S_i in M.processInputs[r, p, t, S_v] - for S_o in M.ProcessOutputsByInput[r, p, t, S_v, S_i] - ) + activity_rpt = sum( + M.V_FlowOutAnnual[r, p, S_i, t, S_v, S_o] + for r in reg + for S_v in M.processVintages[r, p, t] + for S_i in M.processInputs[r, p, t, S_v] + for S_o in M.ProcessOutputsByInput[r, p, t, S_v, S_i] + ) max_act = value(M.MaxActivity[r, p, t]) expr = activity_rpt <= max_act @@ -1713,52 +1796,51 @@ def MaxActivity_Constraint(M, r, p, t): def MinActivity_Constraint(M, r, p, t): r""" -The MinActivity sets a lower bound on the activity from a specific technology. -Note that the indices for these constraints are region, period and tech, not tech and -vintage. The first version of the constraint pertains to technologies with -variable output at the time slice level, and the second version pertains to -technologies with constant annual output belonging to the :code:`tech_annual` -set. - -.. math:: - :label: MinActivity + The MinActivity sets a lower bound on the activity from a specific technology. + Note that the indices for these constraints are region, period and tech, not tech and + vintage. The first version of the constraint pertains to technologies with + variable output at the time slice level, and the second version pertains to + technologies with constant annual output belonging to the :code:`tech_annual` + set. - \sum_{S,D,I,V,O} \textbf{FO}_{r, p, s, d, i, t, v, o} \ge MIA_{r, p, t} + .. 
math:: + :label: MinActivity - \forall \{r, p, t\} \in \Theta_{\text{MinActivity}} + \sum_{S,D,I,V,O} \textbf{FO}_{r, p, s, d, i, t, v, o} \ge MIA_{r, p, t} - \sum_{I,V,O} \textbf{FOA}_{r, p, i, t, v, o} \ge MIA_{r, p, t} + \forall \{r, p, t\} \in \Theta_{\text{MinActivity}} - \forall \{r, p, t \in T^{a}\} \in \Theta_{\text{MinActivity}} + \sum_{I,V,O} \textbf{FOA}_{r, p, i, t, v, o} \ge MIA_{r, p, t} -""" - # r can be an individual region (r='US'), or a combination of regions separated by a + (r='Mexico+US+Canada'), or 'global'. + \forall \{r, p, t \in T^{a}\} \in \Theta_{\text{MinActivity}} + """ + # r can be an individual region (r='US'), or a combination of regions separated by a + (r='Mexico+US+Canada'), or 'global'. # if r == 'global', the constraint is system-wide - if r == 'global': - reg = M.regions - elif '+' in r: - reg = r.split('+') + if r == "global": + reg = M.regions + elif "+" in r: + reg = r.split("+") else: - reg = [r] + reg = [r] try: - activity_rpt = sum( - M.V_FlowOut[r, p, s, d, S_i, t, S_v, S_o] - for r in reg - for S_v in M.processVintages[r, p, t] - for S_i in M.processInputs[r, p, t, S_v] - for S_o in M.ProcessOutputsByInput[r, p, t, S_v, S_i] - for s in M.time_season - for d in M.time_of_day - ) + activity_rpt = sum( + M.V_FlowOut[r, p, s, d, S_i, t, S_v, S_o] + for r in reg + for S_v in M.processVintages[r, p, t] + for S_i in M.processInputs[r, p, t, S_v] + for S_o in M.ProcessOutputsByInput[r, p, t, S_v, S_i] + for s in M.time_season + for d in M.time_of_day + ) except: - activity_rpt = sum( - M.V_FlowOutAnnual[r, p, S_i, t, S_v, S_o] - for r in reg - for S_v in M.processVintages[r, p, t] - for S_i in M.processInputs[r, p, t, S_v] - for S_o in M.ProcessOutputsByInput[r, p, t, S_v, S_i] - ) + activity_rpt = sum( + M.V_FlowOutAnnual[r, p, S_i, t, S_v, S_o] + for r in reg + for S_v in M.processVintages[r, p, t] + for S_i in M.processInputs[r, p, t, S_v] + for S_o in M.ProcessOutputsByInput[r, p, t, S_v, S_i] + ) min_act = value(M.MinActivity[r, p, t]) expr = activity_rpt >= min_act @@ -1768,28 +1850,28 @@ def MinActivity_Constraint(M, r, p, t): def MinActivityGroup_Constraint(M, p, g): r""" -The MinActivityGroup constraint sets a minimum activity limit for a user-defined -technology group. Each technology within each group is multiplied by a -weighting function (:math:`MGW_{r,t}`), which determines the technology activity -share that can count towards the constraint. + The MinActivityGroup constraint sets a minimum activity limit for a user-defined + technology group. Each technology within each group is multiplied by a + weighting function (:math:`MGW_{r,t}`), which determines the technology activity + share that can count towards the constraint. -.. math:: - :label: MinActivityGroup + .. math:: + :label: MinActivityGroup - \sum_{S,D,I,T,V,O} \textbf{FO}_{p, s, d, i, t, v, o} \cdot MGW_{t|t \not \in T^{a}} - + \sum_{I,T,V,O} \textbf{FOA}_{p, i, t \in T^{a}, v, o} \cdot MGW_{t \in T^{a}} - \ge MGT_{p, g} + \sum_{S,D,I,T,V,O} \textbf{FO}_{p, s, d, i, t, v, o} \cdot MGW_{t|t \not \in T^{a}} + + \sum_{I,T,V,O} \textbf{FOA}_{p, i, t \in T^{a}, v, o} \cdot MGW_{t \in T^{a}} + \ge MGT_{p, g} - \forall \{p, g\} \in \Theta_{\text{MinActivityGroup}} + \forall \{p, g\} \in \Theta_{\text{MinActivityGroup}} -where :math:`g` represents the assigned technology group and :math:`MGT_r` -refers to the :code:`MinGenGroupTarget` parameter. 
-""" + where :math:`g` represents the assigned technology group and :math:`MGT_r` + refers to the :code:`MinGenGroupTarget` parameter.""" activity_p = sum( M.V_FlowOut[r, p, s, d, S_i, S_t, S_v, S_o] * M.MinGenGroupWeight[r, S_t, g] for r in M.RegionalIndices - for S_t in M.tech_groups if (S_t not in M.tech_annual) and ((r, p, S_t) in M.processVintages.keys()) + for S_t in M.tech_groups + if (S_t not in M.tech_annual) and ((r, p, S_t) in M.processVintages.keys()) for S_v in M.processVintages[r, p, S_t] for S_i in M.processInputs[r, p, S_t, S_v] for S_o in M.ProcessOutputsByInput[r, p, S_t, S_v, S_i] @@ -1800,13 +1882,13 @@ def MinActivityGroup_Constraint(M, p, g): activity_p_annual = sum( M.V_FlowOutAnnual[r, p, S_i, S_t, S_v, S_o] * M.MinGenGroupWeight[r, S_t, g] for r in M.RegionalIndices - for S_t in M.tech_groups if (S_t in M.tech_annual) and ((r, p, S_t) in M.processVintages.keys()) + for S_t in M.tech_groups + if (S_t in M.tech_annual) and ((r, p, S_t) in M.processVintages.keys()) for S_v in M.processVintages[r, p, S_t] for S_i in M.processInputs[r, p, S_t, S_v] for S_o in M.ProcessOutputsByInput[r, p, S_t, S_v, S_i] ) - min_act = value(M.MinGenGroupTarget[p, g]) expr = activity_p + activity_p_annual >= min_act return expr @@ -1815,17 +1897,16 @@ def MinActivityGroup_Constraint(M, p, g): def MaxCapacity_Constraint(M, r, p, t): r""" -The MaxCapacity constraint sets a limit on the maximum available capacity of a -given technology. Note that the indices for these constraints are region, period and -tech, not tech and vintage. + The MaxCapacity constraint sets a limit on the maximum available capacity of a + given technology. Note that the indices for these constraints are region, period and + tech, not tech and vintage. -.. math:: - :label: MaxCapacity + .. math:: + :label: MaxCapacity - \textbf{CAPAVL}_{r, p, t} \le MAC_{r, p, t} + \textbf{CAPAVL}_{r, p, t} \le MAC_{r, p, t} - \forall \{r, p, t\} \in \Theta_{\text{MaxCapacity}} -""" + \forall \{r, p, t\} \in \Theta_{\text{MaxCapacity}}""" max_cap = value(M.MaxCapacity[r, p, t]) expr = M.V_CapacityAvailableByPeriodAndTech[r, p, t] <= max_cap return expr @@ -1834,38 +1915,37 @@ def MaxCapacity_Constraint(M, r, p, t): def MaxResource_Constraint(M, r, t): r""" -The MaxResource constraint sets a limit on the maximum available resource of a -given technology across all model time periods. Note that the indices for these -constraints are region and tech. + The MaxResource constraint sets a limit on the maximum available resource of a + given technology across all model time periods. Note that the indices for these + constraints are region and tech. -.. math:: - :label: MaxResource + .. 
math:: + :label: MaxResource - \sum_{P} \textbf{CAPAVL}_{r, p, t} \le MAR_{r, t} + \sum_{P} \textbf{CAPAVL}_{r, p, t} \le MAR_{r, t} - \forall \{r, t\} \in \Theta_{\text{MaxCapacity}} -""" + \forall \{r, t\} \in \Theta_{\text{MaxCapacity}}""" max_resource = value(M.MaxResource[r, t]) try: - activity_rt = sum( - M.V_FlowOut[r, p, s, d, S_i, t, S_v, S_o] - for p in M.time_optimize - if (r, p, t) in M.processVintages.keys() - for S_v in M.processVintages[r, p, t] - for S_i in M.processInputs[r, p, t, S_v] - for S_o in M.ProcessOutputsByInput[r, p, t, S_v, S_i] - for s in M.time_season - for d in M.time_of_day - ) + activity_rt = sum( + M.V_FlowOut[r, p, s, d, S_i, t, S_v, S_o] + for p in M.time_optimize + if (r, p, t) in M.processVintages.keys() + for S_v in M.processVintages[r, p, t] + for S_i in M.processInputs[r, p, t, S_v] + for S_o in M.ProcessOutputsByInput[r, p, t, S_v, S_i] + for s in M.time_season + for d in M.time_of_day + ) except: - activity_rt = sum( - M.V_FlowOutAnnual[r, p, S_i, t, S_v, S_o] - for p in M.time_optimize - if (r, p, t) in M.processVintages.keys() - for S_v in M.processVintages[r, p, t] - for S_i in M.processInputs[r, p, t, S_v] - for S_o in M.ProcessOutputsByInput[r, p, t, S_v, S_i] - ) + activity_rt = sum( + M.V_FlowOutAnnual[r, p, S_i, t, S_v, S_o] + for p in M.time_optimize + if (r, p, t) in M.processVintages.keys() + for S_v in M.processVintages[r, p, t] + for S_i in M.processInputs[r, p, t, S_v] + for S_o in M.ProcessOutputsByInput[r, p, t, S_v, S_i] + ) expr = activity_rt <= max_resource return expr @@ -1873,10 +1953,9 @@ def MaxResource_Constraint(M, r, t): def MaxCapacitySet_Constraint(M, p): r""" -Similar to the :code:`MaxCapacity` constraint, but works on a group of technologies -specified in the :code:`tech_capacity_max` subset. - -""" + Similar to the :code:`MaxCapacity` constraint, but works on a group of technologies + specified in the :code:`tech_capacity_max` subset. + """ max_cap = value(M.MaxCapacitySum[p]) aggcap = sum( M.V_CapacityAvailableByPeriodAndTech[p, t] for t in M.tech_capacity_max @@ -1888,17 +1967,16 @@ def MaxCapacitySet_Constraint(M, p): def MinCapacity_Constraint(M, r, p, t): r""" -The MinCapacity constraint sets a limit on the minimum available capacity of a -given technology. Note that the indices for these constraints are region, period and -tech, not tech and vintage. + The MinCapacity constraint sets a limit on the minimum available capacity of a + given technology. Note that the indices for these constraints are region, period and + tech, not tech and vintage. -.. math:: - :label: MinCapacityCapacityAvailableByPeriodAndTech + .. math:: + :label: MinCapacityCapacityAvailableByPeriodAndTech - \textbf{CAPAVL}_{r, p, t} \ge MIC_{r, p, t} + \textbf{CAPAVL}_{r, p, t} \ge MIC_{r, p, t} - \forall \{r, p, t\} \in \Theta_{\text{MinCapacity}} -""" + \forall \{r, p, t\} \in \Theta_{\text{MinCapacity}}""" min_cap = value(M.MinCapacity[r, p, t]) expr = M.V_CapacityAvailableByPeriodAndTech[r, p, t] >= min_cap return expr @@ -1906,10 +1984,9 @@ def MinCapacity_Constraint(M, r, p, t): def MinCapacitySet_Constraint(M, p): r""" -Similar to the :code:`MinCapacity` constraint, but works on a group of technologies -specified in the :code:`tech_capacity_min` subset. - -""" + Similar to the :code:`MinCapacity` constraint, but works on a group of technologies + specified in the :code:`tech_capacity_min` subset. 
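+
+    Editor's sketch of the relation enforced below (:math:`MCS_p` is an assumed
+    shorthand for the :code:`MinCapacitySum` parameter, not a symbol from the
+    original patch):
+
+    .. math::
+
+        \sum_{t \in tech\_capacity\_min} \textbf{CAPAVL}_{p, t} \ge MCS_{p}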
+ """ min_cap = value(M.MinCapacitySum[p]) aggcap = sum( M.V_CapacityAvailableByPeriodAndTech[p, t] for t in M.tech_capacity_min @@ -1920,12 +1997,11 @@ def MinCapacitySet_Constraint(M, p): def TechInputSplit_Constraint(M, r, p, s, d, i, t, v): r""" -Allows users to specify fixed or minimum shares of commodity inputs to a process -producing a single output. These shares can vary by model time period. See -TechOutputSplit_Constraint for an analogous explanation. Under this constraint, -only the technologies with variable output at the timeslice level (i.e., -NOT in the :code:`tech_annual` set) are considered. -""" + Allows users to specify fixed or minimum shares of commodity inputs to a process + producing a single output. These shares can vary by model time period. See + TechOutputSplit_Constraint for an analogous explanation. Under this constraint, + only the technologies with variable output at the timeslice level (i.e., + NOT in the :code:`tech_annual` set) are considered.""" inp = sum( M.V_FlowOut[r, p, s, d, i, t, v, S_o] / value(M.Efficiency[r, i, t, v, S_o]) for S_o in M.ProcessOutputsByInput[r, p, t, v, i] @@ -1940,14 +2016,14 @@ def TechInputSplit_Constraint(M, r, p, s, d, i, t, v): expr = inp >= M.TechInputSplit[r, p, i, t] * total_inp return expr + def TechInputSplitAnnual_Constraint(M, r, p, i, t, v): r""" -Allows users to specify fixed or minimum shares of commodity inputs to a process -producing a single output. These shares can vary by model time period. See -TechOutputSplitAnnual_Constraint for an analogous explanation. Under this -function, only the technologies with constant annual output (i.e., members -of the :math:`tech_annual` set) are considered. -""" + Allows users to specify fixed or minimum shares of commodity inputs to a process + producing a single output. These shares can vary by model time period. See + TechOutputSplitAnnual_Constraint for an analogous explanation. Under this + function, only the technologies with constant annual output (i.e., members + of the :math:`tech_annual` set) are considered.""" inp = sum( M.V_FlowOutAnnual[r, p, i, t, v, S_o] / value(M.Efficiency[r, i, t, v, S_o]) for S_o in M.ProcessOutputsByInput[r, p, t, v, i] @@ -1962,15 +2038,15 @@ def TechInputSplitAnnual_Constraint(M, r, p, i, t, v): expr = inp >= M.TechInputSplit[r, p, i, t] * total_inp return expr + def TechInputSplitAverage_Constraint(M, r, p, i, t, v): r""" -Allows users to specify fixed or minimum shares of commodity inputs to a process -producing a single output. Under this constraint, only the technologies with variable -output at the timeslice level (i.e., NOT in the :code:`tech_annual` set) are considered. -This constraint differs from TechInputSplit as it specifies shares on an annual basis, -so even though it applies to technologies with variable output at the timeslice level, -the constraint only fixes the input shares over the course of a year. -""" + Allows users to specify fixed or minimum shares of commodity inputs to a process + producing a single output. Under this constraint, only the technologies with variable + output at the timeslice level (i.e., NOT in the :code:`tech_annual` set) are considered. 
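+    (Editor's illustration with assumed values: a TechInputSplitAverage entry of
+    0.3 for coal into a hypothetical 'co_fire' technology lets individual time
+    slices deviate, so long as the input summed over the whole year is still at
+    least 30% coal.)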
+    This constraint differs from TechInputSplit as it specifies shares on an annual basis,
+    so even though it applies to technologies with variable output at the timeslice level,
+    the constraint only fixes the input shares over the course of a year."""
 
     inp = sum(
         M.V_FlowOut[r, p, s, d, i, t, v, S_o] / value(M.Efficiency[r, i, t, v, S_o])
 
@@ -1987,77 +2063,77 @@ def TechInputSplitAverage_Constraint(M, r, p, i, t, v):
         for S_o in M.ProcessOutputsByInput[r, p, t, v, i]
     )
 
-    expr = inp >= M.TechInputSplitAverage[r, p, i, t] * total_inp
-    return expr
+    expr = inp >= M.TechInputSplitAverage[r, p, i, t] * total_inp
+    return expr
+
 
 def TechOutputSplit_Constraint(M, r, p, s, d, t, v, o):
     r"""
-Some processes take a single input and make multiple outputs, and the user would like to
-specify either a constant or time-varying ratio of outputs per unit input. The most
-canonical example is an oil refinery. Crude oil is used to produce many different refined
-products. In many cases, the modeler would like to specify a minimum share of each refined
-product produced by the refinery.
+    Some processes take a single input and make multiple outputs, and the user would like to
+    specify either a constant or time-varying ratio of outputs per unit input. The most
+    canonical example is an oil refinery. Crude oil is used to produce many different refined
+    products. In many cases, the modeler would like to specify a minimum share of each refined
+    product produced by the refinery.
 
-For example, a hypothetical (and highly simplified) refinery might have a crude oil input
-that produces 4 parts diesel, 3 parts gasoline, and 2 parts kerosene. The relative
-ratios to the output then are:
+    For example, a hypothetical (and highly simplified) refinery might have a crude oil input
+    that produces 4 parts diesel, 3 parts gasoline, and 2 parts kerosene. The relative
+    ratios to the output then are:
 
-.. math::
+    .. math::
 
-   d = \tfrac{4}{9} \cdot \text{total output}, \qquad
-   g = \tfrac{3}{9} \cdot \text{total output}, \qquad
-   k = \tfrac{2}{9} \cdot \text{total output}
+        d = \tfrac{4}{9} \cdot \text{total output}, \qquad
+        g = \tfrac{3}{9} \cdot \text{total output}, \qquad
+        k = \tfrac{2}{9} \cdot \text{total output}
 
-Note that it is possible to specify output shares that sum to less than unity. In such
-cases, the model optimizes the remaining share. In addition, it is possible to change the
-specified shares by model time period. Under this constraint, only the
-technologies with variable output at the timeslice level (i.e., NOT in the
-:code:`tech_annual` set) are considered.
+    Note that it is possible to specify output shares that sum to less than unity. In such
+    cases, the model optimizes the remaining share. In addition, it is possible to change the
+    specified shares by model time period. Under this constraint, only the
+    technologies with variable output at the timeslice level (i.e., NOT in the
+    :code:`tech_annual` set) are considered.
 
-The constraint is formulated as follows:
+    The constraint is formulated as follows:
 
-.. math::
-   :label: TechOutputSplit
+    .. 
math:: + :label: TechOutputSplit - \sum_{I, t \not \in T^{a}} \textbf{FO}_{r, p, s, d, i, t, v, o} - \geq - TOS_{r, p, t, o} \cdot \sum_{I, O, t \not \in T^{a}} \textbf{FO}_{r, p, s, d, i, t, v, o} + \sum_{I, t \not \in T^{a}} \textbf{FO}_{r, p, s, d, i, t, v, o} + \geq + TOS_{r, p, t, o} \cdot \sum_{I, O, t \not \in T^{a}} \textbf{FO}_{r, p, s, d, i, t, v, o} - \forall \{r, p, s, d, t, v, o\} \in \Theta_{\text{TechOutputSplit}} -""" + \forall \{r, p, s, d, t, v, o\} \in \Theta_{\text{TechOutputSplit}}""" out = sum( M.V_FlowOut[r, p, s, d, S_i, t, v, o] for S_i in M.ProcessInputsByOutput[r, p, t, v, o] ) total_out = sum( - M.V_FlowOut[r, p, s, d, S_i, t, v, S_o] - for S_i in M.processInputs[r, p, t, v] - for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] + M.V_FlowOut[r, p, s, d, S_i, t, v, S_o] + for S_i in M.processInputs[r, p, t, v] + for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] ) expr = out >= M.TechOutputSplit[r, p, t, o] * total_out return expr -def TechOutputSplitAnnual_Constraint ( M, r, p, t, v, o): + +def TechOutputSplitAnnual_Constraint(M, r, p, t, v, o): r""" -This constraint operates similarly to TechOutputSplit_Constraint. -However, under this function, only the technologies with constant annual -output (i.e., members of the :math:`tech_annual` set) are considered. + This constraint operates similarly to TechOutputSplit_Constraint. + However, under this function, only the technologies with constant annual + output (i.e., members of the :math:`tech_annual` set) are considered. -.. math:: - :label: TechOutputSplitAnnual + .. math:: + :label: TechOutputSplitAnnual - \sum_{I, T^{a}} \textbf{FOA}_{r, p, i, t \in T^{a}, v, o} + \sum_{I, T^{a}} \textbf{FOA}_{r, p, i, t \in T^{a}, v, o} - \geq + \geq - TOS_{r, p, t, o} \cdot \sum_{I, O, T^{a}} \textbf{FOA}_{r, p, s, d, i, t \in T^{a}, v, o} + TOS_{r, p, t, o} \cdot \sum_{I, O, T^{a}} \textbf{FOA}_{r, p, s, d, i, t \in T^{a}, v, o} - \forall \{r, p, t \in T^{a}, v, o\} \in \Theta_{\text{TechOutputSplitAnnual}} -""" + \forall \{r, p, t \in T^{a}, v, o\} \in \Theta_{\text{TechOutputSplitAnnual}}""" out = sum( M.V_FlowOutAnnual[r, p, S_i, t, v, o] for S_i in M.ProcessInputsByOutput[r, p, t, v, o] @@ -2067,11 +2142,12 @@ def TechOutputSplitAnnual_Constraint ( M, r, p, t, v, o): M.V_FlowOutAnnual[r, p, S_i, t, v, S_o] for S_i in M.processInputs[r, p, t, v] for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] - ) + ) expr = out >= M.TechOutputSplit[r, p, t, o] * total_out return expr + # --------------------------------------------------------------- # Define rule-based parameters # --------------------------------------------------------------- @@ -2129,52 +2205,58 @@ def ParamLoanAnnualize_rule(M, r, t, v): return annualized_rate - def LinkedEmissionsTech_Constraint(M, r, p, s, d, t, v, e): r""" -This constraint is necessary for carbon capture technologies that produce -CO2 as an emissions commodity, but the CO2 also serves as a physical -input commodity to a downstream process, such as synthetic fuel production. -To accomplish this, a dummy technology is linked to the CO2-producing -technology, converting the emissions activity into a physical commodity -amount as follows: - -.. math:: - :label: LinkedEmissionsTech - - - \sum_{I, O} \textbf{FO}_{r, p, s, d, i, t, v, o} \cdot EAC_{r, e, i, t, v, o} - = \sum_{I, O} \textbf{FO}_{r, p, s, d, i, t, v, o} - - \forall \{r, p, s, d, t, v, e\} \in \Theta_{\text{LinkedTechs}} - -The relationship between the primary and linked technologies is given -in the :code:`LinkedTechs` table. 
Note that the primary and linked -technologies cannot be part of the :code:`tech_annual` set. It is implicit that -the primary region corresponds to the linked technology as well. The lifetimes -of the primary and linked technologies should be specified and identical. -""" + This constraint is necessary for carbon capture technologies that produce + CO2 as an emissions commodity, but the CO2 also serves as a physical + input commodity to a downstream process, such as synthetic fuel production. + To accomplish this, a dummy technology is linked to the CO2-producing + technology, converting the emissions activity into a physical commodity + amount as follows: + + .. math:: + :label: LinkedEmissionsTech + + - \sum_{I, O} \textbf{FO}_{r, p, s, d, i, t, v, o} \cdot EAC_{r, e, i, t, v, o} + = \sum_{I, O} \textbf{FO}_{r, p, s, d, i, t, v, o} + + \forall \{r, p, s, d, t, v, e\} \in \Theta_{\text{LinkedTechs}} + + The relationship between the primary and linked technologies is given + in the :code:`LinkedTechs` table. Note that the primary and linked + technologies cannot be part of the :code:`tech_annual` set. It is implicit that + the primary region corresponds to the linked technology as well. The lifetimes + of the primary and linked technologies should be specified and identical.""" linked_t = M.LinkedTechs[r, t, e] - if (r,t,v) in M.LifetimeProcess.keys() and M.LifetimeProcess[r, linked_t,v] != M.LifetimeProcess[r, t,v]: - msg = ('the LifetimeProcess values of the primary and linked technologies ' - 'in the LinkedTechs table have to be specified and identical') - raise Exception( msg ) - if (r,t) in M.LifetimeTech.keys() and M.LifetimeTech[r, linked_t] != M.LifetimeTech[r, t]: - msg = ('the LifetimeTech values of the primary and linked technologies ' - 'in the LinkedTechs table have to be specified and identical') - raise Exception( msg ) + if (r, t, v) in M.LifetimeProcess.keys() and M.LifetimeProcess[ + r, linked_t, v + ] != M.LifetimeProcess[r, t, v]: + msg = ( + "the LifetimeProcess values of the primary and linked technologies " + "in the LinkedTechs table have to be specified and identical" + ) + raise Exception(msg) + if (r, t) in M.LifetimeTech.keys() and M.LifetimeTech[ + r, linked_t + ] != M.LifetimeTech[r, t]: + msg = ( + "the LifetimeTech values of the primary and linked technologies " + "in the LinkedTechs table have to be specified and identical" + ) + raise Exception(msg) primary_flow = sum( - M.V_FlowOut[r, p, s, d, S_i, t, v, S_o]*M.EmissionActivity[r, e, S_i, t, v, S_o] - for S_i in M.processInputs[r, p, t, v] - for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] + M.V_FlowOut[r, p, s, d, S_i, t, v, S_o] + * M.EmissionActivity[r, e, S_i, t, v, S_o] + for S_i in M.processInputs[r, p, t, v] + for S_o in M.ProcessOutputsByInput[r, p, t, v, S_i] ) linked_flow = sum( - M.V_FlowOut[r, p, s, d, S_i, linked_t, v, S_o] - for S_i in M.processInputs[r, p, linked_t, v] - for S_o in M.ProcessOutputsByInput[r, p, linked_t, v, S_i] + M.V_FlowOut[r, p, s, d, S_i, linked_t, v, S_o] + for S_i in M.processInputs[r, p, linked_t, v] + for S_o in M.ProcessOutputsByInput[r, p, linked_t, v, S_i] ) expr = -primary_flow == linked_flow return expr - diff --git a/temoa_model/temoa_run.py b/temoa_model/temoa_run.py index f6291a92..a0dfd5e9 100755 --- a/temoa_model/temoa_run.py +++ b/temoa_model/temoa_run.py @@ -51,14 +51,14 @@ from collections import defaultdict from temoa_rules import TotalCost_rule, ActivityByTech_Constraint -from temoa_mga import ActivityObj_rule, SlackedObjective_rule, 
PreviousAct_rule +from temoa_mga import ActivityObj_rule, SlackedObjective_rule, PreviousAct_rule import traceback signal(SIGINT, default_int_handler) -''' +""" This is the main solver class. This takes in input an Abstract Model after parameters initialization, and a config_filename (which contains the input parameters) @@ -77,503 +77,614 @@ This will yield each statement being yielded by function_call(). This is followed all the way through to the first function_call of the UI where it is returned as a StreamingHttpResponse(). -''' +""" + + class TemoaSolver(object): - def __init__(self, model, config_filename): - self.model = model - self.config_filename = config_filename - self.temoa_setup() - self.temoa_checks() - - def temoa_setup (self): - """This function prepares the model to be solved. - - Inputs: - model -- the model object - config_filename -- config filename, non-blank if called from the UI - There are three possible ways to call the model: - 1. python temoa_model/ /path/to/data_files - 2. python temoa_model/ --config=/path/to/config/file - 3. function call from the UI - This function discerns which way the model was called and process the - inputs accordingly. - """ - if self.config_filename == '': # Called from the command line - self.options, config_flag = parse_args() - if config_flag == 1: # Option 2 (using config file) - self.options.path_to_lp_files = self.options.path_to_logs + sep + "lp_files" - TempfileManager.tempdir = self.options.path_to_lp_files - else: # Must be Option 1 (no config file) - pass - - else: # Config file already specified, so must be an interface call - available_solvers, default_solver = get_solvers() - temoa_config = TemoaConfig(d_solver=default_solver) - temoa_config.build(config=self.config_filename) - self.options = temoa_config - - self.temp_lp_dest = '/srv/thirdparty/temoa/data_files/' - - self.options.path_to_lp_files = self.options.path_to_logs + sep + "lp_files" - TempfileManager.tempdir = self.options.path_to_lp_files - - - def temoa_checks(self): - """Make sure Python 2.7 is used and that a suitable solver is available.""" - - if version_info < (2, 7): - msg = ("Temoa requires Python v2.7 to run.\n\n The model may not solve" - "properly with another version.") - raise SystemExit( msg ) - - if self.options.neos is True: - # Invoke NEOS solver manager if flag is specified in config file - self.optimizer = pyomo.opt.SolverManagerFactory('neos') - else: - self.optimizer = SolverFactory( self.options.solver ) - - if self.optimizer: - pass - elif self.options.solver != 'NONE': - SE.write( "\nWarning: Unable to initialize solver interface for '{}'\n\n" - .format( self.options.solver )) - if SE.isatty(): - SE.write( "Please press enter to continue or Ctrl+C to quit." ) - if os.path.join('temoa_model','config_sample_myopic') not in options.file_location: - raw_input() - - - ''' + def __init__(self, model, config_filename): + self.model = model + self.config_filename = config_filename + self.temoa_setup() + self.temoa_checks() + + def temoa_setup(self): + """This function prepares the model to be solved. + + Inputs: + model -- the model object + config_filename -- config filename, non-blank if called from the UI + There are three possible ways to call the model: + 1. python temoa_model/ /path/to/data_files + 2. python temoa_model/ --config=/path/to/config/file + 3. function call from the UI + This function discerns which way the model was called and process the + inputs accordingly. 
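+
+        Editor's illustration (hypothetical paths, not from the original patch),
+        e.g. a config-file invocation:
+
+            $ python temoa_model/ --config=temoa_model/config_sample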
+        """
+        if self.config_filename == "":  # Called from the command line
+            self.options, config_flag = parse_args()
+            if config_flag == 1:  # Option 2 (using config file)
+                self.options.path_to_lp_files = (
+                    self.options.path_to_logs + sep + "lp_files"
+                )
+                TempfileManager.tempdir = self.options.path_to_lp_files
+            else:  # Must be Option 1 (no config file)
+                pass
+
+        else:  # Config file already specified, so must be an interface call
+            available_solvers, default_solver = get_solvers()
+            temoa_config = TemoaConfig(d_solver=default_solver)
+            temoa_config.build(config=self.config_filename)
+            self.options = temoa_config
+
+        self.temp_lp_dest = "/srv/thirdparty/temoa/data_files/"
+
+        self.options.path_to_lp_files = self.options.path_to_logs + sep + "lp_files"
+        TempfileManager.tempdir = self.options.path_to_lp_files
+
+    def temoa_checks(self):
+        """Make sure Python 2.7 is used and that a suitable solver is available."""
+
+        if version_info < (2, 7):
+            msg = (
+                "Temoa requires Python v2.7 to run.\n\n The model may not solve "
+                "properly with another version."
+            )
+            raise SystemExit(msg)
+
+        if self.options.neos is True:
+            # Invoke NEOS solver manager if flag is specified in config file
+            self.optimizer = pyomo.opt.SolverManagerFactory("neos")
+        else:
+            self.optimizer = SolverFactory(self.options.solver)
+
+        if self.optimizer:
+            pass
+        elif self.options.solver != "NONE":
+            SE.write(
+                "\nWarning: Unable to initialize solver interface for '{}'\n\n".format(
+                    self.options.solver
+                )
+            )
+            if SE.isatty():
+                SE.write("Please press enter to continue or Ctrl+C to quit.")
+            if (
+                os.path.join("temoa_model", "config_sample_myopic")
+                not in options.file_location
+            ):
+                raw_input()
+
+    """
    This function is called when MGA option is specified.
    It uses the self.model, self.optimizer, and self.options parameters of the class object
-	'''
-	def solveWithMGA(self):
-		scenario_names = []
-		scenario_names.append( self.options.scenario )
-
-		# The MGA algorithm uses different objectives per iteration, so the first
-		# step is to remove the original objective function
-		self.model.del_component( 'TotalCost' )
-		# Create concrete model
-		temoaInstance1 = TemoaSolverInstance(self.model, self.optimizer, self.options, self.txt_file)
-		for k in temoaInstance1.create_temoa_instance():
-			# yield "
" + k + "
" - yield k - #yield " " * 1024 - # Now add back the objective function that we earlier removed; note that name - # we choose here (FirstObj) will be copied to the output file. - temoaInstance1.instance.FirstObj = Objective( rule=TotalCost_rule, sense=minimize ) - temoaInstance1.instance.preprocess() - temoaInstance1.instance.V_ActivityByTech = Var(temoaInstance1.instance.tech_all, domain=NonNegativeReals) - temoaInstance1.instance.ActivityByTechConstraint = Constraint(temoaInstance1.instance.tech_all, rule=ActivityByTech_Constraint) - - for k in temoaInstance1.solve_temoa_instance(): - # yield "
" + k + "
" - yield k - #yield " " * 1024 - - temoaInstance1.handle_files(log_name='Complete_OutputLog.log' ) - # using value() converts the now-loaded results into a single number, - # which we'll use with our slightly unusual SlackedObjective_rule below - # (but defined above). - Perfect_Foresight_Obj = value( temoaInstance1.instance.FirstObj ) - - # Create a new dictionary that stores the MGA objective function weights - prev_activity_t = defaultdict( int ) - # Store first set of MGA objective weights drawn from base solution - prev_activity_t = PreviousAct_rule( temoaInstance1.instance, self.options.mga_weight, prev_activity_t ) - - # Perform MGA iterations - while self.options.next_mga(): - temoaMGAInstance = TemoaSolverInstance(self.model, self.optimizer, self.options, self.txt_file) - for k in temoaMGAInstance.create_temoa_instance(): - # yield "
" + k + "
" - yield k - #yield " " * 1024 - - try: - txt_file_mga = open(self.options.path_to_logs+os.sep+"Complete_OutputLog.log", "w") - except BaseException as io_exc: - yield "MGA Log file cannot be opened. Please check path. Trying to find:\n"+self.options.path_to_logs+" folder\n" - SE.write("MGA Log file cannot be opened. Please check path. Trying to find:\n"+self.options.path_to_logs+" folder\n") - txt_file_mga = open("OutputLog_MGA_last.log", "w") - - # Set up the Activity By Tech constraint, which is required for the - # updated objective function. - temoaMGAInstance.instance.V_ActivityByTech = Var(temoaMGAInstance.instance.tech_all, domain=NonNegativeReals) - temoaMGAInstance.instance.ActivityByTechConstraint = Constraint(temoaMGAInstance.instance.tech_all, rule=ActivityByTech_Constraint) - - # Update second instance with the new MGA-specific objective function - # and constraint. - temoaMGAInstance.instance.SecondObj = Objective( - expr=ActivityObj_rule( temoaMGAInstance.instance, prev_activity_t ), - noruleinit=True, - sense=minimize - ) - temoaMGAInstance.instance.PreviousSlackedObjective = Constraint( - rule=None, - expr=SlackedObjective_rule( temoaMGAInstance.instance, Perfect_Foresight_Obj, self.options.mga ), - noruleinit=True - ) - temoaMGAInstance.instance.preprocess() - for k in temoaMGAInstance.solve_temoa_instance(): - # yield "
" + k + "
" - yield k - #yield " " * 1024 - temoaMGAInstance.handle_files(log_name='Complete_OutputLog.log' ) - #Update MGA objective function weights for use in the next iteration - prev_activity_t = PreviousAct_rule( temoaMGAInstance.instance, self.options.mga_weight, prev_activity_t ) - - - ''' + """ + + def solveWithMGA(self): + scenario_names = [] + scenario_names.append(self.options.scenario) + + # The MGA algorithm uses different objectives per iteration, so the first + # step is to remove the original objective function + self.model.del_component("TotalCost") + # Create concrete model + temoaInstance1 = TemoaSolverInstance( + self.model, self.optimizer, self.options, self.txt_file + ) + for k in temoaInstance1.create_temoa_instance(): + # yield "
" + k + "
" + yield k + # yield " " * 1024 + # Now add back the objective function that we earlier removed; note that name + # we choose here (FirstObj) will be copied to the output file. + temoaInstance1.instance.FirstObj = Objective( + rule=TotalCost_rule, sense=minimize + ) + temoaInstance1.instance.preprocess() + temoaInstance1.instance.V_ActivityByTech = Var( + temoaInstance1.instance.tech_all, domain=NonNegativeReals + ) + temoaInstance1.instance.ActivityByTechConstraint = Constraint( + temoaInstance1.instance.tech_all, rule=ActivityByTech_Constraint + ) + + for k in temoaInstance1.solve_temoa_instance(): + # yield "
" + k + "
" + yield k + # yield " " * 1024 + + temoaInstance1.handle_files(log_name="Complete_OutputLog.log") + # using value() converts the now-loaded results into a single number, + # which we'll use with our slightly unusual SlackedObjective_rule below + # (but defined above). + Perfect_Foresight_Obj = value(temoaInstance1.instance.FirstObj) + + # Create a new dictionary that stores the MGA objective function weights + prev_activity_t = defaultdict(int) + # Store first set of MGA objective weights drawn from base solution + prev_activity_t = PreviousAct_rule( + temoaInstance1.instance, self.options.mga_weight, prev_activity_t + ) + + # Perform MGA iterations + while self.options.next_mga(): + temoaMGAInstance = TemoaSolverInstance( + self.model, self.optimizer, self.options, self.txt_file + ) + for k in temoaMGAInstance.create_temoa_instance(): + # yield "
" + k + "
" + yield k + # yield " " * 1024 + + try: + txt_file_mga = open( + self.options.path_to_logs + os.sep + "Complete_OutputLog.log", "w" + ) + except BaseException as io_exc: + yield "MGA Log file cannot be opened. Please check path. Trying to find:\n" + self.options.path_to_logs + " folder\n" + SE.write( + "MGA Log file cannot be opened. Please check path. Trying to find:\n" + + self.options.path_to_logs + + " folder\n" + ) + txt_file_mga = open("OutputLog_MGA_last.log", "w") + + # Set up the Activity By Tech constraint, which is required for the + # updated objective function. + temoaMGAInstance.instance.V_ActivityByTech = Var( + temoaMGAInstance.instance.tech_all, domain=NonNegativeReals + ) + temoaMGAInstance.instance.ActivityByTechConstraint = Constraint( + temoaMGAInstance.instance.tech_all, rule=ActivityByTech_Constraint + ) + + # Update second instance with the new MGA-specific objective function + # and constraint. + temoaMGAInstance.instance.SecondObj = Objective( + expr=ActivityObj_rule(temoaMGAInstance.instance, prev_activity_t), + noruleinit=True, + sense=minimize, + ) + temoaMGAInstance.instance.PreviousSlackedObjective = Constraint( + rule=None, + expr=SlackedObjective_rule( + temoaMGAInstance.instance, Perfect_Foresight_Obj, self.options.mga + ), + noruleinit=True, + ) + temoaMGAInstance.instance.preprocess() + for k in temoaMGAInstance.solve_temoa_instance(): + # yield "
" + k + "
" + yield k + # yield " " * 1024 + temoaMGAInstance.handle_files(log_name="Complete_OutputLog.log") + # Update MGA objective function weights for use in the next iteration + prev_activity_t = PreviousAct_rule( + temoaMGAInstance.instance, self.options.mga_weight, prev_activity_t + ) + + """ This function is called when MGA option is not specified. - ''' - def solveWithoutMGA(self): - - temoaInstance1 = TemoaSolverInstance(self.model, self.optimizer, self.options, self.txt_file) - - if hasattr(self.options, 'myopic') and self.options.myopic: - - print ('This run is myopic ...') - from temoa_myopic import myopic_db_generator_solver - myopic_db_generator_solver ( self ) - - else: - - for k in temoaInstance1.create_temoa_instance(): - # yield "
" + k + "
" - yield k - #yield " " * 1024 - for k in temoaInstance1.solve_temoa_instance(): - # yield "
" + k + "
" - yield k - #yield " " * 1024 - temoaInstance1.handle_files(log_name='Complete_OutputLog.log') - - ''' + """ + + def solveWithoutMGA(self): + temoaInstance1 = TemoaSolverInstance( + self.model, self.optimizer, self.options, self.txt_file + ) + + if hasattr(self.options, "myopic") and self.options.myopic: + print("This run is myopic ...") + from temoa_myopic import myopic_db_generator_solver + + myopic_db_generator_solver(self) + + else: + for k in temoaInstance1.create_temoa_instance(): + # yield "
" + k + "
" + yield k + # yield " " * 1024 + for k in temoaInstance1.solve_temoa_instance(): + # yield "
" + k + "
" + yield k + # yield " " * 1024 + temoaInstance1.handle_files(log_name="Complete_OutputLog.log") + + """ This funciton creates and solves TemoaSolverInstance. This is the function that should be called from outside this class after __init__ - ''' - def createAndSolve(self): - try: - self.txt_file = open(self.options.path_to_logs+os.sep+"Complete_OutputLog.log", "w") - - except BaseException as io_exc: - yield "Log file cannot be opened. Please check path. Trying to find:\n"+self.options.path_to_logs+" folder\n" - SE.write("Log file cannot be opened. Please check path. Trying to find:\n"+self.options.path_to_logs+" folder\n") - self.txt_file = open("Complete_OutputLog.log", "w") - self.txt_file.write("Log file cannot be opened. Please check path. Trying to find:\n"+self.options.path_to_logs+" folder\n") - - # Check and see if mga attribute exists and if mga is specified - try: - if hasattr(self.options, 'mga') and self.options.mga: - for k in self.solveWithMGA(): - #yield "
" + k + "
" - yield k - #yield " " * 1024 - else: # User requested a single run - for k in self.solveWithoutMGA(): - #yield "
" + k + "
" - yield k - #yield " " * 1024 - - except KeyboardInterrupt as e: - self.txt_file.close() - yield str(e) + '\n' - yield 'User requested quit. Exiting Temoa ...\n' - SE.write(str(e)+'\n') - SE.write( 'User requested quit. Exiting Temoa ...\n' ) - traceback.print_exc() - SE.flush() - except SystemExit as e: - self.txt_file.close() - yield str(e) + '\n' - yield 'Temoa exit requested. Exiting ...\n' - SE.write(str(e)+'\n') - SE.write( 'Temoa exit requested. Exiting ...\n' ) - traceback.print_exc() - SE.flush() - except Exception as e: - self.txt_file.close() - yield str(e) + '\n' - yield 'Exiting Temoa ...\n' - SE.write(str(e)+'\n') - SE.write( 'Exiting Temoa ...\n' ) - traceback.print_exc() - SE.flush() - - - -''' + """ + + def createAndSolve(self): + try: + self.txt_file = open( + self.options.path_to_logs + os.sep + "Complete_OutputLog.log", "w" + ) + + except BaseException as io_exc: + yield "Log file cannot be opened. Please check path. Trying to find:\n" + self.options.path_to_logs + " folder\n" + SE.write( + "Log file cannot be opened. Please check path. Trying to find:\n" + + self.options.path_to_logs + + " folder\n" + ) + self.txt_file = open("Complete_OutputLog.log", "w") + self.txt_file.write( + "Log file cannot be opened. Please check path. Trying to find:\n" + + self.options.path_to_logs + + " folder\n" + ) + + # Check and see if mga attribute exists and if mga is specified + try: + if hasattr(self.options, "mga") and self.options.mga: + for k in self.solveWithMGA(): + # yield "
" + k + "
" + yield k + # yield " " * 1024 + else: # User requested a single run + for k in self.solveWithoutMGA(): + # yield "
" + k + "
" + yield k + # yield " " * 1024 + + except KeyboardInterrupt as e: + self.txt_file.close() + yield str(e) + "\n" + yield "User requested quit. Exiting Temoa ...\n" + SE.write(str(e) + "\n") + SE.write("User requested quit. Exiting Temoa ...\n") + traceback.print_exc() + SE.flush() + except SystemExit as e: + self.txt_file.close() + yield str(e) + "\n" + yield "Temoa exit requested. Exiting ...\n" + SE.write(str(e) + "\n") + SE.write("Temoa exit requested. Exiting ...\n") + traceback.print_exc() + SE.flush() + except Exception as e: + self.txt_file.close() + yield str(e) + "\n" + yield "Exiting Temoa ...\n" + SE.write(str(e) + "\n") + SE.write("Exiting Temoa ...\n") + traceback.print_exc() + SE.flush() + + +""" This class is for creating one temoa solver instance. It is used by TemoaSolver. (Multiple instances are created for MGA/non-MGA options). -''' +""" + + class TemoaSolverInstance(object): - def __init__(self, model, optimizer, options, txt_file): - self.model = model - self.options = options - self.optimizer = optimizer - self.txt_file = txt_file - - def create_temoa_instance (self): - """Create a single instance of Temoa.""" - - try: - if self.options.keepPyomoLP: - yield '\nSolver will write file: {}\n\n'.format( self.options.scenario + '.lp' ) - SE.write('\nSolver will write file: {}\n\n'.format( self.options.scenario + '.lp' )) - self.txt_file.write('\nSolver will write file: {}\n\n'.format( self.options.scenario + '.lp' )) - - yield 'Reading data files.' - SE.write( '[ ] Reading data files.'); SE.flush() - self.txt_file.write( 'Reading data files.') - begin = time() - duration = lambda: time() - begin - - modeldata = DataPortal( model=self.model ) - # Recreate the pyomo command's ability to specify multiple "dot dat" files - # on the command lin - for fname in self.options.dot_dat: - if fname[-4:] != '.dat': - msg = "InputError: expecting a dot dat (e.g., data.dat) file, found '{}'\n" - raise Exception( msg.format( fname )) - modeldata.load( filename=fname ) - yield '\t\t\t\t\t[%8.2f]\n' % duration() - SE.write( '\r[%8.2f]\n' % duration() ) - self.txt_file.write( '[%8.2f]\n' % duration() ) - - yield 'Creating Temoa model instance.' - SE.write( '[ ] Creating Temoa model instance.'); SE.flush() - self.txt_file.write( 'Creating Temoa model instance.') - - self.model.dual = Suffix(direction=Suffix.IMPORT) - #self.model.rc = Suffix(direction=Suffix.IMPORT) - #self.model.slack = Suffix(direction=Suffix.IMPORT) - - self.instance = self.model.create_instance( modeldata ) - yield '\t\t\t\t[%8.2f]\n' % duration() - SE.write( '\r[%8.2f]\n' % duration() ) - self.txt_file.write( '[%8.2f]\n' % duration() ) - - except Exception as model_exc: - yield "Exception found in create_temoa_instance\n" - SE.write("Exeception found in create_temoa_instance\n") - self.txt_file.write("Exception found in create_temoa_instance\n") - yield str(model_exc) - SE.write(str(model_exc)) - self.txt_file.write(str(model_exc)) - raise model_exc - - - def solve_temoa_instance (self): - '''Solve a Temoa instance.''' - - begin = time() - duration = lambda: time() - begin - try: - yield 'Solving.' 
- SE.write( '[ ] Solving.'); SE.flush() - self.txt_file.write( 'Solving.') - if self.optimizer: - if self.options.neos: - self.result = self.optimizer.solve(self.instance, opt=self.options.solver) - else: - self.result = self.optimizer.solve( self.instance, suffixes=['dual'],# 'rc', 'slack'], - keepfiles=self.options.keepPyomoLP, - symbolic_solver_labels=self.options.keepPyomoLP ) - yield '\t\t\t\t\t\t[%8.2f]\n' % duration() - SE.write( '\r[%8.2f]\n' % duration() ) - self.txt_file.write( '[%8.2f]\n' % duration() ) - # return signal handlers to defaults, again - signal(SIGINT, default_int_handler) - - # ... print the easier-to-read/parse format - msg = '[ ] Calculating reporting variables and formatting results.' - yield 'Calculating reporting variables and formatting results.' - SE.write( msg ); SE.flush() - self.txt_file.write( 'Calculating reporting variables and formatting results.') - self.instance.solutions.store_to(self.result) - formatted_results = pformat_results( self.instance, self.result, self.options ) - yield '\t[%8.2f]\n' % duration() - SE.write( '\r[%8.2f\n' % duration() ) - self.txt_file.write( '[%8.2f]\n' % duration() ) - yield formatted_results.getvalue() + '\n' - #SO.write( formatted_results.getvalue() ) - self.txt_file.write( formatted_results.getvalue() ) - if formatted_results.getvalue()=='No solution found.': - SE.write( formatted_results.getvalue() + '\n') - else: - yield '\r---------- Not solving: no available solver\n' - SE.write( '\r---------- Not solving: no available solver\n' ) - self.txt_file.write( '\r---------- Not solving: no available solver\n' ) - - except BaseException as model_exc: - yield "Exception found in solve_temoa_instance\n" - SE.write("Exception found in solve_temoa_instance\n") - self.txt_file.write("Exception found in solve_temoa_instance\n") - yield str(model_exc)+'\n' - SE.write(str(model_exc)) - self.txt_file.write(str(model_exc)) - raise model_exc - - def handle_files(self, log_name): - """Handle log and LP file assuming user called with config file or from interface.""" - if isinstance(self.options, TemoaConfig) and self.options.saveTEXTFILE: - for inpu in self.options.dot_dat: - file_ty = reg_exp.search(r"\b([\w-]+)\.(\w+)\b", inpu) - new_dir = self.options.path_to_data+os.sep+file_ty.group(1)+'_'+self.options.scenario+'_model' - if path.isfile(self.options.path_to_logs+os.sep+log_name) and path.exists(new_dir): - copyfile(self.options.path_to_logs+os.sep+log_name, new_dir+os.sep+self.options.scenario+'_OutputLog.log') - - if isinstance(self.options, TemoaConfig) and self.options.keepPyomoLP: - for inpu in self.options.dot_dat: - file_ty = reg_exp.search(r"\b([\w-]+)\.(\w+)\b", inpu) - - new_dir = self.options.path_to_data+os.sep+file_ty.group(1)+'_'+self.options.scenario+'_model' - - for files in os.listdir(self.options.path_to_lp_files): - if files.endswith(".lp"): - lpfile = files - else: - if files == "README.txt": - continue - os.remove(self.options.path_to_lp_files+os.sep+files) - - if path.exists(new_dir): - move(self.options.path_to_lp_files+os.sep+lpfile, new_dir+os.sep+self.options.scenario+'.lp') + def __init__(self, model, optimizer, options, txt_file): + self.model = model + self.options = options + self.optimizer = optimizer + self.txt_file = txt_file + + def create_temoa_instance(self): + """Create a single instance of Temoa.""" + + try: + if self.options.keepPyomoLP: + yield "\nSolver will write file: {}\n\n".format( + self.options.scenario + ".lp" + ) + SE.write( + "\nSolver will write file: {}\n\n".format( + 
self.options.scenario + ".lp"
+                    )
+                )
+                self.txt_file.write(
+                    "\nSolver will write file: {}\n\n".format(
+                        self.options.scenario + ".lp"
+                    )
+                )
+
+            yield "Reading data files."
+            SE.write("[ ] Reading data files.")
+            SE.flush()
+            self.txt_file.write("Reading data files.")
+            begin = time()
+            duration = lambda: time() - begin
+
+            modeldata = DataPortal(model=self.model)
+            # Recreate the pyomo command's ability to specify multiple "dot dat" files
+            # on the command line
+            for fname in self.options.dot_dat:
+                if fname[-4:] != ".dat":
+                    msg = "InputError: expecting a dot dat (e.g., data.dat) file, found '{}'\n"
+                    raise Exception(msg.format(fname))
+                modeldata.load(filename=fname)
+            yield "\t\t\t\t\t[%8.2f]\n" % duration()
+            SE.write("\r[%8.2f]\n" % duration())
+            self.txt_file.write("[%8.2f]\n" % duration())
+
+            yield "Creating Temoa model instance."
+            SE.write("[ ] Creating Temoa model instance.")
+            SE.flush()
+            self.txt_file.write("Creating Temoa model instance.")
+
+            self.model.dual = Suffix(direction=Suffix.IMPORT)
+            # self.model.rc = Suffix(direction=Suffix.IMPORT)
+            # self.model.slack = Suffix(direction=Suffix.IMPORT)
+
+            self.instance = self.model.create_instance(modeldata)
+            yield "\t\t\t\t[%8.2f]\n" % duration()
+            SE.write("\r[%8.2f]\n" % duration())
+            self.txt_file.write("[%8.2f]\n" % duration())
+
+        except Exception as model_exc:
+            yield "Exception found in create_temoa_instance\n"
+            SE.write("Exception found in create_temoa_instance\n")
+            self.txt_file.write("Exception found in create_temoa_instance\n")
+            yield str(model_exc)
+            SE.write(str(model_exc))
+            self.txt_file.write(str(model_exc))
+            raise model_exc
+
+    def solve_temoa_instance(self):
+        """Solve a Temoa instance."""
+
+        begin = time()
+        duration = lambda: time() - begin
+        try:
+            yield "Solving."
+            SE.write("[ ] Solving.")
+            SE.flush()
+            self.txt_file.write("Solving.")
+            if self.optimizer:
+                if self.options.neos:
+                    self.result = self.optimizer.solve(
+                        self.instance, opt=self.options.solver
+                    )
+                else:
+                    self.result = self.optimizer.solve(
+                        self.instance,
+                        suffixes=["dual"],  # 'rc', 'slack'],
+                        keepfiles=self.options.keepPyomoLP,
+                        symbolic_solver_labels=self.options.keepPyomoLP,
+                    )
+                yield "\t\t\t\t\t\t[%8.2f]\n" % duration()
+                SE.write("\r[%8.2f]\n" % duration())
+                self.txt_file.write("[%8.2f]\n" % duration())
+                # return signal handlers to defaults, again
+                signal(SIGINT, default_int_handler)
+
+                # ... print the easier-to-read/parse format
+                msg = (
+                    "[ ] Calculating reporting variables and formatting results."
+                )
+                yield "Calculating reporting variables and formatting results."
+                SE.write(msg)
+                SE.flush()
+                self.txt_file.write(
+                    "Calculating reporting variables and formatting results."
+ ) + self.instance.solutions.store_to(self.result) + formatted_results = pformat_results( + self.instance, self.result, self.options + ) + yield "\t[%8.2f]\n" % duration() + SE.write("\r[%8.2f\n" % duration()) + self.txt_file.write("[%8.2f]\n" % duration()) + yield formatted_results.getvalue() + "\n" + # SO.write( formatted_results.getvalue() ) + self.txt_file.write(formatted_results.getvalue()) + if formatted_results.getvalue() == "No solution found.": + SE.write(formatted_results.getvalue() + "\n") + else: + yield "\r---------- Not solving: no available solver\n" + SE.write("\r---------- Not solving: no available solver\n") + self.txt_file.write("\r---------- Not solving: no available solver\n") + + except BaseException as model_exc: + yield "Exception found in solve_temoa_instance\n" + SE.write("Exception found in solve_temoa_instance\n") + self.txt_file.write("Exception found in solve_temoa_instance\n") + yield str(model_exc) + "\n" + SE.write(str(model_exc)) + self.txt_file.write(str(model_exc)) + raise model_exc + + def handle_files(self, log_name): + """Handle log and LP file assuming user called with config file or from interface.""" + if isinstance(self.options, TemoaConfig) and self.options.saveTEXTFILE: + for inpu in self.options.dot_dat: + file_ty = reg_exp.search(r"\b([\w-]+)\.(\w+)\b", inpu) + new_dir = ( + self.options.path_to_data + + os.sep + + file_ty.group(1) + + "_" + + self.options.scenario + + "_model" + ) + if path.isfile( + self.options.path_to_logs + os.sep + log_name + ) and path.exists(new_dir): + copyfile( + self.options.path_to_logs + os.sep + log_name, + new_dir + os.sep + self.options.scenario + "_OutputLog.log", + ) + + if isinstance(self.options, TemoaConfig) and self.options.keepPyomoLP: + for inpu in self.options.dot_dat: + file_ty = reg_exp.search(r"\b([\w-]+)\.(\w+)\b", inpu) + + new_dir = ( + self.options.path_to_data + + os.sep + + file_ty.group(1) + + "_" + + self.options.scenario + + "_model" + ) + + for files in os.listdir(self.options.path_to_lp_files): + if files.endswith(".lp"): + lpfile = files + else: + if files == "README.txt": + continue + os.remove(self.options.path_to_lp_files + os.sep + files) + + if path.exists(new_dir): + move( + self.options.path_to_lp_files + os.sep + lpfile, + new_dir + os.sep + self.options.scenario + ".lp", + ) def get_solvers(): - """Return the solvers avaiable on the system.""" - from logging import getLogger - - logger = getLogger('pyomo.solvers') - logger_status = logger.disabled - logger.disabled = True # no need for warnings: it's what we're testing! - - available_solvers = set() - try: - services = SF.services() # pyutilib version <= 5.6.3 - except RuntimeError as e: - services = SF # pyutilib version >= 5.6.4 - - for sname in services: - # initial underscore ('_'): Pyomo's method to mark non-public plugins - if '_' == sname[0]: continue - - solver = SF( sname ) - - try: - if not solver: continue - except ApplicationError as e: - continue - - if 'os' == sname: continue # Workaround current bug in Coopr - if not solver.available( exception_flag=False ): continue - available_solvers.add( sname ) - - logger.disabled = logger_status # put back the way it was. 
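# Illustrative aside, not part of this patch: the block below picks the default
# solver by a fixed preference order. The same choice can be written compactly;
# PREFERRED is a hypothetical name, and next(iter(...), ...) is the Python 3
# spelling of the iter(...).next() call retained below, which is Python 2
# syntax and raises AttributeError on Python 3:
#
#     PREFERRED = ("cplex", "gurobi", "cbc", "glpk")
#     default_solver = next(
#         (s for s in PREFERRED if s in available_solvers),
#         next(iter(available_solvers), "NONE"),
#     )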
- - if available_solvers: - if 'cplex' in available_solvers: - default_solver = 'cplex' - elif 'gurobi' in available_solvers: - default_solver = 'gurobi' - elif 'cbc' in available_solvers: - default_solver = 'cbc' - elif 'glpk' in available_solvers: - default_solver = 'glpk' - else: - default_solver = iter(available_solvers).next() - else: - default_solver = 'NONE' - SE.write('\nNOTICE: Pyomo did not find any suitable solvers. Temoa will ' - 'not be able to solve any models. If you need help, ask on the ' - 'Temoa Project forum: http://temoaproject.org/\n\n' ) - - return (available_solvers, default_solver) - - - -def parse_args ( ): - """Parse arguments specfied from command line or in config file.""" - import argparse, sys - import os, re - from os.path import dirname, abspath - - available_solvers, default_solver = get_solvers() - - parser = argparse.ArgumentParser() - parser.prog = path.basename( argv[0].strip('/') ) - - parser.add_argument('dot_dat', - type=str, - nargs='*', - help='AMPL-format data file(s) with which to create a model instance. ' - 'e.g. "data.dat"' - ) - - parser.add_argument( '--path_to_logs', - help='Path to where debug logs will be generated by default. See folder debug_logs in data_files.', - action='store', - dest='path_to_logs', - default=re.sub('temoa_model$', 'data_files', dirname(abspath(__file__)))+os.sep+"debug_logs" - ) - - parser.add_argument( '--config', - help='Path to file containing configuration information.', - action='store', - dest='config', - default=None - ) - - parser.add_argument('--solver', - help="Which backend solver to use. See 'pyomo --help-solvers' for a list " - 'of solvers with which Pyomo can interface. The list shown here is ' - 'what Pyomo can currently find on this system. [Default: {}]' - .format(default_solver), - action='store', - choices=sorted(available_solvers), - dest='solver', - default=default_solver) - - options = parser.parse_args() - options.neos = False - - # Can't specify keeping the LP file without config file, so set this - # attribute to false - options.keepPyomoLP = False - - # If the user specifies the config flag, then call TemoaConfig and overwrite - # the argument parser above. - if options.config: - config_flag = 1 #flag indicates config file was used. - try: - temoa_config = TemoaConfig(d_solver=default_solver) - temoa_config.build(config=options.config) - SE.write(repr(temoa_config)) - options = temoa_config - SE.write('\nPlease press enter to continue or Ctrl+C to quit.\n') - #raw_input() # Give the user a chance to confirm input - if options.abort_temoa: - return - except KeyboardInterrupt: - SE.write('\n\nUser requested quit. Exiting Temoa ...\n') - raise SystemExit() - else: - config_flag = 0 #flag indicates config file was not used. - - s_choice = str( options.solver ).upper() - SE.write('Notice: Using the {} solver interface.\n'.format( s_choice )) - SE.flush() - - SE.write("Continue Operation? [Press enter to continue or CTRL+C to abort]\n") - SE.flush() - try: #make compatible with Python 2.7 or 3 - if os.path.join('temoa_model', 'config_sample_myopic') not in options.file_location: - # - raw_input() # Give the user a chance to confirm input - except: - input() - - return options, config_flag + """Return the solvers avaiable on the system.""" + from logging import getLogger + + logger = getLogger("pyomo.solvers") + logger_status = logger.disabled + logger.disabled = True # no need for warnings: it's what we're testing! 
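# Illustrative aside, not part of this patch: the loop below asks Pyomo's
# SolverFactory for each registered plugin and keeps the usable ones. A minimal
# standalone availability check for a single solver (here "glpk", chosen only
# as an example) would be:
#
#     from pyomo.environ import SolverFactory
#
#     opt = SolverFactory("glpk")
#     if opt.available(exception_flag=False):  # return a bool instead of raising
#         print("glpk is usable on this system")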
+ + available_solvers = set() + try: + services = SF.services() # pyutilib version <= 5.6.3 + except RuntimeError as e: + services = SF # pyutilib version >= 5.6.4 + + for sname in services: + # initial underscore ('_'): Pyomo's method to mark non-public plugins + if "_" == sname[0]: + continue + + solver = SF(sname) + + try: + if not solver: + continue + except ApplicationError as e: + continue + + if "os" == sname: + continue # Workaround current bug in Coopr + if not solver.available(exception_flag=False): + continue + available_solvers.add(sname) + + logger.disabled = logger_status # put back the way it was. + + if available_solvers: + if "cplex" in available_solvers: + default_solver = "cplex" + elif "gurobi" in available_solvers: + default_solver = "gurobi" + elif "cbc" in available_solvers: + default_solver = "cbc" + elif "glpk" in available_solvers: + default_solver = "glpk" + else: + default_solver = iter(available_solvers).next() + else: + default_solver = "NONE" + SE.write( + "\nNOTICE: Pyomo did not find any suitable solvers. Temoa will " + "not be able to solve any models. If you need help, ask on the " + "Temoa Project forum: http://temoaproject.org/\n\n" + ) + + return (available_solvers, default_solver) + + +def parse_args(): + """Parse arguments specfied from command line or in config file.""" + import argparse, sys + import os, re + from os.path import dirname, abspath + + available_solvers, default_solver = get_solvers() + + parser = argparse.ArgumentParser() + parser.prog = path.basename(argv[0].strip("/")) + + parser.add_argument( + "dot_dat", + type=str, + nargs="*", + help="AMPL-format data file(s) with which to create a model instance. " + 'e.g. "data.dat"', + ) + + parser.add_argument( + "--path_to_logs", + help="Path to where debug logs will be generated by default. See folder debug_logs in data_files.", + action="store", + dest="path_to_logs", + default=re.sub("temoa_model$", "data_files", dirname(abspath(__file__))) + + os.sep + + "debug_logs", + ) + + parser.add_argument( + "--config", + help="Path to file containing configuration information.", + action="store", + dest="config", + default=None, + ) + + parser.add_argument( + "--solver", + help="Which backend solver to use. See 'pyomo --help-solvers' for a list " + "of solvers with which Pyomo can interface. The list shown here is " + "what Pyomo can currently find on this system. [Default: {}]".format( + default_solver + ), + action="store", + choices=sorted(available_solvers), + dest="solver", + default=default_solver, + ) + + options = parser.parse_args() + options.neos = False + + # Can't specify keeping the LP file without config file, so set this + # attribute to false + options.keepPyomoLP = False + + # If the user specifies the config flag, then call TemoaConfig and overwrite + # the argument parser above. + if options.config: + config_flag = 1 # flag indicates config file was used. + try: + temoa_config = TemoaConfig(d_solver=default_solver) + temoa_config.build(config=options.config) + SE.write(repr(temoa_config)) + options = temoa_config + SE.write("\nPlease press enter to continue or Ctrl+C to quit.\n") + # raw_input() # Give the user a chance to confirm input + if options.abort_temoa: + return + except KeyboardInterrupt: + SE.write("\n\nUser requested quit. Exiting Temoa ...\n") + raise SystemExit() + else: + config_flag = 0 # flag indicates config file was not used. 
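# Illustrative aside, not part of this patch: the branch above implements a
# "config file wins" precedence. Stripped to its core (names taken from this
# file), the pattern is:
#
#     options = parser.parse_args()
#     if options.config:                    # --config given on the command line
#         temoa_config = TemoaConfig(d_solver=default_solver)
#         temoa_config.build(config=options.config)
#         options = temoa_config            # config object replaces argparse's namespace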
+ + s_choice = str(options.solver).upper() + SE.write("Notice: Using the {} solver interface.\n".format(s_choice)) + SE.flush() + + SE.write("Continue Operation? [Press enter to continue or CTRL+C to abort]\n") + SE.flush() + try: # make compatible with Python 2.7 or 3 + if ( + os.path.join("temoa_model", "config_sample_myopic") + not in options.file_location + ): + # + raw_input() # Give the user a chance to confirm input + except: + input() + + return options, config_flag diff --git a/temoa_model/temoa_stochastic.py b/temoa_model/temoa_stochastic.py index 64d6c51a..c05ab63f 100644 --- a/temoa_model/temoa_stochastic.py +++ b/temoa_model/temoa_stochastic.py @@ -27,8 +27,7 @@ from temoa_run import parse_args from pformat_results import pformat_results from pyomo.environ import * -from pyomo.pysp.scenariotree.manager import \ - ScenarioTreeManagerClientSerial +from pyomo.pysp.scenariotree.manager import ScenarioTreeManagerClientSerial from pyomo.pysp.ef import create_ef_instance from pyomo.opt import SolverFactory from time import time @@ -38,88 +37,96 @@ def return_CP_and_path(p_data): # return_CP_and_path(p_data) -> dict(), dict() - # This function reads the path to the instance directory (p_data) and - # returns conditional two dictionaries, the first one is the conditional + # This function reads the path to the instance directory (p_data) and + # returns conditional two dictionaries, the first one is the conditional # probability of a scenario, the second one is the path to all files of a # scenario. from collections import deque, defaultdict + # from pyomo.pysp.util.scenariomodels import scenario_tree_model - from pyomo.pysp.scenariotree.tree_structure_model import \ - CreateAbstractScenarioTreeModel + from pyomo.pysp.scenariotree.tree_structure_model import ( + CreateAbstractScenarioTreeModel, + ) pwd = os.getcwd() os.chdir(p_data) - s2fp_dict = defaultdict(deque) # Scenario to 'file path' dictionary, .dat not included - s2cd_dict = defaultdict(float) # Scenario to conditonal density mapping + s2fp_dict = defaultdict( + deque + ) # Scenario to 'file path' dictionary, .dat not included + s2cd_dict = defaultdict(float) # Scenario to conditonal density mapping # sStructure = scenario_tree_model.create_instance( filename='ScenarioStructure.dat' ) - sStructure = CreateAbstractScenarioTreeModel().create_instance( filename='ScenarioStructure.dat' ) + sStructure = CreateAbstractScenarioTreeModel().create_instance( + filename="ScenarioStructure.dat" + ) # The following code is borrowed from Kevin's temoa_lib.py ########################################################################### # Step 1: find the root node. PySP doesn't make this very easy ... - + # a child -> parent mapping, because every child has only one parent, but # not vice-versa - ctpTree = dict() # Child to parent dict, one to one mapping - + ctpTree = dict() # Child to parent dict, one to one mapping + to_process = deque() - to_process.extend( sStructure.Children.keys() ) + to_process.extend(sStructure.Children.keys()) while to_process: node = to_process.pop() if node in sStructure.Children: # it's a parent! 
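# Step-by-step note (editorial, not part of the patch): every child found at
# this point is queued for processing and recorded in ctpTree as child -> parent.
# Once the walk drains to_process, the root is the only node that appears as a
# parent but never as a child, which is exactly the set difference computed below:
#
#     root_node = (set(ctpTree.values()) - set(ctpTree.keys())).pop()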
- new_nodes = set( sStructure.Children[ node ] ) - to_process.extend( new_nodes ) - ctpTree.update({n : node for n in new_nodes }) - + new_nodes = set(sStructure.Children[node]) + to_process.extend(new_nodes) + ctpTree.update({n: node for n in new_nodes}) + # parents - children - root_node = (set( ctpTree.values() ) - set( ctpTree.keys() )).pop() - + root_node = (set(ctpTree.values()) - set(ctpTree.keys())).pop() + # ptcTree = defaultdict( list ) # Parent to child node, one to multiple mapping # for c, p in ctpTree.items(): # ptcTree[ p ].append( c ) # ptcTree = dict( ptcTree ) # be slightly defensive; catch any additions - + # leaf_nodes = set(ctpTree.keys()) - set(ctpTree.values()) # leaf_nodes = set(sStructure.ScenarioLeafNode.values()) # Try to hack Kevin's code - leaf_nodes = sStructure.ScenarioLeafNode.values() # Try to hack Kevin's code + leaf_nodes = sStructure.ScenarioLeafNode.values() # Try to hack Kevin's code leaf_nodes_names = list() for n in leaf_nodes: leaf_nodes_names.append(n.value) leaf_nodes_names = set(leaf_nodes_names) - - scenario_nodes = dict() # Map from leafnode to 'node path' - for node in leaf_nodes_names: # e.g.: {Rs0s0: [R, Rs0, Rs0s0]} + + scenario_nodes = dict() # Map from leafnode to 'node path' + for node in leaf_nodes_names: # e.g.: {Rs0s0: [R, Rs0, Rs0s0]} s = deque() - scenario_nodes[ node ] = s + scenario_nodes[node] = s while node in ctpTree: - s.append( node ) - node = ctpTree[ node ] - s.append( node ) + s.append(node) + node = ctpTree[node] + s.append(node) s.reverse() ########################################################################### for s in sStructure.Scenarios: - cp = 1.0 # Starting probability - for n in scenario_nodes[value( sStructure.ScenarioLeafNode[s]) ]: - cp = cp*value( sStructure.ConditionalProbability[n] ) + cp = 1.0 # Starting probability + for n in scenario_nodes[value(sStructure.ScenarioLeafNode[s])]: + cp = cp * value(sStructure.ConditionalProbability[n]) if not sStructure.ScenarioBasedData.value: - s2fp_dict[s].append(n + '.dat') + s2fp_dict[s].append(n + ".dat") s2cd_dict[s] = cp - + from pyomo.core import Objective + if sStructure.ScenarioBasedData.value: for s in sStructure.Scenarios: - s2fp_dict[s].append(s + '.dat') + s2fp_dict[s].append(s + ".dat") os.chdir(pwd) return (s2cd_dict, s2fp_dict) -def solve_ef(p_model, p_data, temoa_options = None): + +def solve_ef(p_model, p_data, temoa_options=None): """ solve_ef(p_model, p_data) -> objective value of the extensive form - Solves the model in stochastic mode. - p_model -> string, the path to the model file (ReferenceModel.py). + Solves the model in stochastic mode. + p_model -> string, the path to the model file (ReferenceModel.py). p_data -> string, the path to the directory of data for the stochastic mdoel, where ScenarioStructure.dat should resides. Returns a float point number of the value of objective function for the @@ -128,10 +135,10 @@ def solve_ef(p_model, p_data, temoa_options = None): options = ScenarioTreeManagerClientSerial.register_options() - if os.path.basename(p_model) == 'ReferenceModel.py': + if os.path.basename(p_model) == "ReferenceModel.py": options.model_location = os.path.dirname(p_model) else: - sys.stderr.write('\nModel file should be ReferenceModel.py. Exiting...\n') + sys.stderr.write("\nModel file should be ReferenceModel.py. 
Exiting...\n") sys.exit(1) options.scenario_tree_location = p_data @@ -139,18 +146,18 @@ def solve_ef(p_model, p_data, temoa_options = None): # manager.close() and gracefully shutdown with ScenarioTreeManagerClientSerial(options) as manager: manager.initialize() - - ef_instance = create_ef_instance(manager.scenario_tree, - verbose_output=options.verbose) - + + ef_instance = create_ef_instance( + manager.scenario_tree, verbose_output=options.verbose + ) + ef_instance.dual = Suffix(direction=Suffix.IMPORT) - + with SolverFactory(temoa_options.solver) as opt: - ef_result = opt.solve(ef_instance) # Write to database - if hasattr(temoa_options, 'output'): + if hasattr(temoa_options, "output"): sys.path.append(options.model_location) # from temoa_config import TemoaConfig @@ -160,50 +167,55 @@ def solve_ef(p_model, p_data, temoa_options = None): # temoa_options.saveTEXTFILE = temoa_options.saveTEXTFILE # temoa_options.path_to_data = temoa_options.path_to_data # temoa_options.saveEXCEL = temoa_options.saveEXCEL - ef_result.solution.Status = 'feasible' # Assume it is feasible - # Maybe there is a better solution using manager, but now it is a + ef_result.solution.Status = "feasible" # Assume it is feasible + # Maybe there is a better solution using manager, but now it is a # kludge to use return_CP_and_path() function s2cd_dict, s2fp_dict = return_CP_and_path(p_data) - stochastic_run = temoa_options.scenario # Name of stochastic run + stochastic_run = temoa_options.scenario # Name of stochastic run for s in manager.scenario_tree.scenarios: ins = s._instance - temoa_options.scenario = '.'.join( [stochastic_run, s.name] ) + temoa_options.scenario = ".".join([stochastic_run, s.name]) temoa_options.dot_dat = list() for fname in s2fp_dict[s.name]: temoa_options.dot_dat.append( os.path.join(options.scenario_tree_location, fname) ) # temoa_options.output = os.path.join( - # options.scenario_tree_location, + # options.scenario_tree_location, # stochastic_output # ) - msg = '\nStoring results from scenario {} to database.\n'.format(s.name) + msg = "\nStoring results from scenario {} to database.\n".format(s.name) sys.stderr.write(msg) - formatted_results = pformat_results( ins, ef_result, temoa_options ) + formatted_results = pformat_results(ins, ef_result, temoa_options) - ef_instance.solutions.store_to( ef_result ) - ef_obj = value( ef_instance.EF_EXPECTED_COST.values()[0] ) + ef_instance.solutions.store_to(ef_result) + ef_obj = value(ef_instance.EF_EXPECTED_COST.values()[0]) return ef_obj -def StochasticPointObjective_rule ( M, p ): - expr = ( M.StochasticPointCost[ p ] == PeriodCost_rule( M, p ) ) + +def StochasticPointObjective_rule(M, p): + expr = M.StochasticPointCost[p] == PeriodCost_rule(M, p) return expr -def Objective_rule ( M ): - return sum( M.StochasticPointCost[ pp ] for pp in M.time_optimize ) -M = model = temoa_create_model( 'TEMOA Stochastic' ) +def Objective_rule(M): + return sum(M.StochasticPointCost[pp] for pp in M.time_optimize) + + +M = model = temoa_create_model("TEMOA Stochastic") -M.StochasticPointCost = Var( M.time_optimize, within=NonNegativeReals ) -M.StochasticPointCostConstraint = Constraint( M.time_optimize, rule=StochasticPointObjective_rule ) +M.StochasticPointCost = Var(M.time_optimize, within=NonNegativeReals) +M.StochasticPointCostConstraint = Constraint( + M.time_optimize, rule=StochasticPointObjective_rule +) del M.TotalCost -M.TotalCost = Objective( rule=Objective_rule, sense=minimize ) +M.TotalCost = Objective(rule=Objective_rule, sense=minimize) if __name__ == 
"__main__": p_model = "./ReferenceModel.py" temoa_options, config_flag = parse_args() - p_dot_dat = temoa_options.dot_dat[0] # must be ScenarioStructure.dat + p_dot_dat = temoa_options.dot_dat[0] # must be ScenarioStructure.dat p_data = os.path.dirname(p_dot_dat) print(p_model, p_data) print(solve_ef(p_model, p_data, temoa_options))