diff --git a/doc/src/examples.rst b/doc/src/examples.rst
index 4b060fdb7..98f9ee3ce 100644
--- a/doc/src/examples.rst
+++ b/doc/src/examples.rst
@@ -392,3 +392,88 @@ previous methods:
 We see that, for this toy example, the L-shaped method has converged to the
 optimal solution within just 10 iterations.
+
+
+aircond
+-------
+
+This is a fairly complicated example because it is multi-stage and the
+model itself offers a lot of flexibility. The aircond example is
+unusual in that the model file, ``aircond.py``, lives in the
+``mpisppy.tests.examples`` directory. Scripts and bash files that use
+it live in the ``examples.aircond`` directory. A good place to start is the
+``aircond_cylinders.py`` file, which begins with some functions that
+support the main program. The main program makes use of the command
+line parsers provided with the library, supplemented by arguments
+provided by the aircond reference model using the line
+
+::
+
+   parser = aircond.inparser_adder(parser)
+
+The ``args`` obtained by the parser are passed directly to the vanilla hub
+and spoke creators, which know how to use the arguments from the ``baseparsers``.
+The arguments unique to aircond are processed by the ``kw_creator`` function
+in the reference model file.
+
+A simple example that uses a few of the options is shown in ``aircond_zhat.bash``, which
+also calls the ``zhat4xhat`` program to estimate confidence intervals for the solution
+obtained.
+
+
+hydro
+-----
+
+Hydro is a three-stage example that was originally coded in PySP, and we make extensive use
+of the PySP files. Unlike farmer and aircond, where the scenario data are created from
+distributions, the scenario data for this problem are provided in files.
+
+Using PySPModel
+^^^^^^^^^^^^^^^
+In the file ``hydro_cylinders_pysp.py`` the lines
+
+::
+
+   from mpisppy.utils.pysp_model import PySPModel
+   ...
+   hydro = PySPModel("./PySP/models/", "./PySP/nodedata/")
+
+cause an object called ``hydro`` to be created that has the methods needed by vanilla and the hub and
+spoke creators, as can be seen in the ``main`` function of ``hydro_cylinders_pysp.py``.
+
+
+Not using PySPModel
+^^^^^^^^^^^^^^^^^^^
+
+In the file ``hydro_cylinders.py``, the module ``hydro.py`` is imported because it provides the functions
+needed by the vanilla hub and spoke creators.
+
+
+netdes
+------
+
+This is a very challenging network design problem with many instances, each defined by a data file.
+For this problem, cross-scenario cuts are helpful,
+so the use of that spoke is illustrated in ``netdes_cylinders.py``.
+
+sslp
+----
+
+This is a classic problem from Ntaimo and Sen with data in PySP format,
+so the driver code (e.g., ``sslp_cylinders.py``, which makes use of ``sslp.py``) is somewhat similar to the
+hydro example, except that sslp is simpler because it has just two stages.
+
+UC
+--
+
+This example uses the ``egret`` package for the underlying unit commitment model
+and reads PySP-format data using the ``pyomo`` DataPortal. Data files for a variety
+of scenario counts are provided.
+
+sizes
+-----
+
+The sizes example (Jorjani et al., IJPR, 1999) is a two-stage problem with general integer
+variables in each stage. The file
+``sizes_cylinders.py`` is the usual cylinders driver. There are other examples in the directory, such
+as ``sizes_demo.py``, which provides an example of serial execution (no cylinders).
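The aircond documentation above describes how the driver's command-line arguments flow into
the model's keyword arguments. The following is a minimal sketch of that flow; it is consistent
with the ``aircond_cylinders.py`` changes in this patch but is not part of it, and the
``--branching-factors`` argument here is only a stand-in for what the library's ``baseparsers``
normally provide::

   import argparse
   from mpisppy.tests.examples import aircond

   # stand-in for the parser that the real driver assembles from baseparsers
   parser = argparse.ArgumentParser()
   parser.add_argument("--branching-factors", dest="branching_factors",
                       nargs="+", type=int, default=[4, 3, 2])
   # model-specific arguments, with defaults taken from aircond.parms
   parser = aircond.inparser_adder(parser)
   args = parser.parse_args()

   # kw_creator turns the parsed args into scenario_creator keyword arguments
   sc_options = {"args": args}
   scenario_creator_kwargs = aircond.kw_creator(sc_options)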
diff --git a/doc/src/hubs.rst b/doc/src/hubs.rst
index 8978f3cfd..7b79e3921 100644
--- a/doc/src/hubs.rst
+++ b/doc/src/hubs.rst
@@ -56,3 +56,26 @@ APH
 
 The implementation of Asynchronous Projective Hedging is described in a
 forthcoming paper.
+
+Hub Convergers
+--------------
+
+Execution of mpi-sppy programs can be terminated based on a variety of criteria.
+The simplest is the hub iteration count, and the most common is probably the relative
+gap between upper and lower bounds. It is also possible to terminate
+based on convergence within the hub; furthermore, convergence metrics within
+the hub can be helpful for tuning algorithms.
+
+Unfortunately, the word "converger" is also used to describe spokes that return bounds
+for the purpose of measuring overall convergence (as opposed to convergence within the hub
+algorithm).
+
+..
+   The scenario decomposition methods (PH and APH) allow for user-written
+   convergence metrics as plug-ins. A pattern that can be followed is shown
+   in the farmer example. The ``farmer_cylinders.py`` file can have::
+
+      from mpisppy.convergers.norm_rho_converger import NormRhoConverger
+      ...
+      xxxxxx
+
diff --git a/doc/src/spokes.rst b/doc/src/spokes.rst
index 7905c710b..73a0f20b6 100644
--- a/doc/src/spokes.rst
+++ b/doc/src/spokes.rst
@@ -72,8 +72,8 @@ solutions can be tried before receiving new x values from the hub.
 This spoke also supports multistage problems. It does not try every subproblem,
 but shuffles the scenarios and loops over the shuffled list. At each step, it takes
 the first-stage solution specified by a scenario,
-    and then uses the scenarios that follows in the shuffled loop to get the
-    values of the non-first-stage variables that were not fixed before.
+and then uses the scenarios that follow in the shuffled loop to get the
+values of the non-first-stage variables that were not fixed before.
 
 slam_heuristic
 ^^^^^^^^^^^^^^
@@ -97,7 +97,7 @@ spoke_sleep_time
 
 This is an advanced topic and rarely encountered.
 
 In some settings, particularly with small sub-problems, it is possible for
-ranks within spokes to become ``of of sync.'' The most common manifestation of this
+ranks within spokes to become out of sync.
The most common manifestation of this is that some ranks do not see the kill signal and sit in a busy-wait I/O loop until something external kills them; but it can also be the case that Lagrangian bound spokes start operating on data from different hub iterations; they should notice diff --git a/examples/aircond/aircond_cylinders.py b/examples/aircond/aircond_cylinders.py index 657e40b9e..f10e9ecb9 100644 --- a/examples/aircond/aircond_cylinders.py +++ b/examples/aircond/aircond_cylinders.py @@ -152,8 +152,8 @@ def main(): ScenCount = np.prod(BFs) #ScenCount = _get_num_leaves(BFs) - scenario_creator_kwargs = {"branching_factors": BFs, - "start_ups":args.start_ups} + sc_options = {"args": args} + scenario_creator_kwargs = aircond.kw_creator(sc_options) all_scenario_names = [f"scen{i}" for i in range(ScenCount)] #Scens are 0-based # print(all_scenario_names) @@ -203,16 +203,18 @@ def main(): wheel = WheelSpinner(hub_dict, list_of_spoke_dict) wheel.spin() + fname = 'aircond_cyl_nonants.npy' if wheel.global_rank == 0: print("BestInnerBound={} and BestOuterBound={}".\ format(wheel.BestInnerBound, wheel.BestOuterBound)) - - + if write_solution: + print(f"Writing first stage solution to {fname}") + # all ranks need to participate because only the winner will write if write_solution: wheel.write_first_stage_solution('aircond_first_stage.csv') wheel.write_tree_solution('aircond_full_solution') - wheel.write_first_stage_solution('aircond_cyl_nonants.npy', - first_stage_solution_writer=first_stage_nonant_npy_serializer) + wheel.write_first_stage_solution(fname, + first_stage_solution_writer=first_stage_nonant_npy_serializer) if __name__ == "__main__": main() diff --git a/examples/aircond/aircond_slow.bash b/examples/aircond/aircond_slow.bash new file mode 100644 index 000000000..17510884d --- /dev/null +++ b/examples/aircond/aircond_slow.bash @@ -0,0 +1,8 @@ +#!/bin/bash + +SOLVERNAME="cplex" + +# get an xhat +# xhat output file name is hardwired +mpiexec --oversubscribe -np 3 python -m mpi4py aircond_cylinders.py --bundles-per-rank=0 --max-iterations=100 --default-rho=1 --solver-name=${SOLVERNAME} --branching-factors 4 3 2 --Capacity 200 --QuadShortCoeff 0.3 --start-ups --BeginInventory 50 --rel-gap 0.01 + diff --git a/examples/aircond/aircond_zhat.bash b/examples/aircond/aircond_zhat.bash index c39045038..d789fcab7 100644 --- a/examples/aircond/aircond_zhat.bash +++ b/examples/aircond/aircond_zhat.bash @@ -1,10 +1,11 @@ #!/bin/bash -SOLVERNAME="gurobi_persistent" +#SOLVERNAME="gurobi_persistent" +SOLVERNAME="cplex" # get an xhat # xhat output file name is hardwired -mpiexec --oversubscribe -np 9 python -m mpi4py aircond_cylinders.py --branching-factors 5 4 3 --bundles-per-rank=0 --max-iterations=50 --default-rho=1.0 --solver-name=${SOLVERNAME} --rel-gap=0.001 --abs-gap=2 --start-ups +mpiexec --oversubscribe -np 9 python -m mpi4py aircond_cylinders.py --branching-factors 5 4 3 --bundles-per-rank=0 --max-iterations=50 --default-rho=1.0 --solver-name=${SOLVERNAME} --rel-gap=0.001 --abs-gap=2 -#echo "starting zhat4xhat" -#python -m mpisppy.confidence_intervals.zhat4xhat mpisppy.tests.examples.aircond aircond_cyl_nonants.npy --solver-name ${SOLVERNAME} --branching-factors 4 3 2 --num-samples 5 --confidence-level 0.95 +echo "starting zhat4xhat" +python -m mpisppy.confidence_intervals.zhat4xhat mpisppy.tests.examples.aircond aircond_cyl_nonants.npy --solver-name ${SOLVERNAME} --branching-factors 4 3 2 --num-samples 10 --confidence-level 0.95 diff --git a/examples/aircond/aircond_zhatII.bash 
b/examples/aircond/aircond_zhatII.bash new file mode 100644 index 000000000..0e71a459d --- /dev/null +++ b/examples/aircond/aircond_zhatII.bash @@ -0,0 +1,11 @@ +#!/bin/bash + +SOLVERNAME="cplex" +SAMPLES=3 + +# get an xhat +# xhat output file name is hardwired +mpiexec --oversubscribe -np 3 python3 -m mpi4py aircond_cylinders.py --bundles-per-rank=0 --max-iterations=50 --default-rho=1 --solver-name=${SOLVERNAME} --branching-factors 4 3 2 --Capacity 200 --QuadShortCoeff 0.3 --start-ups --BeginInventory 50 + +echo "starting zhat4xhat with ${SAMPLES} samples" +python -m mpisppy.confidence_intervals.zhat4xhat mpisppy.tests.examples.aircond aircond_cyl_nonants.npy --solver-name ${SOLVERNAME} --branching-factors 4 3 2 --num-samples ${SAMPLES} --confidence-level 0.95 --Capacity 200 --QuadShortCoeff 0.3 --start-ups --BeginInventory 50 diff --git a/examples/sizes/sizes_demo.py b/examples/sizes/sizes_demo.py index 3df009d1b..82d070866 100644 --- a/examples/sizes/sizes_demo.py +++ b/examples/sizes/sizes_demo.py @@ -181,14 +181,15 @@ scenario_creator, scenario_denouement, scenario_creator_kwargs={"scenario_count": ScenCount}, + extensions=MinMaxAvg, + PH_converger=None, + rho_setter=None, ) ph.options["PHIterLimit"] = 3 from mpisppy.extensions.avgminmaxer import MinMaxAvg options["avgminmax_name"] = "FirstStageCost" - conv, obj, bnd = ph.ph_main(extensions=MinMaxAvg, - PH_converger=None, - rho_setter=None) + conv, obj, bnd = ph.ph_main() diff --git a/mpisppy/confidence_intervals/multi_seqsampling.py b/mpisppy/confidence_intervals/multi_seqsampling.py index 2ae5e557c..6e6bfe403 100644 --- a/mpisppy/confidence_intervals/multi_seqsampling.py +++ b/mpisppy/confidence_intervals/multi_seqsampling.py @@ -178,6 +178,8 @@ def _gap_estimators_with_independent_scenarios(self, xhat_k, nk, (TBD: drop this arg and just use the function in refmodel) Returns: Gk, Sk (float): mean and standard devation of the gap estimate + Note: + Seed management is mainly in the form of updates to SeedCount """ ama_options = self.options.copy() @@ -187,13 +189,14 @@ def _gap_estimators_with_independent_scenarios(self, xhat_k, nk, ama_options['EF_solver_options']= self.solver_options ama_options['num_scens'] = nk ama_options['_mpisppy_probability'] = 1/nk #Probably not used + ama_options['start_seed'] = self.SeedCount pseudo_branching_factors = [nk]+[1]*(self.numstages-2) ama_options['branching_factors'] = pseudo_branching_factors ama = amalgomator.Amalgomator(options=ama_options, scenario_names=estimator_scenario_names, scenario_creator=self.refmodel.scenario_creator, - kw_creator=self._kw_creator_without_seed, + kw_creator=self.refmodel.kw_creator, scenario_denouement=scenario_denouement) ama.run() #Optimal solution of the approximate problem diff --git a/mpisppy/confidence_intervals/sample_tree.py b/mpisppy/confidence_intervals/sample_tree.py index f60c179b0..4b79f3f9a 100644 --- a/mpisppy/confidence_intervals/sample_tree.py +++ b/mpisppy/confidence_intervals/sample_tree.py @@ -122,7 +122,13 @@ def _create_amalgomator(self): if self.solver_options is not None: ama_options['EF_solver_options']= self.solver_options ama_options['num_scens'] = self.numscens + if "start_seed" not in ama_options: + ama_options["start_seed"] = self.seed ama_options['_mpisppy_probability'] = 1/self.numscens #Probably not used + # TBD: better options flow (Dec 2021) + if "branching_factors" not in ama_options: + if "kwargs" in ama_options: + ama_options["branching_factors"] = ama_options["kwargs"]["branching_factors"] scen_names = 
self.refmodel.scenario_names_creator(self.numscens, start=self.seed) diff --git a/mpisppy/tests/examples/aircond.py b/mpisppy/tests/examples/aircond.py index 57782f6cc..9efc7eb8b 100644 --- a/mpisppy/tests/examples/aircond.py +++ b/mpisppy/tests/examples/aircond.py @@ -1,6 +1,5 @@ #ReferenceModel for full set of scenarios for AirCond; June 2021 - - +# Dec 2021; numerous enhancements by DLW; do not change defaults import pyomo.environ as pyo import numpy as np import time @@ -12,23 +11,43 @@ # Use this random stream: aircondstream = np.random.RandomState() +# Do not edit these defaults! +parms = {"mudev": (float, 0.), + "sigmadev": (float, 40.), + "start_ups": (bool, False), + "StartUpCost": (float, 300.), + "start_seed": (int, 1134), + "min_d": (float, 0.), + "max_d": (float, 400.), + "starting_d": (float, 200.), + "BeginInventory": (float, 200.), + "InventoryCost": (float, 0.5), + "LastInventoryCost": (float, -0.8), + "Capacity": (float, 200.), + "RegularProdCost": (float, 1.), + "OvertimeProdCost": (float, 3.), + "NegInventoryCost": (float, -5.), + "QuadShortCoeff": (float, 0) +} + +def _demands_creator(sname, sample_branching_factors, root_name="ROOT", **kwargs): + + if "start_seed" not in kwargs: + raise RuntimeError(f"start_seed not in kwargs; {kwargs =}") + start_seed = kwargs["start_seed"] + max_d = kwargs.get("max_d", 400) + min_d = kwargs.get("min_d", 0) + mudev = kwargs.get("mudev", None) + sigmadev = kwargs.get("sigmadev", None) -def _demands_creator(sname,branching_factors,start_seed, mudev, sigmadev, - starting_d=200,root_name="ROOT"): - - max_d = 400 - min_d = 0 - - if branching_factors is None: - raise RuntimeError("scenario_creator for aircond needs branching_factors") scennum = sputils.extract_num(sname) # Find the right path and the associated seeds (one for each node) using scennum - prod = np.prod(branching_factors) + prod = np.prod(sample_branching_factors) s = int(scennum % prod) - d = starting_d + d = kwargs.get("starting_d", 200) demands = [d] nodenames = [root_name] - for bf in branching_factors: + for bf in sample_branching_factors: assert prod%bf == 0 prod = prod//bf nodenames.append(str(s//prod)) @@ -36,7 +55,7 @@ def _demands_creator(sname,branching_factors,start_seed, mudev, sigmadev, stagelist = [int(x) for x in nodenames[1:]] for t in range(1,len(nodenames)): - aircondstream.seed(start_seed+sputils.node_idx(stagelist[:t],branching_factors)) + aircondstream.seed(start_seed+sputils.node_idx(stagelist[:t],sample_branching_factors)) d = min(max_d,max(min_d,d+aircondstream.normal(mudev,sigmadev))) demands.append(d) @@ -60,34 +79,43 @@ def dual_rho_setter(scenario_instance): def primal_rho_setter(scenario_instance): return general_rho_setter(scenario_instance, rho_scale_factor=0.01) -def _StageModel_creator(time, demand, last_stage=False, start_ups=None): - # create a single stage of an aircond model + +def _StageModel_creator(time, demand, last_stage, **kwargs): + # create a single stage of an aircond model; do not change defaults (Dec 2021) + + def _kw(pname): + return kwargs.get(pname, parms[pname][1]) + model = pyo.ConcreteModel() model.T = [time] #Parameters #Demand model.Demand = demand - #Inventory Cost - model.InventoryCost = -0.8 if last_stage else 0.5 + #Inventory Cost: model.InventoryCost = -0.8 if last_stage else 0.5 + if last_stage: + model.InventoryCost = _kw("LastInventoryCost") + else: + model.InventoryCost = _kw("InventoryCost") #Regular Capacity - model.Capacity = 200 + model.Capacity = _kw("Capacity") #Regular Production Cost - 
model.RegularProdCost = 1 + model.RegularProdCost = _kw("RegularProdCost") #Overtime Production Cost - model.OvertimeProdCost = 3 + model.OvertimeProdCost = _kw("OvertimeProdCost") model.max_T = 25 # this is for the start-up cost constraints, model.bigM = model.Capacity * model.max_T - model.start_ups = start_ups + model.start_ups = _kw("start_ups") if model.start_ups: # Start-up Cost - model.StartUpCost = 300 # TBD: give the user easier control + model.StartUpCost = _kw("StartUpCost") # Negative Inventory Cost - model.NegInventoryCost = -5 + model.NegInventoryCost = _kw("NegInventoryCost") + model.QuadShortCoeff = _kw("QuadShortCoeff") #Variables model.RegularProd = pyo.Var(domain=pyo.NonNegativeReals, @@ -103,9 +131,7 @@ def _StageModel_creator(time, demand, last_stage=False, start_ups=None): # start-up cost variable model.StartUp = pyo.Var(within=pyo.Binary) - model.InventoryAux = pyo.Var(domain=pyo.NonNegativeReals) - - #Constraints + #Constraints (and some auxilliary variables) def CapacityRule(m): return m.RegularProd<=m.Capacity model.MaximumCapacity = pyo.Constraint(rule=CapacityRule) @@ -114,16 +140,39 @@ def CapacityRule(m): model.RegStartUpConstraint = pyo.Constraint( expr=model.bigM * model.StartUp >= model.RegularProd + model.OvertimeProd) - # for max function - model.NegInventoryConstraint = pyo.ConstraintList() - model.NegInventoryConstraint.add(model.InventoryAux >= model.InventoryCost*model.Inventory) - model.NegInventoryConstraint.add(model.InventoryAux >= model.NegInventoryCost*model.Inventory) + """ + # for max function using only one Var and two constraints + model.InventoryAux = pyo.Var(domain=pyo.NonNegativeReals) # to add to objective + + model.InventoryCostConstraint = pyo.ConstraintList() + model.InventoryCostConstraint.add(model.InventoryAux >= model.InventoryCost*model.Inventory) + model.InventoryCostConstraint.add(model.InventoryAux >= model.NegInventoryCost*model.Inventory) + """ + + # grab positive and negative parts of inventory + if not last_stage: + assert model.InventoryCost > 0, model.InventoryCost + assert model.NegInventoryCost < 0, model.NegInventoryCost + model.negInventory = pyo.Var(domain=pyo.NonNegativeReals, initialize=0.0) + model.posInventory = pyo.Var(domain=pyo.NonNegativeReals, initialize=0.0) + model.doleInventory = pyo.Constraint(expr=model.Inventory == model.posInventory - model.negInventory) + model.LinInvenCostExpr = pyo.Expression(expr = model.InventoryCost*model.posInventory - model.NegInventoryCost*model.negInventory) + else: # in the last stage we let the negative "cost" take care of both sides + assert model.InventoryCost < 0, f"last stage inven cost: {model.InventoryCost}" + model.LinInvenCostExpr = pyo.Expression(expr = model.InventoryCost*model.Inventory) + + if model.QuadShortCoeff > 0 and not last_stage: + model.InvenCostExpr = pyo.Expression(expr = model.LinInvenCostExpr +\ + model.QuadShortCoeff * model.negInventory * model.negInventory) + else: + assert model.QuadShortCoeff >= 0, model.QuadShortCoeff + model.InvenCostExpr = pyo.Expression(expr = model.LinInvenCostExpr) #Objective def stage_objective(m): expr = m.RegularProdCost*m.RegularProd +\ m.OvertimeProdCost*m.OvertimeProd +\ - m.InventoryAux + m.InvenCostExpr if m.start_ups: expr += m.StartUpCost * m.StartUp @@ -134,8 +183,14 @@ def stage_objective(m): return model #Assume that demands has been drawn before -def aircond_model_creator(demands, start_ups=None): +def aircond_model_creator(demands, **kwargs): # create a single aircond model for the given demands + # 
branching_factors=None, num_scens=None, mudev=0, sigmadev=40, start_seed=0, start_ups=None): + # typing aids... + start_seed = kwargs["start_seed"] + + start_ups = kwargs.get("start_ups", parms["start_ups"][1]) + model = pyo.ConcreteModel() num_stages = len(demands) model.T = range(1,num_stages+1) #Stages 1,2,...,T @@ -147,14 +202,14 @@ def aircond_model_creator(demands, start_ups=None): model.start_ups = start_ups #Parameters - model.BeginInventory = 100 + model.BeginInventory = kwargs.get("BeginInventory", parms["BeginInventory"][1]) #Creating stage models model.stage_models = {} for t in model.T: last_stage = (t==num_stages) model.stage_models[t] = _StageModel_creator(t, demands[t-1], - last_stage=last_stage, start_ups=model.start_ups) + last_stage=last_stage, **kwargs) #Constraints @@ -179,7 +234,7 @@ def material_balance_rule(m,t): def stage_cost(stage_m): expr = stage_m.RegularProdCost*stage_m.RegularProd +\ stage_m.OvertimeProdCost*stage_m.OvertimeProd +\ - stage_m.InventoryAux + stage_m.InvenCostExpr if stage_m.start_ups: expr += stage_m.StartUpCost * stage_m.StartUp return expr @@ -207,7 +262,6 @@ def MakeNodesforScen(model,nodenames,branching_factors,starting_stage=1): nonant_ef_suppl_list = [model.stage_models[stage].Inventory] if model.start_ups: nonant_ef_suppl_list.append(model.stage_models[stage].StartUp) - nonant_ef_suppl_list.append(model.stage_models[stage].InventoryAux) if stage ==1: ndn="ROOT" @@ -252,16 +306,17 @@ def MakeNodesforScen(model,nodenames,branching_factors,starting_stage=1): return(TreeNodes) -def scenario_creator(sname, branching_factors=None, num_scens=None, mudev=0, sigmadev=40, start_seed=0, start_ups=None): - if branching_factors is None: - raise RuntimeError("scenario_creator for aircond needs branching_factors") +def scenario_creator(sname, **kwargs): + + start_seed = kwargs['start_seed'] + if "branching_factors" not in kwargs: + raise RuntimeError("scenario_creator for aircond needs branching_factors in kwargs") + branching_factors = kwargs["branching_factors"] - demands,nodenames = _demands_creator(sname, branching_factors, start_seed, mudev, sigmadev) + demands,nodenames = _demands_creator(sname, branching_factors, root_name="ROOT", **kwargs) - model = aircond_model_creator(demands, start_ups=start_ups) + model = aircond_model_creator(demands, **kwargs) - if num_scens is not None: - model._mpisppy_probability = 1/num_scens #Constructing the nodes used by the scenario model._mpisppy_node_list = MakeNodesforScen(model, nodenames, branching_factors) @@ -276,7 +331,7 @@ def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, sname (string): scenario name to be created stage (int >=1 ): for stages > 1, fix data based on sname in earlier stages sample_branching_factors (list of ints): branching factors for the sample tree - seed (int): To allow randome sampling (for some problems, it might be scenario offset) + seed (int): To allow random sampling (for some problems, it might be scenario offset) given_scenario (Pyomo concrete model): if not None, use this to get data for ealier stages scenario_creator_kwargs (dict): keyword args for the standard scenario creator funcion Returns: @@ -284,32 +339,26 @@ def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, by the arguments """ - start_ups = scenario_creator_kwargs["start_ups"] # Finding demands from stage 1 to t + starting_d = scenario_creator_kwargs.get("starting_d", parms["starting_d"][1]) if given_scenario is None: if stage == 1: - past_demands = [200] + 
past_demands = [starting_d] else: raise RuntimeError(f"sample_tree_scen_creator for aircond needs a 'given_scenario' argument if the starting stage is greater than 1") else: past_demands = [given_scenario.stage_models[t].Demand for t in given_scenario.T if t<=stage] - optional_things = ['mudev','sigmadev','start_ups'] - default_values = [0,40,False] - for thing,value in zip(optional_things,default_values): - if thing not in scenario_creator_kwargs: - scenario_creator_kwargs[thing] = value - - #Finding demands for stages after t + + kwargs = scenario_creator_kwargs.copy() + kwargs["start_seed"] = seed + + #Finding demands for stages after t (note the dynamic seed) future_demands,nodenames = _demands_creator(sname, sample_branching_factors, - start_seed = seed, - mudev = scenario_creator_kwargs['mudev'], - sigmadev = scenario_creator_kwargs['sigmadev'], - starting_d=past_demands[stage-1], - root_name='ROOT'+'_0'*(stage-1)) + root_name='ROOT'+'_0'*(stage-1), **kwargs) demands = past_demands+future_demands[1:] #The demand at the starting stage is in both past and future demands - model = aircond_model_creator(demands, start_ups=start_ups) + model = aircond_model_creator(demands, **scenario_creator_kwargs) model._mpisppy_probability = 1/np.prod(sample_branching_factors) @@ -332,61 +381,78 @@ def scenario_names_creator(num_scens,start=None): #========= def inparser_adder(inparser): # (only for Amalgomator): add command options unique to aircond - inparser.add_argument("--mu-dev", - help="average deviation of demand between two periods (default 0)", - dest="mudev", - type=float, - default=0.) - inparser.add_argument("--sigma-dev", - help="average standard deviation of demands between two periods (default 40)", - dest="sigmadev", - type=float, - default=40.) + # Do not change the defaults. 
+    def _doone(name, helptext, argname=None):
+        # The name should be the name in parms
+        # helptext should not include the default
+        aname = name.replace("_", "-") if argname is None else argname
+        h = f"{helptext} (default {parms[name][1]})"
+        inparser.add_argument(f"--{aname}",
+                              help=h,
+                              dest=name,
+                              type=parms[name][0],
+                              default=parms[name][1])
+
+    _doone("mudev", "average deviation of demand between two periods", argname="mu-dev")
+    _doone("sigmadev", "standard deviation of deviation of demand between two periods", argname="sigma-dev")
+
+    d = parms["start_ups"][1]
     inparser.add_argument("--start-ups",
-                          help="Include start-up costs in model (this is a MIP)",
+                          help=f"Include start-up costs in model, resulting in a MIP (default {d})",
                           dest="start_ups",
                           action="store_true"
                           )
-    inparser.set_defaults(start_ups=False)
-    inparser.add_argument("--start-seed",
-                          help="random number seed (default 1134)",
-                          dest="start_seed",
-                          type=int,
-                          default=1134)
-
+    inparser.set_defaults(start_ups=d)
+
+    _doone("StartUpCost", helptext="Cost if production in a period is non-zero and start-up is True")
+    _doone("start_seed", helptext="random number seed")
+    _doone("min_d", helptext="minimum demand in a period")
+    _doone("max_d", helptext="maximum demand in a period")
+    _doone("starting_d", helptext="period 0 demand")
+    _doone("InventoryCost", helptext="Inventory cost per period per item")
+    _doone("BeginInventory", helptext="Initial inventory")
+    _doone("LastInventoryCost", helptext="Inventory `cost` (should be negative) in the last period")
+    _doone("Capacity", helptext="Per period regular time capacity")
+    _doone("RegularProdCost", helptext="Regular time production cost")
+    _doone("OvertimeProdCost", helptext="Overtime (or subcontractor) production cost")
+    _doone("NegInventoryCost", helptext="Linear coefficient for backorders (should be negative; not used in last stage)")
+    _doone("QuadShortCoeff", helptext="Coefficient for backorders squared (should be nonnegative; not used in last stage)")
+
     return inparser

 #=========
 def kw_creator(options):
-    
+    # TBD: re-write this function...
+    if "kwargs" in options:
+        return options["kwargs"]
+
+    kwargs = dict()
+
     def _kwarg(option_name, default = None, arg_name=None):
         # options trumps args
         retval = options.get(option_name)
         if retval is not None:
-            return retval
+            kwargs[option_name] = retval
+            return
         args = options.get('args')
         aname = option_name if arg_name is None else arg_name
         retval = getattr(args, aname) if hasattr(args, aname) else None
         retval = default if retval is None else retval
-        return retval
+        kwargs[option_name] = retval

     # (only for Amalgomator): linked to the scenario_creator and inparser_adder
     # for confidence intervals, we need to see if the values are in args
-    BFs = _kwarg("branching_factors")
-    mudev = _kwarg("mudev", 0.)
-    sigmadev = _kwarg("sigmadev", 40.)
- start_seed = _kwarg("start_seed", 1134) - start_ups = _kwarg("start_ups", None) - kwargs = {"num_scens" : options.get('num_scens', None), - "branching_factors": BFs, - "mudev": mudev, - "sigmadev": sigmadev, - "start_seed": start_seed, - "start_ups": start_ups, - } + _kwarg("branching_factors") + for idx, tpl in parms.items(): + _kwarg(idx, tpl[1]) + if kwargs["start_ups"] is None: - raise ValueError("kw_creator called, but no value given for start_ups") + raise ValueError(f"kw_creator called, but no value given for start_ups, {options =}") + if kwargs["start_seed"] is None: + raise ValueError(f"kw_creator called, but no value given for start_seed, {options =}") + return kwargs diff --git a/mpisppy/tests/test_conf_int_aircond.py b/mpisppy/tests/test_conf_int_aircond.py index babb65443..47fa0670f 100644 --- a/mpisppy/tests/test_conf_int_aircond.py +++ b/mpisppy/tests/test_conf_int_aircond.py @@ -45,25 +45,32 @@ def setUpClass(self): self.xhatpath = "farmer_cyl_nonants.spy.npy" def _get_base_options(self): - options = { "EF_solver_name": solvername, + # Base option has batch options + # plus kwoptions to pass to kw_creator + # and kwargs, which is the result of that. + kwoptions = { "EF_solver_name": solvername, "start_ups": False, "branching_factors": [4, 3, 2], "num_scens": 24, + "start_seed": 0, "EF-mstage": True} - Baseoptions = {"num_batches": 5, + Baseoptions = {"from_Baseoptions": True, + "num_batches": 5, "batch_size": 6, - "opt":options} - scenario_creator_kwargs = aircond.kw_creator(options) + "kwoptions":kwoptions} + scenario_creator_kwargs = aircond.kw_creator(kwoptions) Baseoptions['kwargs'] = scenario_creator_kwargs return Baseoptions def _get_xhatEval_options(self): + Baseoptions = self._get_base_options() options = {"iter0_solver_options": None, "iterk_solver_options": None, "display_timing": False, "solvername": solvername, "verbose": False, "solver_options":None} + options.update(Baseoptions["kwoptions"]) return options def _get_xhat_gen_options(self, BFs): @@ -96,10 +103,29 @@ def setUp(self): def tearDown(self): os.remove(self.xhat_path) + + def _eval_creator(self): + options = self._get_xhatEval_options() + + MMW_options = self._get_base_options() + branching_factors= MMW_options['kwoptions']['branching_factors'] + scen_count = np.prod(branching_factors) + scenario_creator_kwargs = MMW_options['kwargs'] + ###scenario_creator_kwargs['num_scens'] = MMW_options['batch_size'] + all_scenario_names = aircond.scenario_names_creator(scen_count) + all_nodenames = sputils.create_nodenames_from_branching_factors(branching_factors) + ev = Xhat_Eval(options, + all_scenario_names, + aircond.scenario_creator, + scenario_denouement=None, + all_nodenames=all_nodenames, + scenario_creator_kwargs=scenario_creator_kwargs + ) + return ev def test_ama_creator(self): options = self._get_base_options() - ama_options = options['opt'] + ama_options = options['kwoptions'] ama_object = ama.from_module(self.refmodelname, options=ama_options, use_command_line=False) @@ -109,9 +135,9 @@ def test_MMW_constructor(self): xhat = ciutils.read_xhat(self.xhat_path) MMW = MMWci.MMWConfidenceIntervals(self.refmodelname, - options['opt'], + options['kwoptions'], xhat, - options['num_batches'], batch_size = options["batch_size"], start = options['opt']['num_scens']) + options['num_batches'], batch_size = options["batch_size"], start = options['kwoptions']['num_scens']) def test_seqsampling_creator(self): BFs = [4, 3, 2] @@ -137,7 +163,7 @@ def test_seqsampling_creator(self): def 
test_indepscens_seqsampling_creator(self): options = self._get_base_options() - branching_factors= options['opt']['branching_factors'] + branching_factors= options['kwoptions']['branching_factors'] xhat_gen_options = self._get_xhat_gen_options(branching_factors) # We want a very small instance for testing on GitHub. @@ -164,32 +190,12 @@ def test_indepscens_seqsampling_creator(self): "no solver is available") def test_ama_running(self): options = self._get_base_options() - ama_options = options['opt'] + ama_options = options['kwoptions'] ama_object = ama.from_module(self.refmodelname, ama_options, use_command_line=False) ama_object.run() obj = round_pos_sig(ama_object.EF_Obj,2) - self.assertEqual(obj, 800.) - - - def _eval_creator(self): - options = self._get_xhatEval_options() - - MMW_options = self._get_base_options() - branching_factors= MMW_options['opt']['branching_factors'] - scen_count = np.prod(branching_factors) - scenario_creator_kwargs = MMW_options['kwargs'] - scenario_creator_kwargs['num_scens'] = MMW_options['batch_size'] - all_scenario_names = aircond.scenario_names_creator(scen_count) - all_nodenames = sputils.create_nodenames_from_branching_factors(branching_factors) - ev = Xhat_Eval(options, - all_scenario_names, - aircond.scenario_creator, - scenario_denouement=None, - all_nodenames=all_nodenames, - scenario_creator_kwargs=scenario_creator_kwargs - ) - return ev + self.assertEqual(obj, 810.) @unittest.skipIf(not solver_available, @@ -204,18 +210,17 @@ def test_xhat_eval_evaluate(self): ev = self._eval_creator() base_options = self._get_base_options() - branching_factors= base_options['opt']['branching_factors'] + branching_factors= base_options['kwoptions']['branching_factors'] full_xhat = self._make_full_xhat(branching_factors) obj = round_pos_sig(ev.evaluate(full_xhat),2) - self.assertEqual(obj, 890.0) # rebaselined 5 Dec 2021 - + self.assertEqual(obj, 960.0) # rebaselined 28 Dec 2021 @unittest.skipIf(not solver_available, "no solver is available") def test_xhat_eval_evaluate_one(self): ev = self._eval_creator() options = self._get_base_options() - branching_factors= options['opt']['branching_factors'] + branching_factors= options['kwoptions']['branching_factors'] full_xhat = self._make_full_xhat(branching_factors) num_scens = np.prod(branching_factors) @@ -226,8 +231,7 @@ def test_xhat_eval_evaluate_one(self): k = all_scenario_names[0] obj = ev.evaluate_one(full_xhat,k,ev.local_scenarios[k]) obj = round_pos_sig(obj,2) - self.assertEqual(obj, 820.0) - + self.assertEqual(obj, 990.0) # rebaselined 28 Dec 2021 @unittest.skipIf(not solver_available, "no solver is available") @@ -235,26 +239,26 @@ def test_MMW_running(self): options = self._get_base_options() xhat = ciutils.read_xhat(self.xhat_path) MMW = MMWci.MMWConfidenceIntervals(self.refmodelname, - options['opt'], + options['kwoptions'], xhat, options['num_batches'], batch_size = options["batch_size"], - start = options['opt']['num_scens']) + start = options['kwoptions']['num_scens']) r = MMW.run() s = round_pos_sig(r['std'],2) bound = round_pos_sig(r['gap_inner_bound'],2) - self.assertEqual((s,bound), (24.0, 49.0)) + self.assertEqual((s,bound), (28.0, 110.0)) @unittest.skipIf(not solver_available, "no solver is available") def test_gap_estimators(self): options = self._get_base_options() - branching_factors= options['opt']['branching_factors'] + branching_factors= options['kwoptions']['branching_factors'] scen_count = np.prod(branching_factors) scenario_names = aircond.scenario_names_creator(scen_count, start=1000) 
sample_options = {"seed": 0, - "branching_factors": options['opt']['branching_factors']} + "branching_factors": options['kwoptions']['branching_factors']} estim = ciutils.gap_estimators(self.xhat, self.refmodelname, solving_type="EF-mstage", @@ -266,14 +270,14 @@ def test_gap_estimators(self): G = estim['G'] s = estim['s'] G,s = round_pos_sig(G,3),round_pos_sig(s,3) - self.assertEqual((G,s), (7.51, 26.4)) # rebaselined 5 Dec 2021 + self.assertEqual((G,s), (64.5, 31.8)) # rebaselined 28 Dec 2021 (see also 5 dec) @unittest.skipIf(not solver_available, "no solver is available") def test_indepscens_seqsampling_running(self): options = self._get_base_options() - branching_factors= options['opt']['branching_factors'] + branching_factors= options['kwoptions']['branching_factors'] xhat_gen_options = self._get_xhat_gen_options(branching_factors) # We want a very small instance for testing on GitHub. @@ -286,6 +290,7 @@ def test_indepscens_seqsampling_running(self): "branching_factors": branching_factors, "xhat_gen_options": xhat_gen_options, "start_ups": False, + "start_seed": 0, "solvername":solvername, } seq_pb = multi_seqsampling.IndepScens_SeqSampling(self.refmodelname, @@ -298,7 +303,7 @@ def test_indepscens_seqsampling_running(self): x = seq_pb.run(maxit=50) T = x['T'] ub = round_pos_sig(x['CI'][1],2) - self.assertEqual((T,ub), (13, 110.0)) + self.assertEqual((T,ub), (4, 67.0)) @unittest.skipIf(not solver_available, @@ -316,10 +321,10 @@ def test_zhat4xhat(self): zhatbar, eps_z = zhat4xhat._main_body(args, model_module) z2 = round_pos_sig(zhatbar, 2) - self.assertEqual(z2, 900.) + self.assertEqual(z2, 1400.) e2 = round_pos_sig(eps_z, 2) - self.assertEqual(e2, 74.) - print(f"*** {z2 =} {e2 =}") + self.assertEqual(e2, 84.) + #print(f"*** {z2 =} {e2 =}") if __name__ == '__main__': unittest.main() diff --git a/mpisppy/utils/amalgomator.py b/mpisppy/utils/amalgomator.py index d71a25a48..d073f8c75 100644 --- a/mpisppy/utils/amalgomator.py +++ b/mpisppy/utils/amalgomator.py @@ -52,7 +52,6 @@ ? Should baseparser functions "register themselves" in a import-name-string: function map that is global to baseparsers (i.e. importable)? - """ import numpy as np import importlib diff --git a/mpisppy/utils/baseparsers.py b/mpisppy/utils/baseparsers.py index 98449e542..5f4db39d0 100644 --- a/mpisppy/utils/baseparsers.py +++ b/mpisppy/utils/baseparsers.py @@ -162,18 +162,6 @@ def _basic_multistage(progname=None, num_scens_reqd=False): type=int, default=None) - if num_scens_reqd: - parser.add_argument( - "num_scens", help="Number of scenarios", type=int - ) - else: - parser.add_argument( - "--num-scens", - help="Number of scenarios (default None)", - dest="num_scens", - type=int, - default=None, - ) return parser diff --git a/mpisppy/utils/sputils.py b/mpisppy/utils/sputils.py index 7ee6df4c0..bc68b6d66 100644 --- a/mpisppy/utils/sputils.py +++ b/mpisppy/utils/sputils.py @@ -64,14 +64,16 @@ def scenario_tree_solution_writer( directory_name, scenario_name, scenario, bund descend_into=True, active=True, sort=True): + var_name = var.name + if bundling: + dot_index = var_name.find('.') + assert dot_index >= 0 + var_name = var_name[(dot_index+1):] # should this be here? 
if not var.stale: - var_name = var.name - if bundling: - dot_index = var_name.find('.') - assert dot_index >= 0 - var_name = var_name[(dot_index+1):] f.write(f"{var_name},{pyo.value(var)}\n") + else: + f.write(f"{var_name},{pyo.value(var)} (stale)\n") def write_spin_the_wheel_first_stage_solution(spcomm, opt_dict, solution_file_name, first_stage_solution_writer=first_stage_nonant_writer):
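Several of the changes above (the aircond ``parms`` table, the rewritten ``kw_creator``, and the
``kwoptions``/``kwargs`` split in ``test_conf_int_aircond.py``) revolve around building
scenario-creator keyword arguments from an options dictionary rather than from parsed
command-line arguments. The following minimal sketch is not part of the patch; the option values
mirror the test's ``kwoptions``, and the assertions reflect the "options trumps args" rule and the
defaults supplied by ``parms``::

   from mpisppy.tests.examples import aircond

   kwoptions = {"branching_factors": [4, 3, 2],
                "start_seed": 0,
                "start_ups": False}
   kwargs = aircond.kw_creator(kwoptions)

   assert kwargs["start_seed"] == 0      # taken directly from the options dict
   assert kwargs["Capacity"] == 200.     # default supplied by aircond.parms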